text stringlengths 38 1.54M |
|---|
# Copyright (c) Dec 22, 2014 CareerMonk Publications and others.
# E-Mail : info@careermonk.com
# Creation Date : 2014-01-10 06:15:46
# Last modification : 2008-10-31
# by : Narasimha Karumanchi
# Book Title : Data Structures And Algorithms Made In Java
# Warranty : This software is provided "as is" without any
# warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
def orderedInsert(self, item):
    """Insert `item` into this sorted linked list, keeping ascending order.

    Walks the list until the first node whose data exceeds `item`, then
    links a new Node in front of it (or at the head when `item` is not
    greater than any existing node, including the empty-list case).
    """
    current = self.head
    previous = None
    # Advance while nodes are still <= item; stop at the first larger node.
    while current is not None and current.get_data() <= item:
        previous = current
        current = current.get_next()
    temp = Node(item)
    if previous is None:
        # Empty list, or item belongs before the current head.
        temp.set_next(self.head)
        self.head = temp
    else:
        temp.set_next(current)
        previous.set_next(temp)
|
import unittest
from core.common import download
import os
from datetime import datetime
TEST_FILE_PATH = 'core/test/common/test_file.html'
TEST_URL = 'https://www.google.ie/'


class DownloadTest(unittest.TestCase):
    """Exercises core.common.download against a live URL and the local filesystem."""

    @classmethod
    def clean(cls):
        # Start (and end) every test without the fixture file on disk.
        if os.path.isfile(TEST_FILE_PATH):
            os.remove(TEST_FILE_PATH)

    def setUp(self):
        self.clean()

    def tearDown(self):
        self.clean()

    def test_download_data(self):
        # The first call is expected to create the file.
        self.assertFalse(os.path.isfile(TEST_FILE_PATH))
        download.download_data(TEST_URL, TEST_FILE_PATH)
        self.assertTrue(os.path.isfile(TEST_FILE_PATH))
        # A second call is expected to leave the file untouched
        # (asserted via an unchanged modification time).
        file_time = datetime.fromtimestamp(os.path.getmtime(TEST_FILE_PATH))
        download.download_data(TEST_URL, TEST_FILE_PATH)
        new_file_time = datetime.fromtimestamp(os.path.getmtime(TEST_FILE_PATH))
        self.assertEqual(file_time, new_file_time)

    def test_get_page(self):
        # get_page should return a truthy (non-empty) result for a live URL.
        self.assertTrue(download.get_page(TEST_URL))
|
# Mask a phone number
# Problem description
# Programmers Mobile masks part of each customer's phone number on invoices
# to protect personal information.
# Given the phone number as a string `phone_number`, complete the function
# `solution` that returns the string with every digit except the last four
# replaced by '*'.
# Constraints
# `phone_number` is a string of length 4 to 20, inclusive.
def solution(phone_number):
    """Return `phone_number` with all but the last four characters masked by '*'."""
    visible = phone_number[-4:]
    return '*' * (len(phone_number) - len(visible)) + visible
|
import math

# Read two complex numbers from the user, print the four basic arithmetic
# results, then print the modulus of the first number.
print("enter the first complex number")
x = complex(float(input("Enter the real part")), float(input("enter the imaginary part")))
print("enter the second complex number")
y = complex(float(input("Enter the real part")), float(input("enter the imaginary part")))
print(x + y)
print(x - y)
print(x * y)
print(x / y)  # raises ZeroDivisionError when y == 0
# The original line was `print(mo=math.sqrt(...))`, a TypeError because
# print() has no `mo` keyword.  Compute the modulus first, then print it.
mo = math.sqrt(pow(x.real, 2) + pow(x.imag, 2))  # equivalent to abs(x)
print(mo)
|
SPLAT_FORM_URL = "http://www.cv.nrao.edu/php/splat/c_export.php"
HIT_LIMIT = 2000          # export row cap; raise to accommodate larger queries
SPLATALOGUE_TIMEOUT = 30  # seconds before the HTTP request is abandoned
"""
NB this script has an export limit of HIT_LIMIT (2000) hits.
Change HIT_LIMIT to accommodate your needs.
"""
__all__ = ['search']

# numpy is referenced throughout the module via the `_np` alias.  Import it
# unconditionally: the original bound `_np` only inside the astropy-failure
# branch, so every `_np` reference raised NameError once astropy WAS
# installed.  Keep the plain `np` alias as well for backward compatibility.
import numpy as np
import numpy as _np

try:
    from astropy.table import Table
    use_astropy = True
except ImportError:
    use_astropy = False
"""
TODO : improve the parsing of the astropy.table output
TODO : get partition values/function for certain molecule?
TODO : simple escape probability calculation for transitions
       of molecule to estimate strengths?
TODO : be able to search on molecular species
       either just parse the output (grep type)
       or actually search for specific molecule (sid[])
TODO : create pretty printing for on screen results
       in the bin directory
TODO : add help docs explaining how to use it
TODO : clean up the QN strings in the results
"""
# urllib2 was split into urllib.request / urllib.error in Python 3.
try:
    # Python 3
    from urllib.request import urlopen, Request
    from urllib.parse import urlencode
except ImportError:
    # Fall back to Python 2's urllib/urllib2
    from urllib import urlencode
    from urllib2 import urlopen, Request
###############################################################################
def search(freq=[203.4, 203.42],
           fwidth=None,
           funit='GHz',
           linelist=['lovas', 'slaim', 'jpl', 'cdms', 'toyama', 'osu', 'recomb', 'lisa', 'rfi'],
           efrom=None,
           eto=None,
           eunit=None,  # 'el_cm1', 'eu_cm1', 'el_k', 'eu_k'
           trans=None,
           lill=None,  # line intensity lower limits, 'cdms_jpl', 'sijmu2', 'aij'
           **settings):
    """Query the Splatalogue export CGI and return the parsed results.

    Parameters
    ----------
    freq : [float, float] or float
        Frequency interval, or a centre frequency combined with `fwidth`.
    fwidth : float or None
        Width around a single `freq` value (required when `freq` is scalar).
    funit : str
        Frequency unit, 'GHz' or 'MHz'.
    linelist : list of str or str
        Line lists to search: lovas, slaim, jpl, cdms, toyama, osu,
        recomb, lisa, rfi.
    efrom, eto, eunit
        Optional energy range and its unit ('el_cm1', 'eu_cm1', 'el_k', 'eu_k').
    trans : str or None
        Transition filter, e.g. '1-0'.
    lill : (value, scale) or None
        Line-intensity lower limit; scale is 'cdms_jpl', 'sijmu2' or 'aij'.
    settings
        version : '2.0' (default), '1.0' or 'all'
        otype   : output type; default 'astropy.table'
    """
    # Normalise the line-list selection to a lowercase list.
    # (Fixes the original's `lineliest` typo, which silently dropped the
    # lowercased value in the single-string case.)
    if isinstance(linelist, list):
        linelist = [entry.lower() for entry in linelist]
    else:
        linelist = [linelist.lower()]
    # Parameter list of (field, value) pairs, urlencoded later.
    parameters = []
    parameters.extend(_parameters_preamble())
    # Originally extended twice (duplicate form fields); once is enough.
    parameters.extend(_parse_linelist(linelist))
    parameters.extend(_parse_settings(settings))
    parameters.extend(_parse_frequency(freq, fwidth, funit))
    if (efrom is not None) or (eto is not None):
        parameters.extend(_parse_erange(efrom, eto, eunit))
    if trans is not None:
        parameters.extend(_parse_transition(trans))
    if lill is not None:
        parameters.extend(_parse_lill(lill))
    parameters.extend(_parameters_ending())
    results = _get_results(parameters)
    # dict.has_key() no longer exists on Python 3; use .get with a default.
    return _parse_results(results, settings.get('otype', 'astropy.table'))
def _parameters_preamble():
"""
Set the default display parameters, i.e. display
everything in the result table
# Energy level display (triggered)
# 1 : Elower (cm-1)
# 2 : Elower (K)
# 3 : Eupper (cm-1)
# 4 : Eupper (K)
el1=el1
el2=el2
el3=el3
el4=el4
# Line strength display (triggered)
# 1 : CDMS/JPL Intensity
# 2 : Sij mu2
# 3 : Sij
# 4 : Aij
# 5 : Lovas/AST
ls1=ls1
ls2=ls2
ls3=ls3
ls4=ls4
ls5=ls5
# Display Unresolved quantum numbers (triggered)
# always on
#~ show_unres_qn=show_unres_qn
# Show upper degeneracy (triggered)
# always on
#~ show_upper_degeneracy=show_upper_degeneracy
# Display Molecule Tag (triggered)
# always on
#~ show_molecule_tag=show_molecule_tag
# No HFS Display (triggered)
# not included
#~ noHFS=noHFS
# Display HFS Intensity (triggered)
# always on
#~ displayHFS=displayHFS
# Display Quantum Number Code (triggered)
# always on
#~ show_qn_code=show_qn_code
# Display Lab Ref (triggered)
# always off
#~ show_lovas_labref=show_lovas_labref
# Display Obs Ref (triggered)
# always off
#~ show_lovas_obsref=show_lovas_obsref
# Display Ordered Frequency ONLY (triggered)
#~ show_orderedfreq_only=show_orderedfreq_only
# Display NRAO Recommended Frequencies (triggered)
#~ show_nrao_recommended=show_nrao_recommended
SUMMARY :
ALWAYS ON-----------------------------------
Display HFS Intensity
Display Unresolved Quantum Numbers
Display Upper State Degeneracy
Display Molecule Tag
Display Quantum Number Code
Display NRAO Recommended Frequencies
E_levels (all)
Line Strength Display (all)
Display Ordered Frequency ONLY (only one frequency to parse)
--------------------------------------------
"""
returnlist = [
('submit', 'Search'),
('ls1','ls1'),
('ls2','ls2'),
('ls3','ls3'),
('ls4','ls4'),
('ls5','ls5'),
('el1', 'el1'),
('el2', 'el2'),
('el3', 'el3'),
('el4', 'el4'),
('show_unres_qn', 'show_unres_qn'),
('show_upper_degeneracy', 'show_upper_degeneracy'),
('show_molecule_tag', 'show_molecule_tag'),
('displayHFS', 'displayHFS'),
('show_qn_code', 'show_qn_code'),
#('show_lovas_labref', 'how_lovas_labref'), # Always OFF
#('show_lovas_obsref', 'show_lovas_obsref'), # Always OFF
#~ ('show_orderedfreq_only', 'show_orderedfreq_only'),
('show_nrao_recommended', 'show_nrao_recommended')
]
return returnlist
def _parse_linelist(linelist):
"""
Only search the requested line lists.
# line list (triggered)
# def all on
displayRecomb=displayRecomb
displayLovas=displayLovas
displaySLAIM=displaySLAIM
displayJPL=displayJPL
displayCDMS=displayCDMS
displayToyaMA=displayToyaMA
displayOSU=displayOSU
displayLisa=displayLisa
displayRFI=displayRFI
"""
returnlist = []
if 'lovas' in linelist:
returnlist.append(('displayLovas' ,'displayLovas'))
if 'slaim' in linelist:
returnlist.append(('displaySLAIM' ,'displaySLAIM'))
if 'jpl' in linelist:
returnlist.append(('displayJPL' ,'displayJPL'))
if 'cdms' in linelist:
returnlist.append(('displayCDMS' ,'displayCDMS'))
if 'toyama' in linelist:
returnlist.append(('displayToyaMA' ,'displayToyaMA'))
if 'osu' in linelist:
returnlist.append(('displayOSU' ,'displayOSU'))
if 'recomb' in linelist:
returnlist.append(('displayRecomb' ,'displayRecomb'))
if 'lisa' in linelist:
returnlist.append(('displayLisa' ,'displayLisa'))
if 'rfi' in linelist:
returnlist.append(('displayRFI' ,'displayRFI'))
return returnlist
def _set_bool(settings, key, param, default):
"""
help function to check the dictionary settings if key exsists,
and return a on (param, param) or off (empty) tuple depending in
settings[key] value or to the default (0:off, or 1:on ) value
"""
if settings.has_key( key ):
if not settings[key]: # if its False
return () # return empty list
elif settings[key]: # if its True
return (param, param)
else: # Else we set it to the default value
if default: # if default is On (i.e. 1)
return (param, param)
elif not default: # if default is Off (i.e. 0)
return ()
def _parse_settings(settings):
    """Translate user-facing keyword settings into Splatalogue form fields.

    Recognised keys:
      version   : data release, '2.0' (default), '1.0' or 'all'
      felim     : frequency error limit (default off)
      no_atm    : exclude atmospheric species (default on)
      nrao      : show ONLY NRAO recommended frequencies (default off)
      potential : exclude potential interstellar species (default on)
      probable  : exclude probable interstellar species (default on)
      known     : exclude known AST species (default off)

    (dict.has_key() calls replaced with `in`/.get for Python 3; the
    append-then-remove-empty-tuples hack replaced with a filtered loop.)
    """
    returnlist = []
    # Data release version radio button; default v2.0, anything
    # unrecognised falls back to "all releases".
    version = str(settings.get('version', '2.0'))
    if version in ['2.0', '2', '2.']:
        returnlist.append(('data_version', 'v2.0'))
    elif version in ['1.0', '1', '1.']:
        returnlist.append(('data_version', 'v1.0'))
    else:
        returnlist.append(('data_version', 'vall'))
    # Triggered checkboxes: (settings key, form parameter, default state).
    toggles = [
        ('felim', 'fel', 0),
        ('no_atm', 'no_atmospheric', 1),
        ('nrao', 'include_only_nrao', 0),
        ('potential', 'no_potential', 1),
        ('probable', 'no_probable', 1),
        ('known', 'known', 0),
    ]
    for key, param, default in toggles:
        entry = _set_bool(settings, key, param, default)
        if entry:  # _set_bool returns () when the toggle is off
            returnlist.append(entry)
    return returnlist
def _parse_frequency(freq, fwidth, funit):
"""
# frequency
from=31
to=31
frequency_units=GHz
or
frequency_units=MHz
"""
returnlist = []
#### FREQUENCY
# Two casees:
# 1. A list with length two
# 2. A integer/float
if type(freq) == str:
raise(Exception, 'Wrong format for frequency. Need list or float')
# First guess : a list of floats with length two
try:
returnlist.append( ('from', str(freq[0])) )
returnlist.append( ('to', str(freq[1])) )
except (IndexError, TypeError):
# If not a list, should be a float, and fwidth given
try:
freq = float(freq)
except (ValueError):
raise (Exception, 'Wrong format for frequency. Need list or float')
if fwidth not in [0, 0.0, None]:
# with freq and fwidth given, we can calculate start and end
f1, f2 = freq + _np.array([-1,1]) * fwidth / 2.
returnlist.append( ('from', str(f1)) )
returnlist.append( ('to', str(f2)) )
else:
# the fwidth parameter is missing
raise (Exception, 'The fwidth parameter is missing. '
'Frequency parameter(s) malformed')
#### FREQUENCY UNIT
#
if funit not in [0, None]:
if funit.lower() in ['ghz', 'mhz']:
returnlist.append( ('frequency_units', funit) )
else:
print 'Allowed frequency units : \'GHz\' or \'MHz\''
elif not funit in [0, None]:
funit = 'GHz'
returnlist.append( ('frequency_units', 'GHz') )
return returnlist
def _parse_erange( efrom, eto, eunit ):
"""
# Energy range (triggered)
# but if one exists, the energy_range_type must exist
energy_range_from=10
energy_range_to=500
energy_range_type=eu_k
or
energy_range_type=el_k
or
energy_range_type=el_cm1
or
energy_range_type=eu_cm1
"""
returnlist = []
### Energy Range
# form['energy_range_from/to'] is a text field in the form
# while it is called e_from/to in the function
if efrom == None and eto == None and eunit != None:
print 'You gave the Enery range type keyword, but no energy range...'
raise Exception('energy range keywords malformed')
#~ if (efrom not None) or (eto not None):
eunit_ref = ['el_cm1', 'eu_cm1', 'el_k', 'eu_k']
# check that unit is given, and correct
# or set default (eu_k)
# set efrom if supplied
if efrom != None:
returnlist.append( ('energy_range_from', str(efrom)) )
# set eto if supplied
if eto != None:
returnlist.append( ('energy_range_to', str(eto)) )
# check if eunit is given, and tick the corresponding radio
# button, if none then assume Kelvin
if eunit != None: #arg.has_key('efrom') or arg.has_key('eto'):
if eunit.lower() in eunit_ref:
pass
else:
print 'Energy range unit keyword \'eunit\' malformed.'
raise Exception('eunit keyword malformed')
else:
# no value, assuming its in Kelvin (i.e. Eu/kb)
eunit = 'eu_k'
# now set the eunit radio button
returnlist.append( ('energy_range_type', eunit.lower() ) )
return returnlist
def _parse_transition( trans ):
"""
# transition (triggered)
tran=1-0
"""
return ('tran', str(trans))
def _parse_lill( lill ):
"""
# line intensity lower limit (triggered)
#~ lill_cdms_jpl=-5
#~ or
#~ lill_sijmu2
#~ or
#~ lill_aij
"""
### Line Intensity Lower Limits
if lill != None:
if lill[1].lower() == 'cdms_jpl':
return ( 'lill_cdms_jpl', str(lill[0]) )
elif lill[1].lower() == 'sijmu2':
return ( 'lill_sijmu2', str(lill[0]) )
elif lill[1].lower() == 'aij':
return ( 'lill_aij', str(lill[0]) )
def _parameters_ending():
    """Trailing export-form fields: colon-delimited text export of the
    current result set, starting at offset 0, capped at HIT_LIMIT rows."""
    fields = (
        ('export_type', 'current'),
        ('export_delimiter', 'colon'),
        ('offset', '0'),
        ('limit', str(HIT_LIMIT)),
        ('range', 'on'),
        ('submit', 'Export'),
    )
    return list(fields)
def _get_results(parameters):
    """POST the (field, value) parameter list to the Splatalogue export CGI
    and return the raw response body.

    NOTE(review): on Python 3 `.read()` returns bytes; downstream parsing
    treats the result as str — decode may be needed.  Confirm.
    """
    # Python 3's urlopen requires the POST body as bytes (the original
    # passed the urlencoded str, a TypeError under Python 3).
    body = urlencode(parameters).encode('ascii')
    req = Request(SPLAT_FORM_URL, body)
    req.add_header("Content-type", "application/x-www-form-urlencoded")
    return urlopen(req, timeout=SPLATALOGUE_TIMEOUT).read()
def _parse_results(data, output='astropy.table'):
"""
Only one output type at the moment, the astropy.table table
"""
#TODO : what if results are empty
if output == 'astropy.table':
if not use_astropy:
#~ print('Astropy not installed, try other output format')
raise(ImportError('Astropy not installed, try other output format'))
# get each line (i.e. each molecule)
rows = data.split('\n')
# get the names of the columns
column_names = rows[0]
column_names = column_names.split(':')
# clean them up a bit
for i in _np.arange(len(column_names)):
column_names[i] = column_names[i].replace('<br>', ' ')
column_names[i] = column_names[i].replace('<sub>', '_')
column_names[i] = column_names[i].replace('<sup>', '^')
column_names[i] = column_names[i].replace('</sup>', '')
column_names[i] = column_names[i].replace('</sub>', '')
column_names[i] = column_names[i].replace('μ', 'mu')
column_names[i] = column_names[i].replace('sid[0] is null', '')
column_names[i] = column_names[i].replace('sid[0] is null', '')
"""
Column Names should now be:
['Species',
'NRAO Recommended',
'Chemical Name',
'Freq-GHz',
'Freq Err',
'Meas Freq-GHz',
'Meas Freq Err',
'Resolved QNs',
'Unresolved Quantum Numbers',
'CDMS/JPL Intensity',
'S_ijmu^2 (D^2)',
'S_ij',
'Log_10 (A_ij)',
'Lovas/AST Intensity',
'E_L (cm^-1)',
'E_L (K)',
'E_U (cm^-1)',
'E_U (K)',
'HFS int',
'Upper State Degeneracy',
'Molecule Tag',
'Quantum Number Code',
'Linelist']
"""
rows = rows[1:-1]
rows = [i.split(':') for i in rows]
rows = _np.array(rows)
rows[rows == ''] = -999999
#~ print column_names
#~ return rows
column_dtypes = ['str', # 'Species',
'str', # 'NRAO Recommended',
'str', # 'Chemical Name',
'float', # 'Freq-GHz',
'float', # 'Freq Err',
'float', # 'Meas Freq-GHz',
'float', # 'Meas Freq Err',
'str', # 'Resolved QNs',
'str', # 'Unresolved Quantum Numbers',
'float', # 'CDMS/JPL Intensity',
'float', # 'S_ijmu^2 (D^2)',
'float', # 'S_ij',
'float', # 'Log_10 (A_ij)',
'str', # 'Lovas/AST Intensity',
'float', # 'E_L (cm^-1)',
'float', # 'E_L (K)',
'float', # 'E_U (cm^-1)',
'float', # 'E_U (K)',
'float', # 'HFS int',
'float', # 'Upper State Degeneracy',
'int', # 'Molecule Tag',
'int', # 'Quantum Number Code',
'str'] # 'Linelist']
funit = str(column_names[3][-3:])
column_units = [None, # 'Species',
None, # 'NRAO Recommended',
None, # 'Chemical Name',
funit, # 'Freq-GHz',
funit, # 'Freq Err',
funit, # 'Meas Freq-GHz',
funit, # 'Meas Freq Err',
None, # 'Resolved QNs',
None, # 'Unresolved Quantum Numbers',
'?', # 'CDMS/JPL Intensity',
'Debye^2', # 'S_ijmu^2 (D^2)',
'?', # 'S_ij',
'log10(s^-1)', # 'Log_10 (A_ij)',
'?', # 'Lovas/AST Intensity',
'cm^-1', # 'E_L (cm^-1)',
'K', # 'E_L (K)',
'cm^-1', # 'E_U (cm^-1)',
'K', # 'E_U (K)',
'?', # 'HFS int',
None, # 'Upper State Degeneracy',
None, # 'Molecule Tag',
None, # 'Quantum Number Code',
None] # 'Linelist']
column_names_original = column_names[:]
#~ column_names = [i.lower() for i in column_names]
#~ for i in _np.arange(len(column_names)):
#~ column_names[i] = column_names[i].replace('nrao recommended', 'nrao_rec')
#~ column_names[i] = column_names[i].replace('chemical name', 'name')
#~ if 'meas freq err' in column_names[i]:
#~ column_names[i] = 'mferr'
#~ elif 'meas freq' in column_names[i]:
#~ column_names[i] = 'mfreq'
#~ elif 'freq err' in column_names[i]:
#~ column_names[i] = 'ferr'
#~ elif 'freq' in column_names[i]:
#~ column_names[i] = 'freq'
#~ column_names[i] = column_names[i].replace('resolved qns', 'resqn')
#~ column_names[i] = column_names[i].replace('unresolved quantum numbers', 'resqn')
column_names = ['species',
'nrao_rec',
'name',
'ofreq',
'oferr',
'mfreq',
'mferr',
'res_qn',
'uresqn',
'cdmsjplint',
'sijmu2',
'Sij',
'logaij',
'lovasastint',
'el_cm',
'el_k',
'eu_cm',
'eu_k',
'hfsint',
'gu',
'tag',
'qncode',
'list']
results = Table(data = rows ,
names = column_names,
dtypes = column_dtypes)
for i in _np.arange(len(column_units)):
results.field(i).units = column_units[i]
return results
else:
print('Nothing else than astropy.table output is implemented atm')
return results
"""
sid[]=
# Energy level display (triggered)
# 1 : Elower (cm-1)
# 2 : Elower (K)
# 3 : Eupper (cm-1)
# 4 : Eupper (K)
el1=el1
el2=el2
el3=el3
el4=el4
# Line strength display (triggered)
# 1 : CDMS/JPL Intensity
# 2 : Sij mu2
# 3 : Sij
# 4 : Aij
# 5 : Lovas/AST
ls1=ls1
ls2=ls2
ls3=ls3
ls4=ls4
ls5=ls5
# line list (triggered)
# def all on
displayRecomb=displayRecomb
displayLovas=displayLovas
displaySLAIM=displaySLAIM
displayJPL=displayJPL
displayCDMS=displayCDMS
displayToyaMA=displayToyaMA
displayOSU=displayOSU
displayLisa=displayLisa
displayRFI=displayRFI
# data versions (choose)
# def v2.0
data_version=v2.0
or
data_version=v1.0
or
data_version=vall
# Exclude atmospheric species (triggered)
# def on
no_atmospheric=no_atmospheric
# Exclude potential interstellar species (triggered)
# def on
no_potential=no_potential
# Exclude probable interstellar species (triggered)
# def on
no_probable=no_probable
# Exclude known AST species (triggered)
# def off
known=known
# Show ONLY NRAO Recommended Freq (triggered)
# def off
include_only_nrao=include_only_nrao
# Display Unresolved quantum numbers (triggered)
# def on
show_unres_qn=show_unres_qn
# Show upper degeneracy (triggered)
# def on
show_upper_degeneracy=show_upper_degeneracy
# Display Molecule Tag (triggered)
# def on
show_molecule_tag=show_molecule_tag
# No HFS Display (triggered)
noHFS=noHFS
# Display HFS Intensity (triggered)
displayHFS=displayHFS
# Display Quantum Number Code (triggered)
show_qn_code=show_qn_code
# Display Lab Ref (triggered)
show_lovas_labref=show_lovas_labref
# Display Obs Ref (triggered)
show_lovas_obsref=show_lovas_obsref
# Display Ordered Frequency ONLY (triggered)
show_orderedfreq_only=show_orderedfreq_only
# Display NRAO Recommended Frequencies (triggered)
show_nrao_recommended=show_nrao_recommended
# transition (triggered)
tran=1-0
# frequency
from=31
to=31
frequency_units=GHz
or
frequency_units=MHz
# line intensity lower limit (triggered)
lill_cdms_jpl=-5
or
lill_sijmu2
or
lill_aij
# Energy range (triggered)
# but if one exists, the energy_range_type must exist
energy_range_from=10
energy_range_to=500
energy_range_type=eu_k
or
energy_range_type=el_k
or
energy_range_type=el_cm1
or
energy_range_type=eu_cm1
submit=1
"""
|
# Code for exercise 1 goes here
class Vector3:
    """A simple 3D vector with a Euclidean length."""

    def __init__(self, x=0, y=0, z=0):
        self.x = x
        self.y = y
        self.z = z

    def len(self):
        """Return the Euclidean norm sqrt(x^2 + y^2 + z^2)."""
        squared = sum(c * c for c in (self.x, self.y, self.z))
        return squared ** 0.5
# only run the following code when this module is executed directly
# (not when it is imported)
if __name__ == '__main__':
    # Demo: print the lengths of two sample vectors.
    for vec in (Vector3(1, 1, 0), Vector3(1, 2, 3)):
        print(vec.len())
|
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.shortcuts import redirect, reverse, render, get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import FormView, ListView, DeleteView, View, DetailView, CreateView, UpdateView
from certificate.forms import (
InterviewCreateForm,
CertificateCreateForm,
TestCreateForm,
TrainingCreateForm,
PolishCreateForm,
InterviewUpdateForm,
LocksmithCreateForm, WelderCreateForm, OtherCreateForm, PostOtherCreateForm, PostWelderCreateForm,
PostLocksmithCreateForm)
from dwelling.forms import DwellingCreateForm
from medicine.forms import InsuranceCreateForm
from privileges.forms import QuotaCreateForm
from work.forms import (
DismissalCreateForm,
AssignmentCreateForm,
AdoptionCreateForm,
TransferCreateForm,
PermissionCreateForm,
VnosokCreateForm,
VocationCreateForm,
ControlCreateForm,
PrognosisCreateForm,
DisappearanceCreateForm,
VoivodshipCreateForm, EmployerCreateForm, FactoryCreateForm)
from user_profile.forms import AuthForm, NewUserForm, NoteCreateForm, ContactCreateForm, LanguageCreateForm, \
UserEditForm, NoteUpdateForm, ContactUpdateForm, LanguageUpdateForm, ContactTypeCreateForm, LanguageTitleCreateForm, \
LanguageLevelCreateForm, PortraitCreateForm
from django.utils.translation import ugettext as _
from documents.forms import UAPassportCreateForm, ForeignPassportCreateForm, VisaCreateForm, PersonalIDCreateForm
from user_profile.models import User, Contact, Language, Status, PsychologicalPortrait
from work.views import VoivodshipCreateView, EmployerCreateView, FactoryCreateView
class AuthView(FormView):
    """Email/password sign-in view rendering the sign-in template."""
    form_class = AuthForm
    template_name = 'user_profile/authentication-signin.html'

    def dispatch(self, request, *args, **kwargs):
        # Already signed-in users skip the form and go to the dashboard.
        if request.user.is_authenticated:
            return redirect('dashboard')
        return super(AuthView, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        # Authenticate by email/password; only active accounts get a session.
        email = form.cleaned_data['email']
        password = form.cleaned_data['password']
        user = authenticate(email=email, password=password)
        if user is not None:
            if user.is_active:
                login(self.request, user)
                return redirect('users')
        # Bad credentials or inactive account: flash an error and bounce
        # back to the page the request came from.
        messages.error(self.request, _('Не верный адрес почты или пароль'), 'danger')
        return redirect(self.request.META.get('HTTP_REFERER'))
def user_logout(request):
    """End the current session and redirect to the login page."""
    logout(request)
    return redirect('user:login')
class UserListView(LoginRequiredMixin, ListView):
    """List every User; the template iterates over the `users` context var."""
    template_name = 'user_profile/users-list.html'
    context_object_name = 'users'
    model = User
    login_url = reverse_lazy('user:login')
    # queryset = User.objects.all()
    # <- QuerySet read docs
    # def get_queryset(self):
    #     print('-'*80)
    #     print(self.request.GET.get('param'))
    #     return User.objects.filter(id=self.request.user.id)
# class UserDetailView(LoginRequiredMixin, DetailView):
# template_name = 'user_profile/user-detail.html'
# context_object_name = 'users_list'
# model = User
# login_url = reverse_lazy('user:login')
def user_detail_view(request, pk):
    """Render the user detail page.

    Collects every create/update form class the template's modals need and
    passes them (mostly as unbound form classes) in the context.  Only the
    note-update form is instantiated, pre-filled with the user's current
    note.

    Raises Http404 when no User with the given pk exists.
    """
    user = get_object_or_404(User, pk=pk)
    ua_passport_form = UAPassportCreateForm
    foreign_passport_form = ForeignPassportCreateForm
    note_create_form = NoteCreateForm
    # Pre-populate so the edit modal shows the existing note text.
    note_update_form = NoteUpdateForm(initial={'note': user.note})
    contact_create_form = ContactCreateForm
    language_create_form = LanguageCreateForm
    # NOTE(review): UserEditForm receives `pk` as its first positional
    # argument (normally `data` for a Django form) — confirm the form's
    # __init__ actually expects a pk here.
    user_edit_form = UserEditForm(pk)
    interview_create_form = InterviewCreateForm
    dwelling_create_form = DwellingCreateForm
    insurance_create_form = InsuranceCreateForm
    quota_create_form = QuotaCreateForm
    dismissal_create_form = DismissalCreateForm
    assignment_create_form = AssignmentCreateForm
    adoption_create_form = AdoptionCreateForm
    transfer_create_form = TransferCreateForm
    permission_create_form = PermissionCreateForm
    # vnosok_create_form = VnosokCreateForm
    vocation_create_form = VocationCreateForm
    control_create_form = ControlCreateForm
    prognosis_create_form = PrognosisCreateForm
    disappearance_create_form = DisappearanceCreateForm
    certificate_create_form = CertificateCreateForm
    test_create_form = TestCreateForm
    training_create_form = TrainingCreateForm
    polish_create_form = PolishCreateForm
    visa_create_form = VisaCreateForm
    id_create_form = PersonalIDCreateForm
    contact_type_create_form = ContactTypeCreateForm
    language_title_create_form = LanguageTitleCreateForm
    language_level_create_form = LanguageLevelCreateForm
    voivodship_create_form = VoivodshipCreateForm
    employer_create_form = EmployerCreateForm
    factory_create_form = FactoryCreateForm
    portrait_create_form = PortraitCreateForm
    contact_update_form = ContactUpdateForm
    language_update_form = LanguageUpdateForm
    locksmith_create_form = LocksmithCreateForm
    welder_create_form = WelderCreateForm
    other_create_form = OtherCreateForm
    post_locksmith_create_form = PostLocksmithCreateForm
    post_welder_create_form = PostWelderCreateForm
    post_other_create_form = PostOtherCreateForm
    return render(request, 'user_profile/user-detail.html', context={
        'user': user,
        'locksmith_create_form': locksmith_create_form,
        'welder_create_form': welder_create_form,
        'other_create_form': other_create_form,
        'post_locksmith_create_form': post_locksmith_create_form,
        'post_welder_create_form': post_welder_create_form,
        'post_other_create_form': post_other_create_form,
        'voivodship_create_form': voivodship_create_form,
        'employer_create_form': employer_create_form,
        'factory_create_form': factory_create_form,
        'id_create_form': id_create_form,
        'ua_passport_form': ua_passport_form,
        'foreign_passport_form': foreign_passport_form,
        'note_create_form': note_create_form,
        'contact_create_form': contact_create_form,
        'language_create_form': language_create_form,
        'user_edit_form': user_edit_form,
        'interview_create_form': interview_create_form,
        'dwelling_create_form': dwelling_create_form,
        'insurance_create_form': insurance_create_form,
        'quota_create_form': quota_create_form,
        'dismissal_create_form': dismissal_create_form,
        'assignment_create_form': assignment_create_form,
        'adoption_create_form': adoption_create_form,
        'permission_create_form': permission_create_form,
        'vocation_create_form': vocation_create_form,
        'control_create_form': control_create_form,
        'prognosis_create_form': prognosis_create_form,
        'disappearance_create_form': disappearance_create_form,
        'transfer_create_form': transfer_create_form,
        'certificate_create_form': certificate_create_form,
        'test_create_form': test_create_form,
        'training_create_form': training_create_form,
        'polish_create_form': polish_create_form,
        'visa_create_form': visa_create_form,
        'contact_update_form': contact_update_form,
        'language_update_form': language_update_form,
        'note_update_form': note_update_form,
        'contact_type_create_form': contact_type_create_form,
        'language_title_create_form': language_title_create_form,
        'language_level_create_form': language_level_create_form,
        'portrait_create_form': portrait_create_form,
    })
# def get_context_data(self, *args, **kwargs):
# context = super(UserListView, self).get_context_data(*args, **kwargs)
# context['ua_passport_form'] = UAPassportForm
#
# return context
# def user_profile(request, pk):
# user = user_profile.objects.get(pk=pk)
# return render(request, 'user_profile.html', {'user': user})
class NewUserView(LoginRequiredMixin, CreateView):
    """Create a new User; fresh accounts get the Status with pk=1."""
    login_url = reverse_lazy('user:login')
    template_name = 'user_profile/add_user.html'
    form_class = NewUserForm

    def form_valid(self, form):
        # Assign the initial status before the first save.
        obj = form.save(commit=False)
        obj.status = Status.objects.get(pk=1)
        obj.save()
        # NOTE(review): super().form_valid() calls form.save() again, so
        # the object is likely saved twice — confirm this is intended.
        return super().form_valid(form)

    def get_success_url(self):
        # Land on the freshly created user's detail page.
        return reverse('user:user-detail', kwargs={'pk': self.object.pk})
# def new_user(request):
#
# if request.method == 'POST':
# email = request.POST['email']
# first_name = request.POST['first_name']
# last_name = request.POST['last_name']
# patronymic = request.POST['patronymic']
# position = request.POST['position']
# is_worker = request.POST['is_worker']
#
# user = User.objects.create(
# email=email,
# first_name=first_name,
# last_name=last_name,
# patronymic=patronymic,
# position=position,
# is_worker=is_worker,
# )
#
# return redirect('user:user-detail', pk=user.pk)
#
# return render(request, 'user_profile/add_user.html')
class NoteCreateView(UpdateView):
    """Despite the name, an UpdateView that writes the note onto a User
    and then returns to the referring page."""
    login_url = reverse_lazy('user:note-create')
    # template_name = 'user_profile/add_user.html'
    model = User
    form_class = NoteCreateForm
    #
    # def get_initial(self):
    #     super(NoteCreateView, self).get_initial()
    #     user = User.objects.get(pk=self.kwargs['pk'])
    #     self.initial = {'note': user.note}
    #     return self.initial

    def form_valid(self, form):
        obj = form.save(commit=False)
        obj.save()
        # Bounce back to wherever the form was submitted from.
        return redirect(self.request.META.get('HTTP_REFERER'))
class ContactCreateView(CreateView):
    """Attach a new Contact to the User addressed by the `pk` URL kwarg."""
    form_class = ContactCreateForm

    def form_valid(self, form):
        # Bind the owning user before persisting the contact.
        record = form.save(commit=False)
        record.user = User.objects.get(pk=self.kwargs['pk'])
        record.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Invalid input: silently return to the page the request came from.
        return redirect(self.request.META.get('HTTP_REFERER'))
class PortraitCreateView(CreateView):
    """Create a psychological portrait for the User given by the `pk` kwarg."""
    form_class = PortraitCreateForm

    def form_valid(self, form):
        # Bind the owning user before persisting the portrait.
        record = form.save(commit=False)
        record.user = User.objects.get(pk=self.kwargs['pk'])
        record.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Invalid input: silently return to the page the request came from.
        return redirect(self.request.META.get('HTTP_REFERER'))
class ContactTypeCreateView(CreateView):
    """Create a new contact type, then return to the referring page."""
    form_class = ContactTypeCreateForm

    def form_valid(self, form):
        record = form.save(commit=False)
        record.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Invalid input: silently return to the page the request came from.
        return redirect(self.request.META.get('HTTP_REFERER'))
class LanguageTitleCreateView(CreateView):
    """Create a new language title, then return to the referring page."""
    form_class = LanguageTitleCreateForm

    def form_valid(self, form):
        record = form.save(commit=False)
        record.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Invalid input: silently return to the page the request came from.
        return redirect(self.request.META.get('HTTP_REFERER'))
class LanguageLevelCreateView(CreateView):
    """Create a new language proficiency level, then return to the referrer."""
    form_class = LanguageLevelCreateForm

    def form_valid(self, form):
        record = form.save(commit=False)
        record.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Invalid input: silently return to the page the request came from.
        return redirect(self.request.META.get('HTTP_REFERER'))
class LanguageCreateView(CreateView):
    """Attach a Language record to the User addressed by the `pk` URL kwarg."""
    form_class = LanguageCreateForm

    def form_valid(self, form):
        # Bind the owning user before persisting.
        record = form.save(commit=False)
        record.user = User.objects.get(pk=self.kwargs['pk'])
        record.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Invalid input: silently return to the page the request came from.
        return redirect(self.request.META.get('HTTP_REFERER'))
class UserEditView(UpdateView):
    """Edit a User's profile fields; always returns to the referring page."""
    login_url = reverse_lazy('user:user-edit')
    template_name = 'user_profile/add_user.html'
    model = User
    # form_class = UserEditForm
    # Editable profile fields (fields= used instead of a custom form class).
    fields = (
        'avatar',
        'email',
        'first_name',
        'last_name',
        'patronymic',
        'position',
        'status',
        'date_of_birth',
        'registration',
        'residence_address',
    )

    def form_valid(self, form):
        obj = form.save(commit=False)
        obj.save()  # equivalent to form.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Silently return to the referring page; validation errors are not surfaced.
        return redirect(self.request.META.get('HTTP_REFERER'))
class NoteUpdateView(UpdateView):
    """Update only the `note` field of a User."""
    login_url = reverse_lazy('user:note-create')
    model = User
    # form_class = NoteUpdateForm
    fields = (
        'note',
    )

    def form_valid(self, form):
        obj = form.save(commit=False)
        obj.save()  # equivalent to form.save()
        return redirect(self.request.META.get('HTTP_REFERER'))
class ContactUpdateView(UpdateView):
    """Update a Contact; the contact pk comes from the `count` URL kwarg."""
    model = Contact
    pk_url_kwarg = 'count'
    context_object_name = 'contact'
    fields = (
        'type',
        'contact',
    )

    def form_valid(self, form):
        # Re-attach the owning user from the `pk` URL kwarg on every update.
        contact = form.save(commit=False)
        contact.user = User.objects.get(pk=self.kwargs['pk'])
        contact.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Silently return to the referring page; validation errors are not surfaced.
        return redirect(self.request.META.get('HTTP_REFERER'))
class PortraitUpdateView(UpdateView):
    """Update a PsychologicalPortrait; pk comes from the `count` URL kwarg."""
    model = PsychologicalPortrait
    pk_url_kwarg = 'count'
    # NOTE(review): context name 'contact' looks copy-pasted from
    # ContactUpdateView -- templates rely on it, so left unchanged.
    context_object_name = 'contact'
    fields = (
        'intelligence',
        'professionalism',
        'self_discipline',
        'sociability',
        'temperament',
        'personality_type',
    )

    def form_valid(self, form):
        # Re-attach the owning user from the `pk` URL kwarg on every update.
        contact = form.save(commit=False)
        contact.user = User.objects.get(pk=self.kwargs['pk'])
        contact.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Silently return to the referring page; validation errors are not surfaced.
        return redirect(self.request.META.get('HTTP_REFERER'))
class LanguageUpdateView(UpdateView):
    """Update a Language entry; pk comes from the `count` URL kwarg."""
    model = Language
    pk_url_kwarg = 'count'
    fields = (
        'title',
        'level',
    )

    def form_valid(self, form):
        # Re-attach the owning user from the `pk` URL kwarg on every update.
        obj = form.save(commit=False)
        obj.user = User.objects.get(pk=self.kwargs['pk'])
        obj.save()
        return redirect(self.request.META.get('HTTP_REFERER'))

    def form_invalid(self, form):
        # Silently return to the referring page; validation errors are not surfaced.
        return redirect(self.request.META.get('HTTP_REFERER'))
class ContactDeleteView(DeleteView):
    """Delete a Contact (pk from the `count` URL kwarg) and return to the referrer.

    Overrides delete() so no success_url / confirmation template is needed.
    (Dead commented-out form_valid/get_success_url experiments removed.)
    """
    model = Contact
    pk_url_kwarg = 'count'

    def delete(self, request, *args, **kwargs):
        # NOTE(review): redirect(None) raises if the Referer header is absent.
        self.object = self.get_object()
        self.object.delete()
        return redirect(self.request.META.get('HTTP_REFERER'))
class LanguageDeleteView(DeleteView):
    """Delete a Language entry (pk from the `count` URL kwarg), then bounce back."""
    model = Language
    pk_url_kwarg = 'count'

    def delete(self, request, *args, **kwargs):
        # Skip the success_url machinery; return straight to the referrer.
        self.object = self.get_object()
        self.object.delete()
        return redirect(self.request.META.get('HTTP_REFERER'))
class PortraitDeleteView(DeleteView):
    """Delete a PsychologicalPortrait (pk from `count` URL kwarg), then bounce back."""
    model = PsychologicalPortrait
    pk_url_kwarg = 'count'

    def delete(self, request, *args, **kwargs):
        # Skip the success_url machinery; return straight to the referrer.
        self.object = self.get_object()
        self.object.delete()
        return redirect(self.request.META.get('HTTP_REFERER'))
|
from datetime import datetime, timezone
from time import sleep, monotonic
from urllib.parse import quote_plus
from uuid import uuid4
import requests
from config import sheet_api_url
from utils.myworkers import MyWorkers
book_worker = MyWorkers(1)
# Получение JSON из гугл таблицы
def get_data_from_sheet(params, method='GET'):
    """Fetch JSON from the Google-Sheets web app.

    :param params: query string for GET, or a JSON-serializable payload for POST
    :param method: 'GET' or 'POST'
    :return: parsed JSON on success, None when the HTTP status is not 200
    :raises TypeError: when a 200 response is not JSON
    :raises ValueError: for an unsupported method (previously `r` was left
        unbound and a confusing NameError was raised instead)
    """
    if method == 'GET':
        r = requests.get(sheet_api_url + params)
    elif method == 'POST':
        r = requests.post(sheet_api_url, json=params)
    else:
        raise ValueError('Unsupported HTTP method: {}'.format(method))
    if r.status_code == 200:
        if 'application/json' in r.headers['Content-Type']:
            return r.json()
        else:
            print(r.text)
            raise TypeError('Something wrong with request to table')
    return None
def get_tour_list_from_data(data):
    """Turn sheet records into tour rows (lists) with numeric columns as int.

    Columns 4 and 5 fall back to column 3 when empty; column 10 falls back to 0.
    """
    rows = [list(record.values()) for record in data['data']]
    for row in rows:
        row[3] = int(row[3])
        row[4] = int(row[4] or row[3])
        row[5] = int(row[5] or row[3])
        row[8] = int(row[8])
        row[9] = int(row[9])
        row[10] = int(row[10] or 0)
    return rows
def get_menu_dict_from_data(data):
    """Group menu rows into numbered menus; a row with an empty 'item' starts a new one.

    Returns {'menu1': [...], 'menu2': [...], ..., 'list': [all items], 'menus': count}
    where each item is (number, item, cost) and cost drops its trailing currency char.
    """
    menu_no = 1
    menus = {'menu1': [], 'list': []}
    for row in data['data']:
        if not row['item']:
            # Separator row: open the next menu section.
            menu_no += 1
            menus['menu{}'.format(menu_no)] = []
            continue
        entry = (row['number'], row['item'], int(row['cost'][:-1]))
        menus['menu{}'.format(menu_no)].append(entry)
        menus['list'].append(entry)
    menus['menus'] = menu_no
    return menus
def get_tour_list():
    """Fetch the tour schedule, retrying every second until data arrives.

    Bug fixed: the original loop condition was `data is None`, so a falsy
    non-None payload (e.g. {}) escaped the loop and the function returned
    None implicitly.  The retry now continues until the data is truthy.
    """
    while True:
        data = get_data_from_sheet('?getData=1')
        if data:
            return get_tour_list_from_data(data)
        print('Список туров не получен')
        sleep(1)
# Скачивает все доступные данные с гугл таблицы
def get_all():
    """Download everything from the sheet (schedule + menu), retrying until it arrives.

    :return: (menu_dict, tour_list)

    Bug fixed: the original loop condition was `data is None`, so a falsy
    non-None payload escaped the loop and the trailing `return None, None`
    executed.  The retry now continues until the data is truthy.
    """
    while True:
        data = get_data_from_sheet('?getAll=1')
        if data:
            tour_list = get_tour_list_from_data(data['schedule'])
            menu_dict = get_menu_dict_from_data(data['menu'])
            return menu_dict, tour_list
        print('Данные не получены')
        sleep(1)
# Скачивает меню из гугл таблицы
def get_menu_dict():
    """Download the menu from the sheet, retrying every second until it arrives.

    Bug fixed: the original loop condition was `data is None`, so a falsy
    non-None payload escaped the loop and the function returned None
    implicitly.  The retry now continues until the data is truthy.
    """
    while True:
        data = get_data_from_sheet('?getData=2')
        if data:
            return get_menu_dict_from_data(data)
        print('Список меню не получен')
        sleep(1)
def send_order_to_table(user):
    """Push an order row to the Google Sheet. Strictly for botmenu.

    NOTE(review): mutates the caller's dict (user["menu_bill"] becomes str).
    """
    user["menu_bill"] = str(user["menu_bill"])
    # Build the GET query string; every value is URL-encoded.
    params = f'?addOrder=1' \
             f'&tour={quote_plus(user["tour"][:20] + "...")}' \
             f'&fio={quote_plus(user["fio"])}' \
             f'&bill={quote_plus(user["menu_bill"])}' \
             f'&payment={quote_plus(user["payment"])}' \
             f'&tg={quote_plus(user["tg"])}'
    # One &list= entry per ordered menu item (m[1] is the item name).
    for m in user['menu_list']:
        params += f'&list={quote_plus(m[1])}'
    data = get_data_from_sheet(params)
    print(data)
def send_book_to_table(user):
    """Send a tour booking to the sheet. Strictly for bottour.

    Dispatches the POST on the single-threaded `book_worker` and busy-waits
    (0.3 s polls, 30 s max) for the worker to fill `data` in place.

    :raises TimeoutError: when the worker does not answer within 30 s
    :raises ValueError: unless the sheet answers {'msg': 'OK'}
    """
    now = datetime.now(tz=timezone.utc).isoformat()
    # Short random payment id: first 6 chars of a dash-stripped uuid4.
    user['register']['payment_id'] = str(uuid4()).replace('-', '')[:6]
    request = {
        'method': 'addBook',
        'tour': user['register']['tour_name'][:20],
        # One row per person: timestamp, telegram id, name split in two,
        # the person's remaining fields, payment id, and initial status.
        'p_list': [[now, user['tg']] + p['name'].split(' ', 1) + list(p.values())[1:] +
                   [user['register']['payment_id'], 'Ожидание оплаты']
                   for p in user['register']['persons_list']]
    }
    data = {}
    book_worker.add_task(_send_book_to_table, (request, data))
    timer = monotonic()
    # Poll until the worker mutates `data` (or give up after 30 s).
    while not data:
        if monotonic() - timer > 30:
            raise TimeoutError('Too long response')
        sleep(0.3)
    print('Ответ от GAS', data)
    if not('msg' in data and data['msg'] == 'OK'):
        raise ValueError(data)
def _send_book_to_table(request, response):
    """Worker helper: POST `request` to the sheet and merge the reply into `response`.

    `response` is mutated in place so the caller can poll it across threads.
    """
    data = get_data_from_sheet(request, 'POST')
    response.update(data)
def send_payment_accept(tour, payment_id, method=None):
    """Mark a booking's payment state in the sheet.

    :param method: GAS method name; defaults to 'acceptPayment'
    :raises ValueError: unless the sheet answers {'msg': 'OK'}
    """
    method = method or 'acceptPayment'
    request = {
        'method': method,
        'tour': tour[:20],
        'paymentID': payment_id
    }
    data = get_data_from_sheet(request, 'POST')
    print('Ответ от GAS', data)
    if not ('msg' in data and data['msg'] == 'OK'):
        raise ValueError(data)
|
# read.py
# loading a file with open()
# Sanity check for "resultados.txt": each of n processes should have
# written exactly r lines.
import numpy as np

resultados = open("resultados.txt", "r")
lines = 0
n = int(input("Número de processos: "))
r = int(input("Número de repeticoes: "))
# initialize array with counter for all processes
# in the end, every index of this array = r
process_counter = np.zeros(n)
print(process_counter)
# reading each line of the file and printing to the console
for line in resultados:
    lines+=1
    print(line[0])
    # NOTE(review): only the first character is used as the process id --
    # this breaks for ids >= 10; confirm the file's line format.
    process_counter[int(line[0])] += 1
resultados.close()
print(process_counter)
# Total line count must equal processes * repetitions.
if lines == n*r:
    print("\nNúmero de linhas correto!\n")
else:
    print("\nERRO: Número de linhas incorreto!\n")
# And every individual process must have written exactly r lines.
if (process_counter == r).all():
    print("Todos os processos escreveram", r, "vezes")
else:
    print("ERRO: nem todos os processos escreveram", r, "vezes")
    print(process_counter)
def is_Even_or_Odd(number):
    """Print whether `number` is even or odd; returns None."""
    if number % 2:
        print("The number entered is odd: ", number)
    else:
        print("This number is even :", number)
# Script entry: read one integer from stdin and report its parity.
num = int(input("Enter the number: "))
x = num  # `x` merely aliases `num`
is_Even_or_Odd(x)
|
import es920lr
import time
import sys
import serial
import MySQLdb
if __name__ == "__main__":
    # Bridge (Python 2): ES920LR LoRa receiver -> MySQL table Occupation_info.
    db = MySQLdb.connect(host="localhost", user="OWNER", passwd="12345", db="prototype_lab")
    cur = db.cursor()
    ser = serial.Serial("/dev/ttyUSB0", 115200)
    lr = es920lr.ES920LR(ser)
    # Node / PAN / destination ids -- presumably; TODO confirm against es920lr docs.
    lr.set_id("0001", "1111", "FFFF")
    if lr.open():
        print "OK"
        time.sleep(3)
    else:
        lr.close()
        print "NG"
        sys.exit(1)
    try:
        while True:
            payload = lr.read()
            if payload == None:
                pass
            else:
                payload_tpl = payload
                # Drop the last 2 chars of the data field (presumably CR/LF -- verify).
                tmp = payload_tpl[2][:len(payload_tpl[2])-2]
                counter_str = tmp.split(',')[0]
                ibeaconId_str = tmp.split(',')[1]
                LoRaId_str = payload_tpl[1]
                # NOTE(review): SQL built by string concatenation is
                # injection-prone; prefer cur.execute with parameters.
                sql_str = "INSERT INTO Occupation_info (Count, iBeaconId, LoRaId) VALUES(" + counter_str + ","
                if cur.execute(sql_str + "\'" + ibeaconId_str +"\', " + "\'" + LoRaId_str+"\');"):
                    print "commit to db"
                    db.commit()
            time.sleep(0.000001)
    except KeyboardInterrupt:
        # Ctrl-C: wipe the table and release all resources.
        lr.close()
        cur.execute("TRUNCATE TABLE Occupation_info;")
        cur.close()
        db.close()
|
import argparse
def argument():
    """Parse the command line: required --inputdir/-i and --outputdir/-o."""
    parser = argparse.ArgumentParser(
        description='Executes extraction of HC metrics vs SAT')
    parser.add_argument('--inputdir', '-i',
                        type=str,
                        required=True,
                        help='Where the metrics HC are saved')
    parser.add_argument('--outputdir', '-o',
                        type=str,
                        required=True,
                        help='The directory where you want to dump compressed files')
    return parser.parse_args()
import numpy as np
import netCDF4 as NC
from commons.Timelist import TimeList
from basins import V2 as OGS
from commons.utils import writetable
from commons.utils import addsep
args = argument()

INDIR = addsep(args.inputdir)
OUTDIR = addsep(args.outputdir)
SATtypes = ['DT','NRT']

# QuID 006-014 V1.3 (V6C)
# Seasonal target values for the EAN RMS / bias columns.
EAN_RMS = {
    'win': 0.10,
    'sum': 0.06,
}
EAN_BIAS = {
    'win': 0.06,
    'sum': 0.01,
}
# Month membership of each "season" (win = Nov-Apr, sum = May-Oct).
SeasonMonths = {
    'win': [11,12,1,2,3,4],
    'sum': [5,6,7,8,9,10],
}

nSub = len(OGS.P.basin_list)
# Index of the Mediterranean-wide aggregate sub-basin.
# NOTE(review): if no basin name contains 'med', index_med stays undefined
# and the METRICS assignments below raise NameError.
for isub,sub in enumerate(OGS.P.basin_list):
    if 'med' in sub.name:
        index_med = isub

col_names=["BIAS","RMSE","EAN_bias","EAN_rmse"]

for tt in SATtypes:
    # All weekly validation files for this satellite product type.
    TL = TimeList.fromfilenames(None,INDIR,"Validation_hc_*" + tt + "*.nc", \
        prefix="Validation_hc_YYYYMMDD_on_weekly_Sat" + tt + ".", \
        dateformat='%Y%m%d')
    lenTL = TL.nTimes
    BIAS = np.zeros((lenTL,nSub))
    RMSE = np.zeros((lenTL,nSub))
    EAN_bias = np.zeros((lenTL,nSub))
    EAN_rmse = np.zeros((lenTL,nSub))
    Dates = []
    for ii,filein in enumerate(TL.filelist):
        dd = TL.Timelist[ii]
        datef = dd.strftime('%Y-%m-%d')
        #print(tt + ' ' + datef)
        Dates.append(datef)
        # Season of this file's date (last matching season wins).
        for ss in SeasonMonths.keys():
            if dd.month in SeasonMonths[ss]:
                seas = ss
        M = NC.Dataset(filein,"r")
        # Column 1 -- presumably the relevant (surface) layer; TODO confirm file layout.
        model = M.variables['MODEL_MEAN_LOG'][:,1]
        sat = M.variables['SAT___MEAN_LOG'][:,1]
        BIAS[ii,:] = model-sat
        RMSE[ii,:] = M.variables['BGC_CLASS4_CHL_RMS_SURF_BASIN_LOG'][:,1]
        EAN_bias[ii,:] = EAN_BIAS[seas]
        EAN_rmse[ii,:] = EAN_RMS[seas]
    # startdate/enddate are computed but unused below.
    startdate = TL.Timelist[0].strftime('%Y%m%d')
    enddate = TL.Timelist[-1].strftime('%Y%m%d')
    filetxt = OUTDIR + '/table_statistics_V9C_' + tt + '.txt'
    row_names=Dates
    # One row per date; Mediterranean-wide values only.
    METRICS=np.zeros((lenTL,4))*np.nan
    METRICS[:,0] = BIAS[:,index_med]
    METRICS[:,1] = RMSE[:,index_med]
    METRICS[:,2] = EAN_bias[:,index_med]
    METRICS[:,3] = EAN_rmse[:,index_med]
    writetable(filetxt,METRICS,row_names,col_names,fmt="%5.3f\t ")
|
import pandas as pd
import numpy as np
import jieba
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import special_dict
from collections import Counter
from chinesetrans.langconv import *
from PIL import Image
import re
import matplotlib
FILE_PATH = '自由时报正文分詞.xlsx'
TYPE_NAME = 'context' # context or title
def getFrequence(frame, sheetname, index_name, word_cloud=False, word_cloud_name='test.png', frequence=True):
    '''
    Count keyword occurrences (and optionally per-report frequency) in the reports,
    writing the result to the module-level Excel `writer`.

    :param frame: pandas DataFrame with the articles
    :param sheetname: name of the output Excel sheet
    :param index_name: column of `frame` to analyse ('context' or 'title')
    :param word_cloud: whether to also render a word-cloud image
    :param word_cloud_name: output file name for the word cloud
    :param frequence: whether to add a frequency (= count / report count) column
    :return: None
    '''
    raw_title = ''
    for i in range(0, frame.index.size):
        raw_title += frame[index_name][i] + ' '
    # Convert Traditional -> Simplified first: jieba's segmentation does not
    # handle Traditional Chinese well.
    st_word = Traditional2Simplified(raw_title)
    cut_title = jieba.cut(st_word, cut_all=False)
    output_titles = []
    for i in cut_title:
        if not isinstance(i, str):
            i = str(i)
        # Convert each token back to Traditional Chinese.
        i = Simplified2Traditional(i)
        # Apply the project's replacement table, then drop banned tokens.
        if i in special_dict.replace_list:
            i = special_dict.replace_list[i]
        if i not in special_dict.banlist:
            output_titles.append(i)
    if word_cloud is True:
        generateWordCloud(' '.join(output_titles), word_cloud_name)
    top_title = Counter(output_titles).most_common(300)  # 300 most frequent tokens
    cipin = pd.DataFrame(top_title, columns=['关键词', '出现次数'])
    if frequence is True:
        total_reports = frame.index.size
        cipin['词频'] = cipin['出现次数'] / total_reports
    cipin.to_excel(writer, sheet_name=sheetname, index=False)
def generateWordCloud(string, filename):
    """Render a word cloud of `string`, shaped by ../resource/taiwan.jpg, into `filename`."""
    stopwords = set(STOPWORDS)
    stopwords.add(" ")
    coloring = np.array(Image.open("../resource/taiwan.jpg"))
    # create coloring from image
    image_colors = ImageColorGenerator(coloring)
    # Chinese-capable font; NOTE(review): Windows-only path.
    font = r'C:\Windows\Fonts\msyh.ttc'
    wc = WordCloud(background_color="white", max_words=100, mask=coloring, width=800, height=600,
                   max_font_size=70, random_state=32, font_path=font, scale=32)
    wc.generate(string)
    # With only `mask` set, the cloud takes the image's shape.
    plt.figure(figsize=(60, 75))  # figure size controls output sharpness
    plt.imshow(wc, interpolation="bilinear")
    plt.axis("off")
    plt.figure()
    plt.show()
    wc.to_file(filename)
def changeDateFormat(frame):
    """Normalize the 'date' column in place: 'xxxx年xx月xx日' -> numpy datetime64."""
    for idx in range(frame.index.size):
        parts = re.findall(r"\d+\.?\d*", frame['date'][idx])
        frame['date'][idx] = np.datetime64(parts[0] + '-' + parts[1] + '-' + parts[2])
# Load the scraped articles.
# Fixes: `np.str` (removed in NumPy 1.24) -> builtin `str`;
# `.astype(np.datetime64)` -> explicit 'datetime64[ns]';
# `writer.save()` (removed in pandas 2.0) -> `writer.close()`.
# NOTE(review): `columns=` is not a documented read_excel parameter -- confirm intent.
frame = pd.read_excel('ltn.xlsx', columns=['title', 'date', 'context'],
                      dtype={'title': str, 'date': str, 'context': str})
changeDateFormat(frame)
frame[['date']] = frame[['date']].astype('datetime64[ns]')
frame['date'] = frame['date'].dt.date

writer = pd.ExcelWriter(FILE_PATH)
# Keyword counts over the whole corpus.
getFrequence(frame=frame, sheetname="All", index_name=TYPE_NAME, word_cloud=False, frequence=False)

# Split the news by date: one group (and one output sheet) per day.
group_by_date = frame.groupby('date')
info = group_by_date.size()
info.to_excel(writer, sheet_name='報道數量', index=True)
dates = list(group_by_date.groups.keys())
for date in dates:
    df = group_by_date.get_group(str(date))
    df = df.reset_index(drop=True)
    getFrequence(df, sheetname=str(date), index_name=TYPE_NAME, word_cloud=False, frequence=False)
writer.close()
|
from rest_framework.request import Request
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from ..models import AndelaUserProfile
def jwt_authentication_middleware(get_response):
    """Django middleware factory: resolve the request's JWT into an AndelaUserProfile.

    On success request.user is replaced with the matching AndelaUserProfile;
    any failure (missing/invalid token, unknown profile) is deliberately
    swallowed and the request proceeds unauthenticated.
    """
    def middleware(request):
        try:
            user_jwt = JSONWebTokenAuthentication().authenticate(Request(request))
            user = user_jwt[0]
            andela_user_profile = AndelaUserProfile.objects.get(user=user)
            request.user = andela_user_profile
        except Exception:
            # Best-effort: keep the original request.user on any error.
            pass
        return get_response(request)
    return middleware
|
#!/usr/bin/python3
"""Script to return user info based on given ID number"""
import requests
import sys
if __name__ == "__main__":
    # Print the completed-task summary for the employee id given in argv[1].
    url = 'http://jsonplaceholder.typicode.com/users/'
    r_todo = requests.get(url + '{}/todos'.format(sys.argv[1])).json()
    r_user = requests.get(url + '{}'.format(sys.argv[1])).json()
    # Keep only completed tasks.
    fin = [x for x in r_todo if x.get('completed') is True]
    print("Employee {} is done with tasks({}/{}):".format(
        r_user.get('name'), len(fin), len(r_todo)))
    for n in fin:
        print("\t {}".format(n.get('title')))
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration: TotemT2 DQM over an emulated-digi test file.
process = cms.Process('DQM')

process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
process.verbosity = cms.untracked.PSet( input = cms.untracked.int32(-1) )

# minimum of logs
process.MessageLogger = cms.Service("MessageLogger",
    statistics = cms.untracked.vstring(),
    destinations = cms.untracked.vstring('cerr'),
    cerr = cms.untracked.PSet(
        threshold = cms.untracked.string('WARNING')
    )
)

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')

# load DQM framework
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = "CTPPS"
process.dqmEnv.eventInfoFolder = "EventInfo"
process.dqmSaver.path = ""
process.dqmSaver.tag = "CTPPS"

# Input: emulated TotemT2 digi test file.
process.source = cms.Source('PoolSource',
    fileNames = cms.untracked.vstring(
        '/store/group/dpg_ctpps/comm_ctpps/TotemT2/RecoTest/emulated_digi_test.root',
    ),
)

# Geometry, local reconstruction and the TotemT2 DQM source.
process.load('Geometry.ForwardCommonData.totemT22021V2XML_cfi')
process.load('Geometry.ForwardGeometry.totemGeometryESModule_cfi')
process.load('RecoPPS.Local.totemT2RecHits_cfi')
process.load('DQM.CTPPS.totemT2DQMSource_cfi')

# Reconstruction + DQM in the main path; DQM environment/saver at end-path.
process.path = cms.Path(
    process.totemT2RecHits *
    process.totemT2DQMSource
)

process.end_path = cms.EndPath(
    process.dqmEnv +
    process.dqmSaver
)

process.schedule = cms.Schedule(
    process.path,
    process.end_path
)
|
# Shamelessly ripped and modified from
# https://github.com/SublimeText/TrailingSpaces
import sublime, sublime_plugin
DEFAULT_MAX_FILE_SIZE = 1048576          # 1 MiB -- skip highlighting above this
DEFAULT_COLOR_SCOPE_NAME = "invalid"     # scope used to color the regions
DEFAULT_IS_ENABLED = True

# Set whether the plugin is on or off.
# Reusing TrailingSpaces' config.
ts_settings = sublime.load_settings('trailing_spaces.sublime-settings')
trailing_spaces_enabled = bool(ts_settings.get('trailing_spaces_enabled',
                               DEFAULT_IS_ENABLED))
# Determine if the view is a find results view.
# Determine if the view is a find results view.
def is_find_results(view):
    syntax = view.settings().get('syntax')
    return syntax and "Find Results" in syntax
# Return an array of regions matching mixed whitspace.
# Return an array of regions matching mixed whitespace
# (tabs followed by spaces, or spaces followed by tabs).
def find_mixed_whitspace(view):
    return view.find_all('(\t+ +)|( +\t+)')
# Highlight mixed whitspace.
# Highlight mixed whitespace, respecting TrailingSpaces' size limit and
# skipping Find Results buffers.
def highlight_mixed_whitspace(view):
    max_size = ts_settings.get('trailing_spaces_file_max_size',
                               DEFAULT_MAX_FILE_SIZE)
    color_scope_name = ts_settings.get('trailing_spaces_highlight_color',
                                       DEFAULT_COLOR_SCOPE_NAME)
    if view.size() <= max_size and not is_find_results(view):
        regions = find_mixed_whitspace(view)
        view.add_regions('MixedWhitespaceHighlightListener', regions,
                         color_scope_name, sublime.DRAW_EMPTY)
# Highlight matching regions.
# Re-highlight on every edit, focus change and file load.
class MixedWhitespaceHighlightListener(sublime_plugin.EventListener):
    def on_modified(self, view):
        if trailing_spaces_enabled:
            highlight_mixed_whitspace(view)

    def on_activated(self, view):
        if trailing_spaces_enabled:
            highlight_mixed_whitspace(view)

    def on_load(self, view):
        if trailing_spaces_enabled:
            highlight_mixed_whitspace(view)
# Database config
database_host='127.0.0.1'
database_name='sports'
database_user='root'
database_password=''  # NOTE(review): empty root password -- development only?

# Some private secret for hashing
# NOTE(review): secret is hard-coded in source control; consider env/config.
secret='1482ec1b2364f64e7d162a2b5b16f477'
#question:
#Answer of the output should be:
# {'Randy':['Input.txt','Output.txt'], 'Stan':['Code.py']}
# class FileOwners:
# @staticmethod
# def group_by_owners(files):
# return None
# files = {
# 'Input.txt': 'Randy',
# 'Code.py': 'Stan',
# 'Output.txt': 'Randy'
# }
# print(FileOwners.group_by_owners(files))
"""
class FileOwners:
@staticmethod
def group_by_owners(files):
if not files:
return None
owners={}
#for file in files:
for i in files:
#print(i)
#print (file)
#print(files)
if files[i] not in owners:
print(files[i])
owners[files[i]]=[]
print(str(files[i]))
# owners[files[file]]=[]
owners[files[i]].append(i)
return owners
files = {
'Input.txt': 'Randy',
'Code.py': 'Stan',
'Output.txt': 'Randy'
}
FileOwners.group_by_owners(files)
print(FileOwners.group_by_owners(files))
#newdic=FileOwners.group_by_owners(files)
#print(type(newdic))
##
filelist = {
'Input.txt': 'Randy',
'Code.py': 'Stan',
'Output.txt': 'Randy'
}
for i in filelist:
#print(i)
print(filelist[i])
class FileOwners:
# @staticmethod
def group_by_owners(files):
if not files:
return None
owners={}
for file in files:
if files[file
return None
filelist = {
'Input.txt': 'Randy',
'Code.py': 'Stan',
'Output.txt': 'Randy'
}
print(FileOwners.group_by_owners(filelist))
bool_one = 5 != 7
bool_two = 1 + 1 != 2
bool_three = 3 * 3 == 9
print(bool_one)
print(bool_two)
print(bool_three)
CodeAcademy boolen:
def age_check(age):
if age < 13:
return "you are only " + str(age) + " years old. Please come back with your parents."
elif age > 13:
return True
print(age_check(14))
def greater_than(x, y):
if x > y:
return x
elif x < y:
return y
else:
return "These numbers are the same"
print(greater_than(3, 3))
def graduation_reqs(credits):
if credits >= 120:
return "You have enough credits to graduate!"
else:
return None
print(graduation_reqs(120))
##
statement_one = (2 + 2 + 2 >= 6) and (-1 * -1 < 0)
print(statement_one)
statement_one =(2 + 2 + 2 >= 6) and (-1 * -1 < 0)
print(statement_one)
statement_two =(4 * 2 <= 8) and (7 - 1 == 6)
print(statement_two)
def graduation_reqs(credits,gpa):
if credits >= 120 and gpa>=2.0:
return "You meet the requirements to graduate!"
#
# The registrars office at Calvin Coolidge's Cool College has another request. They want to send out a mailer with information on the commencement ceremonies to students who have met at least one requirement for graduation (120 credits and 2.0 GPA).
#
# Write a function called graduation_mailer that takes two inputs, gpa and credits and checks if a student either has 120 or more credits or a GPA 2.0 or higher and if so returns True.
statement_one =(2 - 1 > 3) or (-5 * 2 == -10)
statement_two =(9 + 5 <= 15) or (7 != 4 + 3)
def graduation_mailer(credits,gpa):
if credits>=120 or gpa>=2.0:
return True
##
statement_one =not (4 + 5 <= 9)
print(statement_one)
statement_two =not (8 * 2) != 20 - 4
print(statement_two)
def graduation_reqs(gpa, credits):
if (gpa >= 2.0) and (credits >= 120):
return "You meet the requirements to graduate!"
elif (gpa >= 2.0) and not (credits >= 120):
return "You do not have enough credits to graduate."
elif not (gpa >= 2.0) and (credits >= 120):
return "Your GPA is not high enough to graduate."
else:
return "You do not meet either requirement to graduate!"
print(graduation_reqs(3.0, 110))
print(graduation_reqs(1.0, 120))
print(graduation_reqs(2.0, 130))
print(graduation_reqs(1.0, 90))
def graduation_reqs(gpa, credits):
if (gpa >= 2.0) and (credits >= 120):
return "You meet the requirements to graduate!"
if (gpa >= 2.0) and not (credits >= 120):
return "You do not have enough credits to graduate."
if not (gpa >= 2.0) and (credits >= 120):
return "Your GPA is not high enough to graduate."
else:
return "You do not meet the GPA or the credit requirement for graduation."
def grade_converter(gpa):
if gpa >= 4.0:
return "A"
elif gpa >= 3.0:
return "B"
elif gpa >= 2.0:
return "C"
elif gpa >= 1.0:
return "D"
else:
return "F"
print(grade_converter(0.2))
"""
#Except and Try
def divides(a, b):
    """Print a / b, or a message on division by zero; returns None."""
    try:
        print(a / b)
    except ZeroDivisionError:
        print("Can't divide by zero!")

# Demo call: prints the zero-division message, then None (the return value).
print(divides(3,0))
##
# def raises_value_error():
# raise ValueError
#Write a try statement and an except statement around the line of code that executes the function to catch a ValueError and make the error message print You raised a ValueError!
def raises_value_error():
    """Raise a ValueError internally and report catching it; returns None."""
    try:
        raise ValueError
    except ValueError:
        print("You raised a ValueError!")

raises_value_error()
|
#!/usr/bin/python
# import high-level functions
import os
import sys
from pprint import pprint
class read:
def __init__(self, core, filename):
self.supportedseperators = ['";"', '\';\'',
'","', '\',\'',
'"|"', '\'|\'',
"\"\t\"", "'\t'",
';',
',',
'|',
"\t",
]
self.filename = filename
self.core = core
self.header = []
self.headerLine = 0
self.contents = {}
# Open file
self.f = open(filename, 'r')
# Read line by line
linenum = 0
self.header = self.readHeader()
self.contents = self.readContents()
def readHeader(self):
result = []
# Detect Header
# Header should be the first non-empty line not starting with a #
#
# Store linenumbers - they can be skipped later
self.headerLine = 0
for line in self.f:
# Increase linenumber
self.headerLine += 1
# Remove spacing around line
line = line.strip()
# If line is not empty and it doesn't start with a #-comment
if self.lineNotEmpty(line) and self.lineNotComment(line):
# This line is the header
# Detect Seperator character
self.seperator = self.detectSeperator(line)
# Split header line with seperator. Can be stored as header
keys = 0
for item in line.split(self.seperator):
keys = keys + 1
# If column header is not empty,
# Store the srtipped version
if self.lineNotEmpty(item):
if self.seperator is not None:
if self.seperator[0] is '"' or self.seperator[0] is '\'':
if item[0] == '"' or item[0] == '\'':
item = item[1:]
if item[-1:] is '"' or item[-1] is '\'':
item = item[:-1]
result.append(item.strip().lower())
# If column header is empty,
# Name the column
else:
result.append('column' + str(keys))
# Verify that merge-column is in header
self.core.log(self.filename + ': Headers detected on line ' + str(self.headerLine) + ': ' + ', '.join(result))
# Since we've found the header, we can now stop going through the file
break
# Verify that the required merge header is in the headers,
# if not; exit the program - this file cannot be merged
self.core.arguments['mergecolumn'] = self.core.arguments['mergecolumn'].lower()
if self.core.arguments['mergecolumn'] not in result:
print ''
print ' Fatal error! Column required for merging not found in header of ' + self.filename + '.'
print ' Did not find ' + self.core.arguments['mergecolumn'] + ' in ' + line + '.'
print ''
print ' Please verify file and try again.'
print ''
print ' Exiting...'
exit()
# Else: return the result
return result
# Check if a seperator can be detected in the header
def detectSeperator(self, line):
for seperator in self.supportedseperators:
if seperator in line:
self.core.log(self.filename + ': Seperator ' + seperator + ' detected in header')
return seperator
def readContents(self):
# Store linenumber
# Is used for skipping the header
linenumber = 0
# Prepare result
# Contains a dict with all content
result = {}
# Loop through all file lines
# This function continues where the readHeader stopped
for row in self.f:
line = {}
row = row.strip()
if self.lineNotEmpty(row) and self.lineNotComment(row):
#line = line.split(self.seperator)
index = 0
for column in row.split(self.seperator):
if index < len(self.header) and len(column.strip()) >= 1:
if self.seperator[0] is '"' or self.seperator[0] is '\'':
if column[0] == '"' or column[0] == '\'':
column = column[1:]
if column[-1:] is '"' or column[-1] is '\'':
column = column[:-1]
line[self.header[index]] = column
index += 1
# Since a row has been read into a line,
# add it to the result
# using mergecolumn as index
# mergecolumn in lowercase for easier matching
if self.core.arguments['mergecolumn'] in line:
result[line[self.core.arguments['mergecolumn']].lower()] = line
return result
# Return True if inputted Line is not empty
def lineNotEmpty(self, line):
line = line.strip()
if len(line) > 0:
return True
else:
return False
# Return True if the first non whitespace character of this line
# is not # of ; - indicating a comment
def lineNotComment(self, line):
line = line.strip()
if line[0] is '#' or line[0] is ';':
return False
else:
return True |
# users GET /users(.:format) users#index
# POST /users(.:format) users#create
# new_user GET /users/new(.:format) users#new
# edit_user GET /users/:id/edit(.:format) users#edit
# user GET /users/:id(.:format) users#show
# PATCH /users/:id(.:format) users#update
# PUT /users/:id(.:format) users#update
# DELETE /users/:id(.:format) users#destroy
class BaseApiModel:
    """Build curl commands mirroring Rails-style REST routes for a model."""

    def __init__(self, params):
        self.params = params
        self.base_url = "http://localhost:3000/"
        # Route segment is the lower-cased class name.
        self.class_name = self.__class__.__name__.lower()

    def save(self):
        """Return the curl command string that would create this record."""
        url = self.base_url + self.class_name + "/new"
        method = "POST"  # kept from the original; currently unused
        body = "&".join(key + "=" + self.params[key] for key in self.params)
        return "curl --data" + " \"" + body + "\" " + url
if __name__ == '__main__':
    # Demo (Python 2 print syntax): build and show a curl create command.
    b = BaseApiModel({'name': 'Waldo', 'email': 'uribe.fache@gmail.com'})
    print b.save()
|
from enum import Enum
class SearchSessionStatus(Enum):
    """Lifecycle states of a search session."""
    IN_PROGRESS = "IN_PROGRESS"
    COMPLETE = "COMPLETE"
|
from math import*

# Spherical tank: report either the cap volume (option 1) or the
# remaining (sphere minus cap) volume (option 2), rounded to 4 decimals.
r = float(input("Raio:"))
alt = float(input("Altura:"))
op = input("Opcao:")
volume = (4*pi*(r**3))/3                 # full sphere volume
volcalota = (pi*(alt**2)*(3*r-alt))/3    # spherical-cap volume for height alt
combustive = volume - volcalota          # volume below the cap
if(op == "1"):
    print(round(volcalota, 4))
if(op=="2"):
    print(round(combustive, 4))
"""
Problemas propuestos
Cargar una cadena de caracteres por teclado. Mostrar la cadena del final al principio
utilizando subíndices negativos.
Confeccionar un programa con las siguientes funciones:
1) Cargar una lista con 5 palabras.
2) Intercambiar la primer palabra con la última.
3) Imprimir la lista
"""
def cargar(palabras):
    """Print the list, swap its first and last items in place, print again."""
    print(palabras)
    palabras[0], palabras[-1] = palabras[-1], palabras[0]
    print(palabras)

lista = ["hola", "esto", "es", "una", "prueba"]
cargar(lista)
from flask import Flask, jsonify, request
import numpy as np
import datetime
import sendgrid
import os
from sendgrid.helpers.mail import *
app = Flask(__name__)
global_M = {} # Master dictionary of patients
@app.route("/api/new_patient", methods=["POST"])
def new_patient():
    """Register a patient and prepare an empty heart-rate history.

    Expects JSON with patient_id, attending_email and user_age (checked by
    validate_post).  Returns the patient_id, or an error string on bad input.
    """
    r = request.get_json()
    check = validate_post(r)
    print(check)
    if check == 0:
        error = "inputs not entered correctly"
        print("inputs not entered correctly")
        return jsonify(error)
    p_id = r.get("patient_id")
    if type(p_id) is not str:
        raise TypeError("patient ID must be python string")
    global global_M
    if p_id in global_M:
        # Re-registration leaves the existing record untouched.
        print("patient already entered into system")
    else:
        global_M.update({p_id: r})
        # Start with an empty heart-rate measurement list.
        hr = []
        p_info = global_M[p_id]
        p_info["heart_rate"] = hr
        global_M[p_id] = p_info
        print(global_M)
    return jsonify(p_id)
def validate_post(r):
    """Check that the request body's keys exactly match one accepted POST schema.

    Used by all POST handlers.

    :param r: parsed request JSON
    :type r: dict
    :return: 1 when the key set matches a known schema, else 0
    :rtype: int
    """
    accepted_schemas = (
        {"patient_id", "attending_email", "user_age"},
        {"patient_id", "attending_email", "user_age", "heart_rate"},
        {"patient_id", "heart_rate"},
        {"patient_id", "heart_rate_average_since"},
    )
    keys = set(r.keys())
    return 1 if any(keys == schema for schema in accepted_schemas) else 0
@app.route("/api/heart_rate", methods=["POST"])
def heart_rate_store():
    """Append a (heart_rate, timestamp) pair to a registered patient's history.

    The timestamp is taken server-side at request time.  Returns the
    patient_id, or an error string for a malformed body.
    """
    r = request.get_json()
    stamp = datetime.datetime.now()
    check = validate_post(r)
    if check == 0:
        error = "inputs not entered correctly"
        print("inputs not entered correctly")
        return jsonify(error)
    p_id = r.get("patient_id")
    if type(p_id) is not str:
        raise TypeError("patient ID must be python string")
    hr = r.get("heart_rate")
    if type(hr) is not int:
        raise TypeError("patient heart rate must be python integer")
    p_hr = (hr, stamp)
    global global_M
    if p_id in global_M:
        p_info = global_M[p_id]
        p_info["heart_rate"].append(p_hr)
        global_M[p_id] = p_info
        print(global_M)
    else:
        # Unknown patient: only logged; the id is still echoed back.
        print("Patient not yet entered into system")
    return jsonify(p_id)
@app.route("/api/status/<patient_id>", methods=["GET"])
def status(patient_id):
    """Report tachycardia status and the timestamp of the latest measurement.

    :param patient_id: the ID of the patient the user looks up
    :type patient_id: str (URL path component)
    :return: JSON [status, last_stamp]; both "none" for unknown patients
    """
    global global_M
    p_id = patient_id
    if p_id in global_M:
        p_info = global_M[p_id]
        p_hr = p_info["heart_rate"]
        # Most recent (rate, stamp) pair.
        # NOTE(review): IndexError when no measurement was ever stored.
        last_rec = p_hr[-1]
        last_p_hr = last_rec[0]
        last_stamp = last_rec[1]
        p_age = p_info["user_age"]
        status = is_tachycardic(last_p_hr, p_age, p_id)
    else:
        print("Patient not yet entered into system")
        status = "none"
        last_stamp = "none"
    return jsonify(status, last_stamp)
def is_tachycardic(last_p_hr, p_age, p_id):
    """Check whether a patient is tachycardic given their latest heart rate.

    Uses age-bracketed heart-rate limits; when the patient is tachycardic,
    the attending physician is notified by email.

    :param last_p_hr: most recent heart rate reading
    :param p_age: patient age in years
    :param p_id: patient id
    :type last_p_hr: int
    :type p_age: int
    :type p_id: str
    :return: tachycardic status
    :rtype: str
    """
    # Pick the limit for the patient's age bracket first, then compare once.
    # The previous chained elif version let a reading fall through to a
    # younger bracket's lower threshold: e.g. an infant (age < 1) with a
    # rate of 160 (<= 169) matched the "p_age <= 2 and > 151" branch and
    # was wrongly flagged Tachycardic.
    if p_age < 1:
        limit = 169
    elif p_age <= 2:
        limit = 151
    elif p_age <= 4:
        limit = 137
    elif p_age <= 7:
        limit = 133
    elif p_age <= 11:
        limit = 130
    elif p_age <= 15:
        limit = 119
    else:
        limit = 100
    if last_p_hr > limit:
        status = "Tachycardic"
        send_email(p_id)
    else:
        status = "Not Tachycardic"
    return status
def send_email(p_id):
    """Email the patient's attending physician that the patient is
    tachycardic, via the SendGrid API.

    :param p_id: id of the tachycardic patient (must already be in global_M)
    """
    global global_M
    p_info = global_M[p_id]
    email = p_info["attending_email"]
    # API key is read from the environment; never hard-code credentials.
    sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
    from_email = Email("erica.skerrett@duke.edu")
    to_email = Email(email)
    subject = "Tachycardic Patient"
    stamp = datetime.datetime.now()
    content = Content("text/plain", "Patient {}".format(p_id)+" is\
 tachycardic as of {}".format(stamp))
    # NOTE(review): the (from, subject, to, content) argument order matches
    # the legacy sendgrid v5 Mail helper -- confirm against the installed
    # sendgrid version.
    mail = Mail(from_email, subject, to_email, content)
    response = sg.client.mail.send.post(request_body=mail.get())
    print(response.status_code)
    print(response.body)
    print(response.headers)
    return
@app.route("/api/heart_rate/<patient_id>", methods=["GET"])
def heart_rate(patient_id):
    """Return all previously stored heart-rate measurements for a patient.

    :param patient_id: id of the patient to look up
    :return: JSON with the patient id and the list of (rate, timestamp)
        samples, or an error string when the patient is unknown
    """
    global global_M
    p_id = patient_id
    if p_id in global_M:
        # 'result' instead of shadowing the builtin name 'dict'
        result = {
            "patient_id": p_id,
            "hr_list": global_M[p_id]["heart_rate"],
        }
    else:
        result = "ERROR: Patient not yet entered into system."
    return jsonify(result)
@app.route("/api/heart_rate/average/<patient_id>", methods=["GET"])
def average(patient_id):
    """Return the patient's average heart rate over all stored measurements.

    :param patient_id: id of the patient to look up
    :return: JSON with the patient id and average heart rate, or an error
        string when the patient is unknown
    """
    global global_M
    p_id = patient_id
    if p_id in global_M:
        # Each stored sample is a (rate, timestamp) tuple; average the rates.
        # (Debug prints of the raw lists removed.)
        rates = [sample[0] for sample in global_M[p_id]["heart_rate"]]
        result = {
            "patient_id": p_id,
            "heart_rate_avg": find_avg(rates),
        }
    else:
        result = "ERROR: Patient not yet entered into system."
    return jsonify(result)
def find_avg(p_hr):
    """Return the mean of a sequence of heart rates.

    :param p_hr: heart-rate values
    :type p_hr: list of int
    :return: average heart rate, as a plain Python float so the value is
        JSON-serializable by flask.jsonify (numpy.float64 is not)
    :rtype: float
    """
    return float(np.mean(p_hr))
@app.route("/api/heart_rate/interval_average", methods=["POST"])
def interval_average():
    """Return the average heart rate for a patient since a specified time.

    Expects JSON with "patient_id" and "heart_rate_average_since"
    (format "%Y-%m-%d %H:%M:%S.%f").
    """
    r = request.get_json()
    if validate_post(r) == 0:
        print("inputs not entered correctly")
        return jsonify("inputs not entered correctly")
    p_id = r.get("patient_id")
    since = datetime.datetime.strptime(r.get("heart_rate_average_since"),
                                       '%Y-%m-%d %H:%M:%S.%f')
    global global_M
    if p_id in global_M:
        # Average only the samples recorded at or after the cutoff.
        rates = lookup(global_M[p_id]["heart_rate"], since)
        avg = find_avg(rates)
        print("avg")
        print(avg)
    else:
        avg = "Patient not yet entered into system"
    return jsonify(avg)
def lookup(hr_list, time):
    """Return the heart-rate values recorded at or after *time*.

    :param hr_list: (rate, timestamp) tuples for one patient
    :type hr_list: list of tuples
    :param time: start of the averaging interval
    :type time: datetime
    :return: heart rates whose timestamp is >= *time*
    :rtype: list
    """
    selected = []
    for rate, stamp in hr_list:
        if stamp >= time:
            selected.append(rate)
    return selected
if __name__ == "__main__":
    # Bind to all interfaces so the service is reachable from outside.
    app.run(host="0.0.0.0")
|
#!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
# noinspection PyCompatibility
from urllib.parse import urlencode
# noinspection PyCompatibility
from urllib.request import urlopen
import json
import os
from flask import Flask
from flask import request
from flask import make_response
from intents import appointments
from intents import login
from intents import sales
from intents import weather
from intents import targets
# Flask app should start in global layout
app = Flask(__name__)
class AuthError(Exception):
    """Raised when a session or business token cannot be obtained from a
    Dialogflow request's contexts."""

    def __init__(self, value):
        # Human-readable description of the authentication failure.
        self.value = value

    def __str__(self):
        return repr(self.value)
@app.route('/webhook', methods=['POST'])
def webhook():
    """Dialogflow webhook endpoint: dispatch the request to an intent
    handler and return the handler's result as JSON."""
    req = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(req, indent=4))
    payload = json.dumps(processRequest(req), indent=4)
    print("Response:")
    print(payload)
    resp = make_response(payload)
    resp.headers['Content-Type'] = 'application/json'
    return resp
def get_token(req):
    """Extract the session token from the request's contexts.

    Returns None when no "token" context is present (callers should be
    prepared for that); raises AuthError when the contexts structure
    itself is malformed.
    """
    try:
        for ctx in req.get("result").get("contexts"):
            if ctx['name'] == "token":
                return ctx.get("parameters").get("token")
    except (RuntimeError, TypeError, NameError):
        raise AuthError("No session token")
def get_business_token(req):
    """Extract the business token from the request's contexts.

    Returns None when no "token" context is present; raises AuthError when
    the contexts structure itself is malformed.
    """
    try:
        for ctx in req.get("result").get("contexts"):
            if ctx['name'] == "token":
                return ctx.get("parameters").get("business_token")
    except (RuntimeError, TypeError, NameError):
        raise AuthError("No session token")
def processRequest(req):
    """Route a Dialogflow request to the matching intent handler.

    Returns the handler's response dict, or {} when no action matches.
    """
    action = req.get("result").get("action")
    print("action:" + action)
    base_url = "https://staging.kitomba.com"
    token = get_token(req)
    business_token = get_business_token(req)
    print(token)
    print(business_token)
    res = {}  # handles non matching case
    if action == "login":
        res = login.login(req)
    if action == "yahooWeatherForecast":
        res = weather.doYahooWeatherForecast(req)
    if action == "appointments.first_visit":
        print(req)
        day = req.get("result").get("parameters").get("day")
        print(day)
        res = appointments.first_visit(base_url, token, business_token, day)
    if action == "appointments.today":
        print(req)
        day = req.get("result").get("parameters").get("day")
        print(day)
        res = appointments.today(base_url, token, business_token, day)
    if action == "sales.day":
        print(req)
        date = req.get("result").get("parameters").get("date")
        print(date)
        res = sales.today(base_url, token, business_token, date)
    if action == "targets":
        print(req)
        res = targets.today(base_url, token, business_token)
    return res
if __name__ == '__main__':
    # Port can be overridden via the PORT environment variable (default 5000).
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
|
def monthle_payment(principal, annual_interest_rate, duration):
    """Compute the fixed monthly payment on an amortizing loan.

    (Function name typo is preserved for caller compatibility.)

    :param principal: amount borrowed
    :param annual_interest_rate: nominal yearly interest rate in percent
    :param duration: loan term in years
    :return: the monthly payment amount
    """
    monthly_rate = annual_interest_rate / 1200
    months = duration * 12
    if monthly_rate == 0:
        # Interest-free loan: pay the principal back in equal parts.
        return principal / months
    growth = (1 + monthly_rate) ** months
    return principal * monthly_rate * growth / (growth - 1)
|
'''
Data access wrappers for WSD datasets in Raganato et al
WSD Evaluation Framework project
'''
from lib import wsd_parser
class WSDDataset:
    """One WSD evaluation dataset: gold labels plus parsed sentences.

    Reads the config section named *name*, which must provide 'Labels'
    (gold key file) and 'XML' (Raganato-format corpus) entries.
    """

    def __init__(self, config, name):
        self.config = config
        self.name = name
        section = self.config[name]
        labels, sense_ids = wsd_parser.readLabels(section['Labels'])
        self.labels = labels
        self.unique_sense_IDs = sense_ids
        words, instances = wsd_parser.processSentences(
            section['XML'], get_lemmas=True)
        self.sentences_words = words
        self.sentences_instances = instances
class SemCor(WSDDataset):
    """SemCor corpus (the standard WSD training set)."""
    def __init__(self, config):
        super().__init__(config, 'SemCor')
class SemEval2007(WSDDataset):
    """SemEval-2007 evaluation dataset."""
    def __init__(self, config):
        super().__init__(config, 'SemEval2007')
class SemEval2013(WSDDataset):
    """SemEval-2013 evaluation dataset."""
    def __init__(self, config):
        super().__init__(config, 'SemEval2013')
class SemEval2015(WSDDataset):
    """SemEval-2015 evaluation dataset."""
    def __init__(self, config):
        super().__init__(config, 'SemEval2015')
class SensEval2(WSDDataset):
    """SensEval-2 evaluation dataset."""
    def __init__(self, config):
        super().__init__(config, 'SensEval2')
class SensEval3(WSDDataset):
    """SensEval-3 evaluation dataset."""
    def __init__(self, config):
        super().__init__(config, 'SensEval3')
class EvalAll(WSDDataset):
    """Concatenation of all evaluation datasets ('EvalAll' config section)."""
    def __init__(self, config):
        super().__init__(config, 'EvalAll')
def allAsList(config, test_only=False):
    """Instantiate every dataset wrapper.

    :param config: parsed configuration with one section per dataset
    :param test_only: when True, omit the SemCor training corpus
    :return: list of WSDDataset instances
    """
    datasets = [] if test_only else [SemCor(config)]
    for dataset_cls in (SemEval2007, SemEval2013, SemEval2015,
                        SensEval2, SensEval3):
        datasets.append(dataset_cls(config))
    return datasets
|
from django.db import models
# Create your models here.
class Exam(models.Model):
    """An exam with free-text instructions and a time limit."""
    # Human-readable exam title.
    name = models.TextField()
    # Instructions shown to the candidate before starting.
    instruction = models.TextField()
    # Exam length (presumably minutes -- TODO confirm with consumers).
    duration = models.IntegerField()
class Question(models.Model):
    """A question belonging to an exam."""
    examId = models.ForeignKey(Exam, on_delete=models.CASCADE)
    question = models.TextField()
    # BUG FIX: this was written as a bare annotation
    # ("answerPosition: models.IntegerField"), which Django ignores, so the
    # column was never created.  It must be an assignment of a Field
    # instance.  Position of the correct option among this question's
    # Options.
    answerPosition = models.IntegerField()
class Options(models.Model):
    """One answer option for a question."""
    questionId = models.ForeignKey(Question, on_delete=models.CASCADE)
    questionPosition = models.IntegerField()
    # BUG FIX: this was written as an annotation ("option:
    # models.TextField()"), which Django ignores, so the column was never
    # created.  It must be an assignment.
    option = models.TextField()
class Users(models.Model):
    """Registered user account.

    NOTE(review): the password is stored as a plain CharField here;
    confirm it is hashed before save elsewhere.
    """
    name = models.CharField(max_length=200)
    email = models.EmailField(max_length=200)
    password = models.CharField(max_length=200)
    isEnabled = models.BooleanField()
|
import collections
import copy
import errno
import fcntl
import fnmatch
import glob
import inspect
import multiprocessing
import os
import os.path
import pprint
import re
import scapy.all as scapy
import signal
import socket
import subprocess
import sys
import tempfile
import time
import traceback
import commands as bess_commands
from pybess.module import *
from common import *
import modes
# Pool of CPU core IDs not yet assigned to a traffic session.
available_cores = list(range(multiprocessing.cpu_count()))
DEFAULT_STATS_CSV = '/tmp/bench.csv'
# Path of the CSV file monitoring stats are appended to (see 'set csv').
stats_csv = DEFAULT_STATS_CSV
def get_var_attrs(cli, var_token, partial_word):
    """Describe a command-syntax variable token.

    :return: (var_type, var_desc, var_candidates) for known tokens, or
        None when the token is not recognized.
    """
    # (type, description, completion candidates) per syntax token.
    token_attrs = {
        'ENABLE_DISABLE': (
            'endis', 'one or more worker IDs', ['enable', 'disable']),
        '[BESSD_OPTS...]': (
            'opts', 'bess daemon command-line options (see "bessd -h")', []),
        'MODE': (
            'name', 'which type of traffic to generate',
            ['flowgen', 'udp', 'http']),
        'PORT': ('portid', 'a port identifier', []),
        'PORT...': ('portid+', 'a port identifier', []),
        '[TRAFFIC_SPEC...]': ('map', '', []),
        'CSV': ('filename', 'a path to a csv file', []),
    }
    var_type = None
    var_desc = ''
    var_candidates = []
    try:
        if var_token in token_attrs:
            var_type, var_desc, var_candidates = token_attrs[var_token]
    except socket.error as e:
        # Lost connection to the daemon mid-lookup: disconnect cleanly on
        # reset/broken pipe, re-raise anything else.
        if e.errno in [errno.ECONNRESET, errno.EPIPE]:
            cli.bess.disconnect()
        else:
            raise
    except cli.bess.APIError:
        pass
    if var_type is None:
        return None
    return var_type, var_desc, var_candidates
def split_var(cli, var_type, line):
    """Split the input line into (head, tail) for a variable of *var_type*.

    Guarantees line == head + tail.  Single-token types consume up to the
    first space (the space itself stays on tail); greedy types consume the
    whole line.

    :raises cli.InternalError: if var_type is not a known type
    """
    single_token_types = ('name', 'filename', 'endis', 'int', 'portid')
    greedy_types = ('wid+', 'name+', 'map', 'pyobj', 'opts', 'portid+')
    if var_type in single_token_types:
        pos = line.find(' ')
        if pos == -1:
            return line, ''
        return line[:pos], line[pos:]
    if var_type in greedy_types:
        return line, ''
    raise cli.InternalError('type "%s" is undefined', var_type)
def _parse_map(**kwargs):
return kwargs
# Return (mapped_value, tail)
# mapped_value: Python value/object from the consumed token(s)
# tail: the rest of input line
def bind_var(cli, var_type, line):
    """Consume one variable of *var_type* from *line* and convert it to a
    Python value.

    :raises cli.BindError: when the consumed token(s) fail validation
    """
    head, remainder = split_var(cli, var_type, line)

    # default behavior
    val = head

    if var_type == 'endis':
        # Accept any unambiguous prefix of "enable" / "disable".
        if 'enable'.startswith(val):
            val = 'enable'
        elif 'disable'.startswith(val):
            val = 'disable'
        else:
            raise cli.BindError('"endis" must be either "enable" or "disable"')
    elif var_type == 'name':
        if re.match(r'^[_a-zA-Z][\w]*$', val) is None:
            raise cli.BindError('"name" must be [_a-zA-Z][_a-zA-Z0-9]*')
    elif var_type == 'name+':
        val = sorted(list(set(head.split()))) # collect unique items
        for name in val:
            if re.match(r'^[_a-zA-Z][\w]*$', name) is None:
                raise cli.BindError('"name" must be [_a-zA-Z][_a-zA-Z0-9]*')
    elif var_type == 'portid':
        if re.match(r'^[\d\.:]*$', val) is None:
            raise cli.BindError('"name" must be [.:0-9]*')
    elif var_type == 'portid+':
        val = sorted(list(set(head.split()))) # collect unique items
        for name in val:
            if re.match(r'^[\d\.:]*$', name) is None:
                raise cli.BindError('"name" must be [.:0-9]*')
    elif var_type == 'filename':
        if val.find('\0') >= 0:
            raise cli.BindError('Invalid filename')
    elif var_type == 'map':
        # NOTE(review): eval of operator-typed CLI input -- acceptable for
        # a local admin tool, but must never be exposed to untrusted input.
        try:
            val = eval('_parse_map(%s)' % head)
        except:
            raise cli.BindError('"map" should be "key=val, key=val, ..."')
    elif var_type == 'pyobj':
        try:
            if head.strip() == '':
                val = None
            else:
                val = eval(head)
        except:
            raise cli.BindError(
                '"pyobj" should be an object in python syntax'
                ' (e.g., 42, "foo", ["hello", "world"], {"bar": "baz"})')
    elif var_type == 'opts':
        val = val.split()
    elif var_type == 'int':
        try:
            val = int(val)
        except Exception:
            raise cli.BindError('Expected an integer')

    return val, remainder
# Subset of standard bessctl commands re-exported by trafficgen.
bessctl_cmds = [
    'monitor pipeline',
]
# BUG FIX: materialize as a list -- on Python 3, filter() returns a lazy
# iterator, so the cmdlist.append() calls made by cmd() below would fail.
cmdlist = [c for c in bess_commands.cmdlist if c[0] in bessctl_cmds]
def cmd(syntax, desc=''):
    """Decorator factory registering a CLI command.

    Appends (syntax, description, handler) to the module-level cmdlist,
    which is what the CLI dispatches through.
    """
    def cmd_decorator(func):
        cmdlist.append((syntax, desc, func))
        # FIX: return the function so the decorated name stays bound to it
        # (previously the decorator returned None, leaving e.g. 'help'
        # bound to None at module level).
        return func
    return cmd_decorator
@cmd('help', 'List available commands')
def help(cli):
    # NOTE(review): shadows the builtin help(); harmless here because the
    # command is dispatched through cmdlist rather than by name.
    for syntax, desc, _ in cmdlist:
        cli.fout.write(' %-50s%s\n' % (syntax, desc))
def _show_config(cli, port):
sess = cli.get_session(port)
cli.fout.write('Port %s\n' % (port,))
divider = '-' * (4 + len(port)) + '\n'
cli.fout.write(divider)
cli.fout.write('mode: %23s\n' % (sess.mode(),))
cli.fout.write(str(sess.spec()) + '\n')
cli.fout.write(divider)
def _show_configs(cli, ports):
    """Show the configuration of each port, deduplicated, in sorted order."""
    # BUG FIX: the old code computed sorted(list(set(ports))) but discarded
    # the result and iterated the raw list; actually iterate the
    # deduplicated, ordered ports as intended.
    for port in sorted(set(ports)):
        _show_config(cli, port)
@cmd('show config', 'Show the current configuration of all ports')
def show_config_all(cli):
    """CLI command: display the configuration of every known port."""
    # (Typo "confiugration" in the help text fixed.)
    _show_configs(cli, cli.ports())
@cmd('show config PORT...', 'Show the current configuration of a port')
def show_config_all(cli, ports):
    """CLI command: display the configuration of the given ports.

    NOTE(review): reuses the name show_config_all (shadowing the zero-arg
    variant above); harmless because dispatch goes through cmdlist, but a
    distinct name would be clearer.  (Typo "confiugration" fixed.)
    """
    _show_configs(cli, ports)
def _do_reset(cli):
    # Stop traffic on every port, then clear all daemon state.
    for port in cli.ports():
        _stop(cli, port)
    with cli.bess_lock:
        # pause/resume around reset_all keeps the daemon consistent.
        cli.bess.pause_all()
        cli.bess.reset_all()
        cli.bess.resume_all()
@cmd('reset', 'Reset trafficgen')
def reset(cli):
    # Ask for confirmation before wiping all sessions and daemon state.
    bess_commands.warn(cli, 'Going to reset everything.', _do_reset)
# Per-second port statistics delta computed inside _monitor_ports.
PortRate = collections.namedtuple('PortRate',
                                  ['inc_packets', 'inc_dropped', 'inc_bytes',
                                   'rtt_avg', 'rtt_med', 'rtt_99',
                                   'jitter_avg', 'jitter_med', 'jitter_99',
                                   'out_packets', 'out_dropped', 'out_bytes'])
def _monitor_ports(cli, *ports):
    """Print per-port RX/TX rates and RTT/jitter statistics once per second
    until interrupted, appending every row to the stats CSV file.

    With no ports given, monitors every port known to the daemon.
    """
    global stats_csv

    def get_delta(old, new):
        # Turn two absolute snapshots into per-second rates (counters) and
        # midpoints (latency percentiles).
        sec_diff = new['timestamp'] - old['timestamp']
        return PortRate(
            inc_packets=(new['inc_packets'] - old['inc_packets']) / sec_diff,
            inc_dropped=(new['inc_dropped'] - old['inc_dropped']) / sec_diff,
            inc_bytes=(new['inc_bytes'] - old['inc_bytes']) / sec_diff,
            rtt_avg=(new['rtt_avg'] + old['rtt_avg']) / 2,
            rtt_med=(new['rtt_med'] + old['rtt_med']) / 2,
            rtt_99=(new['rtt_99'] + old['rtt_99']) / 2,
            jitter_avg=(new['jitter_avg'] + old['jitter_avg']) / 2,
            jitter_med=(new['jitter_med'] + old['jitter_med']) / 2,
            jitter_99=(new['jitter_99'] + old['jitter_99']) / 2,
            out_packets=(new['out_packets'] - old['out_packets']) / sec_diff,
            out_dropped=(new['out_dropped'] - old['out_dropped']) / sec_diff,
            out_bytes=(new['out_bytes'] - old['out_bytes']) / sec_diff)

    def print_header(timestamp):
        cli.fout.write('\n')
        cli.fout.write('%-20s%14s%10s%10s%15s%15s%15s%15s%15s%15s %14s%10s%10s\n' %
                       (time.strftime('%X') + str(timestamp % 1)[1:8],
                        'INC Mbps', 'Mpps', 'dropped',
                        'Avg RTT (us)', 'Med RTT (us)', '99th RTT (us)',
                        'Avg Jit (us)', 'Med Jit (us)', '99th Jit (us)',
                        'OUT Mbps', 'Mpps', 'dropped'))
        cli.fout.write('%s\n' % ('-' * 186))

    def print_footer():
        cli.fout.write('%s\n' % ('-' * 186))

    def print_delta(port, delta, timestamp):
        # 24 bytes = per-packet Ethernet overhead (preamble + IFG + CRC),
        # added so Mbps reflects on-the-wire throughput.
        stats = (port,
                 (delta.inc_bytes + delta.inc_packets * 24) * 8 / 1e6,
                 delta.inc_packets / 1e6,
                 delta.inc_dropped,
                 delta.rtt_avg,
                 delta.rtt_med,
                 delta.rtt_99,
                 delta.jitter_avg,
                 delta.jitter_med,
                 delta.jitter_99,
                 (delta.out_bytes + delta.out_packets * 24) * 8 / 1e6,
                 delta.out_packets / 1e6,
                 delta.out_dropped)
        cli.fout.write('%-20s%14.1f%10.3f%10d%15.3f%15.3f%15.3f%15.3f%15.3f%15.3f '
                       '%14.1f%10.3f%10d\n' % stats)
        with open(stats_csv, 'a+') as f:
            line = '%s,%s,%.1f,%.3f,%d,%.3f,%.3f,%.3f,%.3f,%.3f,%.3f,%.1f,%.3f,%d\n'
            line %= (time.strftime('%X') + str(timestamp % 1)[1:8],) + stats
            f.write(line)

    def get_total(arr):
        # Field-by-field sum of a list of stat snapshots.
        total = copy.deepcopy(arr[0])
        for stat in arr[1:]:
            total['inc_packets'] += stat['inc_packets']
            total['inc_dropped'] += stat['inc_dropped']
            total['inc_bytes'] += stat['inc_bytes']
            total['rtt_avg'] += stat['rtt_avg']
            total['rtt_med'] += stat['rtt_med']
            total['rtt_99'] += stat['rtt_99']
            total['jitter_avg'] += stat['jitter_avg']
            total['jitter_med'] += stat['jitter_med']
            total['jitter_99'] += stat['jitter_99']
            total['out_packets'] += stat['out_packets']
            total['out_dropped'] += stat['out_dropped']
            total['out_bytes'] += stat['out_bytes']
        return total

    def get_all_stats(cli, sess):
        stats = cli.bess.get_port_stats(sess.port())
        try:
            ret = {
                'inc_packets': stats.inc.packets,
                'out_packets': stats.out.packets,
                'inc_bytes': stats.inc.bytes,
                'out_bytes': stats.out.bytes,
                'inc_dropped': stats.inc.dropped,
                'out_dropped': stats.out.dropped,
                'timestamp': stats.timestamp,
            }
        except:
            # Stats unavailable (e.g. the port just went away): use zeros.
            ret = {
                'inc_packets': 0,
                'out_packets': 0,
                'inc_bytes': 0,
                'out_bytes': 0,
                'inc_dropped': 0,
                'out_dropped': 0,
                'timestamp': time.time(),
            }
        rtt_now = sess.curr_rtt()
        if rtt_now is None:
            rtt_now = {'rtt_avg': 0, 'rtt_med': 0, 'rtt_99': 0,
                       'jitter_avg': 0, 'jitter_med': 0, 'jitter_99': 0}
        ret.update(rtt_now)
        return ret

    all_ports = sorted(cli.bess.list_ports().ports, key=lambda x: x.name)
    drivers = {}
    for port in all_ports:
        drivers[port.name] = port.driver

    if not ports:
        ports = [port.name for port in all_ports]
    if not ports:
        raise cli.CommandError('No port to monitor')

    cli.fout.write('Monitoring ports: %s (Send CTRL + c to stop)\n' %
                   ', '.join(ports))

    last = {}
    now = {}

    csv_header = '#' + ','.join(['time', 'port',
                                 'inc_mbps', 'inc_mpps', 'inc_dropped',
                                 'avg_rtt_us', 'med_rtt_us', '99th_rtt_us',
                                 'avg_jit_us', 'med_jit_us', '99th_jit_us',
                                 'out_mbps', 'out_mpps', 'out_dropped']) + '\n'
    with open(stats_csv, 'w+') as f:
        for port in ports:
            line = '#port ' + port + ': '
            line += str(cli.get_session(port).spec()).replace('\n', '; ')
            line = re.sub(r'\s+', ' ', line) + '\n'
            f.write(line)
        f.write(csv_header)

    for port in ports:
        sess = cli.get_session(port)
        last[port] = get_all_stats(cli, sess)

    try:
        while True:
            time.sleep(1)
            for port in ports:
                sess = cli.get_session(port)
                now[port] = get_all_stats(cli, sess)
            print_header(now[port]['timestamp'])
            for port in ports:
                print_delta('%s/%s' % (port, drivers[port]),
                            get_delta(last[port], now[port]),
                            now[port]['timestamp'])
            print_footer()
            if len(ports) > 1:
                # BUG FIX: the 'Total' row previously omitted print_delta's
                # mandatory timestamp argument, raising TypeError whenever
                # more than one port was monitored.  list(...) also makes
                # the dict views indexable under Python 3.
                print_delta('Total',
                            get_delta(get_total(list(last.values())),
                                      get_total(list(now.values()))),
                            now[port]['timestamp'])
            for port in ports:
                last[port] = now[port]
    except KeyboardInterrupt:
        pass
@cmd('monitor port', 'Monitor the current traffic of all ports')
def monitor_port_all(cli):
    # No explicit ports: _monitor_ports falls back to every known port.
    _monitor_ports(cli)
@cmd('monitor port PORT...', 'Monitor the current traffic of specified ports')
def monitor_port_all(cli, ports):
    # NOTE(review): reuses the name monitor_port_all (shadows the variant
    # above); harmless because dispatch goes through cmdlist.
    _monitor_ports(cli, *ports)
@cmd('set csv CSV', 'Set the CSV file for stats output')
def set_csv(cli, csv):
    # Redirect subsequent monitoring output to the given CSV path.
    global stats_csv
    stats_csv = csv
def _connect_pipeline(cli, pipe):
for i in range(len(pipe)):
u = pipe[i]
if i < len(pipe) - 1:
v = pipe[i + 1]
u.connect(v)
def _create_rate_limit_tree(cli, wid, resource, limit):
    """Create a per-worker rate_limit traffic class and return its name.

    :param wid: worker id the class is attached to
    :param resource: resource to police ('bit' or 'packet')
    :param limit: per-second budget for that resource
    """
    rl_name = 'rl_pps_w%d' % (wid,)
    cli.bess.add_tc(rl_name, wid=wid, policy='rate_limit',
                    resource=resource, limit={resource: limit})
    return rl_name
def _create_port_args(cli, port_id, num_rx_cores, num_tx_cores):
args = {'driver': None, 'name': port_id,
'arg': {'num_inc_q': num_rx_cores, 'num_out_q': num_tx_cores,
'size_inc_q': 2048, 'size_out_q': 2048}}
args['driver'] = 'PMDPort'
if re.match(r'^\d\d:\d\d.\d$', port_id) is not None:
args['arg']['pci'] = port_id
else:
try:
args['arg']['port_id'] = int(port_id)
except:
raise cli.CommandError('Invalid port index')
return args
@cmd('start PORT MODE [TRAFFIC_SPEC...]', 'Start sending packets on a port')
def start(cli, port, mode, spec):
    """Create a port, build TX/RX pipelines for the chosen traffic mode,
    pin them to cores, and start generating traffic."""
    setup_mclasses(cli, globals())
    global available_cores
    if not isinstance(port, str):
        raise cli.CommandError('Port identifier must be a string')
    if cli.port_is_running(port):
        bess_commands.warn(cli, 'Port %s is already running.' % (port,),
                           _stop, port)
    # Allocate cores if necessary
    if spec is not None:
        if 'tx_cores' in spec:
            tx_cores = list(map(int, spec.pop('tx_cores').split(' ')))
        else:
            if len(available_cores) > 0:
                tx_cores = [available_cores.pop(0)]
            else:
                raise cli.InternalError('No available cores.')
        if 'rx_cores' in spec:
            rx_cores = list(map(int, spec.pop('rx_cores').split(' ')))
        elif 'rx_cores' not in spec and 'tx_cores' not in spec:
            # Neither was specified explicitly ('tx_cores' was popped above
            # if given): share the TX cores for RX.
            rx_cores = tx_cores
        else:
            if len(available_cores) > 0:
                rx_cores = [available_cores.pop(0)]
            else:
                raise cli.InternalError('No available cores.')
    else:
        if len(available_cores) > 0:
            tx_cores = [available_cores.pop(0)]
            rx_cores = tx_cores
        else:
            raise cli.InternalError('No available cores.')

    # Create the port
    num_tx_cores = len(tx_cores)
    num_rx_cores = len(rx_cores)
    num_cores = num_tx_cores + num_rx_cores
    # NOTE(review): _create_port_args is declared as (cli, port_id,
    # num_rx_cores, num_tx_cores) but is called here with the TX count
    # first -- confirm whether the inc/out queue counts are intentionally
    # swapped.
    port_args = _create_port_args(cli, port, num_tx_cores, num_rx_cores)
    with cli.bess_lock:
        ret = cli.bess.create_port(port_args['driver'], port_args['name'],
                                   arg=port_args['arg'])
    port = ret.name
    if spec is not None and 'src_mac' not in spec:
        spec['src_mac'] = ret.mac_addr

    # Find traffic mode
    tmode = None
    for x in modes.__dict__:
        m = modes.__dict__[x]
        if getattr(m, 'name', '') == mode:
            tmode = m
    if tmode is None:
        raise cli.CommandError("Mode %s is invalid" % (mode,))

    # Initialize the pipelines
    if spec is not None:
        ts = tmode.Spec(tx_cores=tx_cores, rx_cores=rx_cores, **spec)
    else:
        ts = tmode.Spec(src_mac=ret.mac_addr, tx_cores=tx_cores,
                        rx_cores=rx_cores)
    tx_pipes = dict()
    rx_pipes = dict()
    with cli.bess_lock:
        cli.bess.pause_all()
        # Setup TX pipelines
        for i, core in enumerate(tx_cores):
            cli.bess.add_worker(wid=core, core=core)
            tx_pipe = tmode.setup_tx_pipeline(cli, port, ts)
            # These modules are required across all pipelines
            tx_pipe.tx_rr = RoundRobin(gates=[0])
            tx_pipe.tx_q = Queue()
            tx_pipe.modules += [tx_pipe.tx_q,
                                Timestamp(offset=ts.tx_timestamp_offset),
                                tx_pipe.tx_rr]
            q = QueueOut(port=port, qid=i)
            sink = Sink()
            tx_pipe.tx_rr.connect(q, 0, 0)
            tx_pipe.tx_rr.connect(sink, 1, 0)
            tx_pipes[core] = tx_pipe

            # Setup rate limiting, pin pipelines to cores, connect pipelines
            src = tx_pipe.modules[0]
            if ts.mbps is not None:
                # Divide the requested bit rate evenly across TX cores.
                bps_per_core = long(1e6 * ts.mbps / num_tx_cores)
                rl_name = \
                    _create_rate_limit_tree(cli, core, 'bit', bps_per_core)
                cli.bess.attach_module(src.name, wid=core)
                cli.bess.attach_module(tx_pipe.tx_q.name, rl_name)
            elif ts.pps is not None:
                # Divide the requested packet rate evenly across TX cores.
                pps_per_core = long(ts.pps / num_tx_cores)
                rl_name = \
                    _create_rate_limit_tree(cli, core, 'packet', pps_per_core)
                cli.bess.attach_module(src.name, wid=core)
                cli.bess.attach_module(tx_pipe.tx_q.name, rl_name)
            else:
                rl_name = None
                cli.bess.attach_module(src.name, wid=core)
                cli.bess.attach_module(tx_pipe.tx_q.name, wid=core)
            tx_pipe.tc = rl_name

            _connect_pipeline(cli, tx_pipe.modules)
            tx_pipe.modules += [q, sink]

        # Setup RX pipelines
        rx_qids = dict()
        if num_rx_cores < num_tx_cores:
            for i, core in enumerate(rx_cores):
                rx_qids[core] = [i]
            # round-robin remaining queues across rx_cores
            for i in range(len(tx_cores[num_rx_cores:])):
                core = rx_cores[(num_rx_cores + i) % num_rx_cores]
                rx_qids[core].append(num_rx_cores + i)

        for i, core in enumerate(rx_cores):
            if core not in tx_cores:
                cli.bess.add_worker(wid=core, core=core)
            rx_pipe = tmode.setup_rx_pipeline(cli, port, ts)
            queues = []
            if core in rx_qids and len(rx_qids[core]) > 1:
                # Several RX queues feed this core: merge them first.
                m = Merge()
                front = [m]
                for j, qid in enumerate(rx_qids[core]):
                    q = QueueInc(port=port, qid=qid)
                    queues.append(q)
                    cli.bess.attach_module(q.name, wid=core)
                    q.connect(m, igate=j)
            else:
                q = QueueInc(port=port, qid=i)
                front = [q]
                cli.bess.attach_module(q.name, wid=core)
            front += [
                Measure(offset=ts.rx_timestamp_offset, jitter_sample_prob=1.0)]
            rx_pipe.modules = front + rx_pipe.modules
            rx_pipes[core] = rx_pipe

            # Connect pipelines and pin to cores
            _connect_pipeline(cli, rx_pipe.modules)
            # TODO: maintain queues in a separate structure
            rx_pipe.modules += queues
        cli.bess.resume_all()
    sess = Session(port, ts, mode, tx_pipes, rx_pipes, cli.bess, cli)
    sess.start_monitor()
    cli.add_session(sess)
def _stop(cli, port):
    """Tear down the traffic session on *port*: destroy its modules, its
    workers and the port itself, returning its cores to the shared pool."""
    global available_cores
    sess = cli.remove_session(port)
    sess.stop_monitor()
    # Return this session's cores to the free pool, kept sorted.
    reclaimed_cores = sess.spec().tx_cores + sess.spec().rx_cores
    available_cores = list(sorted(available_cores + reclaimed_cores))
    with cli.bess_lock:
        cli.bess.pause_all()
        try:
            workers = set()
            for core, pipe in sess.tx_pipelines().items():
                for m in pipe.modules:
                    cli.bess.destroy_module(m.name)
                workers.add(core)
            for core, pipe in sess.rx_pipelines().items():
                for m in pipe.modules:
                    cli.bess.destroy_module(m.name)
                workers.add(core)
            for worker in workers:
                cli.bess.destroy_worker(worker)
            cli.bess.destroy_port(sess.port())
        finally:
            # Always resume the daemon, even if teardown partially failed.
            cli.bess.resume_all()
@cmd('stop PORT...', 'Stop sending packets on a set of ports')
def stop(cli, ports):
    # Tear down each requested port's traffic session.
    for port in ports:
        _stop(cli, port)
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def weight_variable(shape):
    """Return a tf.Variable of *shape* initialized from a truncated normal
    distribution (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """Return a tf.Variable of *shape* initialized to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (output keeps the
    input's spatial size)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 (halves each spatial dimension)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1,2,2,1], padding='SAME')
def train_and_save_model(filename):
    """Train a two-conv-layer MNIST classifier and save a checkpoint to
    *filename*; key tensors are exported via the 'mnist' collection so a
    loader can retrieve them by position."""
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Placeholders:
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Model Parameters
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1, 28, 28, 1]) # x is a [picture_ct, 28*28], so x_image is [picture_ct, 28, 28, 1]
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2)+b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # Fully connected layer on the 7x7x64 pooled features.
    W_fc1 = weight_variable([7*7*64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout keep-probability defaults to 1.0 (no dropout) at inference.
    keep_prob = tf.placeholder_with_default(1.0, ())
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    W_fc2=weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    correct_count = tf.count_nonzero(correct_prediction)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Set up training criterion
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    # Initializer step
    init_op = tf.global_variables_initializer() # must be after adamoptimizer, since that creates more vars

    # Configure saver
    saver = tf.train.Saver()
    tf.add_to_collection('mnist', x)
    tf.add_to_collection('mnist', y_)
    tf.add_to_collection('mnist', keep_prob)
    tf.add_to_collection('mnist', y_conv)
    tf.add_to_collection('mnist', correct_count)
    tf.add_to_collection('mnist', cross_entropy)

    # Train the model
    with tf.Session() as sess:
        sess.run(init_op)
        for i in range(20000):
            batch = mnist.train.next_batch(50)
            if i%100 == 0:
                # Evaluate on the current batch with dropout disabled.
                train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_:batch[1], keep_prob:1.0})
                print("Step {}: Training accuracy {}".format(i, train_accuracy))
            sess.run(train_step, feed_dict={x: batch[0], y_:batch[1], keep_prob:0.5})
        save_path = saver.save(sess, filename)
        print('Model saved to: {}'.format(filename))
if __name__ == '__main__':
    # Train and checkpoint the model in the current directory.
    train_and_save_model('./mnist-model')
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
"""Tests for various encoding issues throughout the library"""
import unittest
import cybox.bindings as bindings
from cybox.common import Contributor, String, MeasureSource
from cybox.core import Observable
from cybox.objects.code_object import Code, CodeSegmentXOR
from cybox.objects.whois_object import WhoisEntry
from cybox.test import round_trip
# Assorted non-ASCII symbols used to exercise encoding round-trips.
UNICODE_STR = "❤ ♎ ☀ ★ ☂ ♞ ☯ ☭ ☢ €☎⚑ ❄♫✂"
class EncodingTests(unittest.TestCase):
    """Round-trip and quoting tests exercising non-ASCII input handling
    across cybox objects and the raw bindings helpers."""

    def test_double_encode(self):
        s = String(UNICODE_STR)
        s2 = round_trip(s)

    def test_contributor(self):
        c = Contributor()
        c.name = UNICODE_STR
        c.role = UNICODE_STR
        c.email = UNICODE_STR
        c.phone = UNICODE_STR
        c.organization = UNICODE_STR
        c2 = round_trip(c)

    def test_observable(self):
        o = Observable()
        o.title = UNICODE_STR
        o2 = round_trip(o)

    def test_code(self):
        cs = Code()
        cs.code_segment_xor = CodeSegmentXOR()
        cs.code_segment_xor.xor_pattern = UNICODE_STR
        cs2 = round_trip(cs)
        self.assertEqual(cs.to_dict(), cs2.to_dict())

    def test_measure_source(self):
        o = MeasureSource()
        o.name = UNICODE_STR
        o2 = round_trip(o)

    def test_whois(self):
        o = WhoisEntry()
        o.dnssec = UNICODE_STR
        o2 = round_trip(o)

    def test_quote_xml(self):
        s = bindings.quote_xml(UNICODE_STR)
        self.assertEqual(s, UNICODE_STR)

    def test_quote_attrib(self):
        """Tests that the stix.bindings.quote_attrib method works properly
        on unicode inputs.

        Note:
            The quote_attrib method (more specifically, saxutils.quoteattr())
            adds quotation marks around the input data, so we need to strip
            the leading and trailing chars to test effectively
        """
        s = bindings.quote_attrib(UNICODE_STR)
        s = s[1:-1]
        self.assertEqual(s, UNICODE_STR)

    def test_quote_attrib_int(self):
        i = 65536
        s = bindings.quote_attrib(i)
        self.assertEqual('"65536"', s)

    def test_quote_attrib_bool(self):
        b = True
        s = bindings.quote_attrib(b)
        self.assertEqual('"True"', s)

    def test_quote_xml_int(self):
        i = 65536
        s = bindings.quote_xml(i)
        self.assertEqual(str(i), s)

    def test_quote_xml_bool(self):
        b = True
        s = bindings.quote_xml(b)
        self.assertEqual(str(b), s)

    def test_quote_xml_encoded(self):
        # Pre-encoded bytes must be decoded back before quoting.
        encoding = bindings.ExternalEncoding
        encoded = UNICODE_STR.encode(encoding)
        quoted = bindings.quote_xml(encoded)
        self.assertEqual(UNICODE_STR, quoted)

    def test_quote_attrib_encoded(self):
        encoding = bindings.ExternalEncoding
        encoded = UNICODE_STR.encode(encoding)
        quoted = bindings.quote_attrib(encoded)[1:-1]
        self.assertEqual(UNICODE_STR, quoted)

    def test_quote_xml_zero(self):
        i = 0
        s = bindings.quote_xml(i)
        self.assertEqual(str(i), s)

    def test_quote_attrib_zero(self):
        i = 0
        s = bindings.quote_attrib(i)
        self.assertEqual('"0"', s)

    def test_quote_xml_none(self):
        i = None
        s = bindings.quote_xml(i)
        self.assertEqual('', s)

    def test_quote_attrib_none(self):
        i = None
        s = bindings.quote_attrib(i)
        self.assertEqual('""', s)

    def test_quote_attrib_empty(self):
        i = ''
        s = bindings.quote_attrib(i)
        self.assertEqual('""', s)

    def test_quote_xml_empty(self):
        i = ''
        s = bindings.quote_xml(i)
        self.assertEqual('', s)

    def test_to_xml_utf16_encoded(self):
        encoding = 'utf-16'
        o = Observable()
        o.title = UNICODE_STR
        xml = o.to_xml(encoding=encoding)
        self.assertTrue(UNICODE_STR in xml.decode(encoding))

    def test_to_xml_default_encoded(self):
        o = Observable()
        o.title = UNICODE_STR
        xml = o.to_xml()
        self.assertTrue(UNICODE_STR in xml.decode('utf-8'))

    def test_to_xml_no_encoding(self):
        # encoding=None must yield a native (unicode) string, not bytes.
        o = Observable()
        o.title = UNICODE_STR
        xml = o.to_xml(encoding=None)
        self.assertTrue(isinstance(xml, str))
        self.assertTrue(UNICODE_STR in xml)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
import discord
import asyncio
from random import randint
import time
# Load the bot token from a local text file.  (The old comment claimed the
# token was being *saved*, and the file handle was never closed; a with
# block fixes the leak.)
with open('token.txt', 'r') as token_file:
    token = token_file.read()
# Start discord client
client = discord.Client()
#when connected
@client.event
async def on_ready():
    # Log which bot account we authenticated as.
    print(f'We have logged in as {client.user}')
def _load_users(path='users.txt'):
    # Parse the "<name>,<money>" lines persisted on disk; a missing file
    # simply means nobody has played yet.
    users = []
    try:
        with open(path, 'r') as inf:
            for raw in inf.readlines():
                raw = raw.rstrip('\n')
                if raw:
                    fields = raw.split(',')
                    users.append([fields[0], fields[1]])
    except FileNotFoundError:
        pass
    return users


def _save_users(users, path='users.txt'):
    # Persist every [name, money] pair back to disk, one CSV line each.
    with open(path, 'w') as f:
        for entry in users:
            f.write(entry[0] + ',' + str(entry[1]) + '\n')


#when message is sent
@client.event
async def on_message(message):
    """Track per-user balances in users.txt and run the requested mini-game."""
    print(f'{message.channel}: {message.author}: {message.author.name}: {message.content}')
    #if the message was sent by an account other than this bot
    if message.author.name != 'GambleBot':
        #save username
        username = message.author.name + message.author.discriminator
        money = 0
        # BUG FIX: the original opened users.txt with mode 'w+', which
        # truncates the file *before* reading it and therefore wiped every
        # saved balance on each incoming message.
        users = _load_users()
        #if message author not in data, add them with the 100 starting balance
        if not any(entry[0] == username for entry in users):
            users.append([username, 100])
            print(users)
            _save_users(users)
        #if message author gives bot a command
        if 'bot' in message.content.lower():
            #save message author's money
            for entry in users:
                if entry[0] == username:
                    money = int(entry[1])
            # Default bet of 0: the original left `bet` undefined when no
            # number was present, crashing every game with a NameError.
            bet = 0
            #limit message author's bet amount to 1..1000; the last numeric
            #substring found in the message wins (original behaviour)
            possible_numbers = [str(i) for i in range(1, 1001)]
            if len(message.content) > 0:
                for num in possible_numbers:
                    if num in message.content:
                        if int(num) <= money:
                            bet = int(num)
                        else:
                            await message.channel.send(content='You cannot bet more than you have')
            #if message author asks for rules
            if 'rules' in message.content.lower():
                await message.channel.send(content= (
                    'bot money = see your balance \n'
                    'bot top = scoreboard \nbot coin = coinflip \n'
                    'bot slots = slots \n'
                    'bot guess = guess a letter between a and e \n'
                    'bot rps = rock paper scissors \n'
                    'bot light = red light green light'))
            #if message author wants to see the scoreboard
            if 'top' in message.content.lower():
                users.sort(key=lambda entry: int(entry[1]), reverse=True)
                print(users)
                board = ''
                for entry in users:
                    board += entry[0] + ': ' + entry[1] + '\n'
                await message.channel.send(content='SCORES: \n' + board)
            #if message author wants to play a guessing game
            if 'guess' in message.content.lower():
                answer = randint(0, 4)
                # The winning letter is one of a-e; a correct guess pays 1000x.
                if 'abcde'[answer] in message.content.lower():
                    await message.channel.send(content='You won ' + str(bet * 1000))
                    money += (bet * 1000)
                else:
                    await message.channel.send(content='You lost ' + str(bet))
                    money -= bet
            #if message author wants to check their balance
            if 'money' in message.content.lower():
                await message.channel.send(content='You have ' + str(money))
                #if message author is broke, top the balance back up
                if money < 1:
                    money = 100
                    for entry in users:
                        if entry[0] == username:
                            entry[1] = money
                    _save_users(users)
                    await message.channel.send(content='Restart. Now you have ' + str(money))
            #if message author wants to flip a coin
            if 'coin' in message.content.lower():
                #save message author's guess ('' if they never stated one;
                #the original raised NameError in that case)
                guess = ''
                if 'heads' in message.content.lower():
                    guess = 'heads'
                if 'tails' in message.content.lower():
                    guess = 'tails'
                #play animation — BUG FIX: use asyncio.sleep instead of
                #time.sleep so the event loop is not blocked
                message = await message.channel.send(content=':full_moon:')
                for face in (':new_moon:', ':full_moon:', ':new_moon:', ':full_moon:'):
                    await asyncio.sleep(0.5)
                    await message.edit(content=face)
                #flip coin
                result = 'heads' if randint(0, 1) == 0 else 'tails'
                if guess == result:
                    await message.channel.send(content=result.capitalize() + '! You won ' + str(bet))
                    money += bet
                else:
                    await message.channel.send(content=result.capitalize() + '! You lost ' + str(bet))
                    money -= bet
                #update and save users money to text file
                for entry in users:
                    if entry[0] == username:
                        entry[1] = money
                _save_users(users)
                #if message author is out of money, add more
                if money <= 1:
                    await message.channel.send(content='You are broke')
                    money = 100
            #if message author wants to play slots
            if 'slots' in message.content.lower():
                #print slots on discord, one reel at a time
                fruits = (':tangerine:', ':banana:', ':watermelon:', ':strawberry:')
                slots = []
                message = await message.channel.send(content=slots)
                for _ in range(3):
                    slots.append(fruits[randint(0, 3)])
                    await message.edit(content=slots)
                #payout multiplier depends on which fruit lined up
                payouts = {':tangerine:': 1, ':banana:': 2, ':watermelon:': 2, ':strawberry:': 3}
                if slots[0] == slots[1] == slots[2]:
                    multiplier = payouts[slots[0]]
                    money += (bet * multiplier)
                    # BUG FIX: the banana win used to report 'bet' in the
                    # message while actually paying bet*2.
                    await message.channel.send(content='You won ' + str(bet * multiplier))
                else:
                    await message.channel.send(content='You lost ' + str(bet))
                    money -= bet
            #if message author wants to play rock paper scissors
            if 'rps' in message.content.lower():
                userid = str(message.author.id)
                choices = ['🗿', '📄', '✂']
                #print game and wait for a reaction from the same author
                message = await message.channel.send(content='Rock, Paper, Scissors \nChoose one:')
                for emoji in choices:
                    await message.add_reaction(emoji)

                def checkfor(reaction, user):
                    return str(user.id) == userid and str(reaction.emoji) in choices

                userplays = ''
                finished = False
                while not finished:
                    try:
                        reaction, user = await client.wait_for('reaction_add', timeout=10, check=checkfor)
                        userplays = str(reaction.emoji)
                        pcplays = choices[randint(0, 2)]
                        #rock beats scissors, scissors beats paper, paper beats rock
                        if userplays == pcplays:
                            await message.edit(content=userplays + ' vs ' + pcplays + '\nTie')
                        elif (userplays, pcplays) in (('🗿', '✂'), ('✂', '📄'), ('📄', '🗿')):
                            money += (bet)
                            await message.edit(content=userplays + ' vs ' + pcplays + '\nYou won ' + str(bet))
                        else:
                            money -= (bet)
                            await message.edit(content=userplays + ' vs ' + pcplays + '\nYou lost ' + str(bet))
                    #if message author does not play in time
                    except asyncio.TimeoutError:
                        if userplays == '':
                            await message.edit(content='timeout')
                    finished = True
            #if message author wants to play red light green light
            if 'light' in message.content.lower():
                userid = str(message.author.id)
                #print game and count thumbs-up reactions until time runs out
                message = await message.channel.send(content='Light: \n:green_circle:')
                await message.add_reaction('👍')

                def checkfor(reaction, user):
                    return str(user.id) == userid and str(reaction.emoji) in ['👍']

                count = 0
                timer = randint(3, 10)
                playing = True
                while playing:
                    try:
                        reaction, user = await client.wait_for('reaction_add', timeout=timer, check=checkfor)
                        if reaction.emoji == '👍':
                            count += 1
                    #when time runs out the light turns red
                    except asyncio.TimeoutError:
                        await message.edit(content='Light: \n:red_circle:')
                        donec = count
                        #any click after the red light forfeits the round
                        grace = True
                        while grace:
                            try:
                                reaction, user = await client.wait_for('reaction_add', timeout=2, check=checkfor)
                                if reaction.emoji == '👍':
                                    donec = 0
                            except asyncio.TimeoutError:
                                grace = False
                        if donec == 0:
                            money -= (bet)
                            await message.edit(content='You lost ' + str(bet))
                        elif donec > 0:
                            money += (bet * donec)
                            await message.edit(content='You got ' + str(donec) + ' clicks \nYou won ' + str(bet * donec))
                        playing = False
            #update and save users money to text file after any game
            for entry in users:
                if entry[0] == username:
                    entry[1] = money
            _save_users(users)
# Blocks until the client disconnects.
client.run(token) # recall token saved
#client id 775090461152116816
#permission int 8
|
from Integrateur.PhysiqueGenerale import PhysiqueGenerale
from Math.Point import Point
from Math.Vecteur import Vecteur
class ILeapFrog(PhysiqueGenerale):
    """Leap-frog integrator: the velocity is staggered half a time step
    from the position, which the very first update establishes with a
    half-step velocity kick."""

    def __init__(self, objs, dt):
        super().__init__(objs)
        self.dt = dt
        self.first = True  # the first update performs the initial half kick

    def prochainePosition(self, obj, dt):
        # Drift: advance the position by one full step using the current
        # (staggered) velocity.
        deplacement = obj.vitesse.multiplieParK(dt)
        return Point(obj.position.posX + deplacement.compX,
                     obj.position.posY + deplacement.compY)

    def majObjetPhy(self, obj):
        # Kick the velocity (half step the first time, full step after),
        # then drift the position by a full step.
        self.acceleration(obj)
        if self.first:
            obj.vitesse = self.prochaineVitesse(obj, 0.5 * self.dt)
            self.first = False
        else:
            obj.vitesse = self.prochaineVitesse(obj, self.dt)
        obj.position = self.prochainePosition(obj, self.dt)
|
from __future__ import print_function
import sys
import argparse
from datetime import timedelta
from subprocess import Popen
from time import time, sleep
def red(text):
    """Wrap *text* in ANSI escape codes so terminals render it in red."""
    return '\033[91m' + text + '\033[0m'
def log(*args):
    """Write *args* (space separated, like print) to stderr and flush
    immediately so timing output is never buffered."""
    sys.stderr.write(' '.join(str(arg) for arg in args) + '\n')
    sys.stderr.flush()
class HowLong(object):
    """Run a command while logging its elapsed wall-clock time at a fixed
    interval.

    Command-line interface:
        -i INTERVAL   timer interval in seconds (default 1)
        C ...         the command to run
    """
    def __init__(self):
        """Parse sys.argv into the interval and the command to execute."""
        parser = argparse.ArgumentParser(description='Time a process')
        parser.add_argument('-i', type=float, nargs='?', metavar='interval',
                            help='the timer interval, defaults to 1 second')
        parser.add_argument('command', metavar='C', type=str, nargs='+',
                            help='a valid command')
        self.parsed_args = parser.parse_args()
        # BUG FIX: compare against None, not truthiness — `-i 0` used to be
        # silently replaced by the 1-second default.
        self.timer_interval = 1 if self.parsed_args.i is None else self.parsed_args.i
        self.readable_command = " ".join(self.parsed_args.command)

    def run(self):
        """Spawn the command and log elapsed time until it exits."""
        log("Running", self.readable_command)
        process = Popen(self.parsed_args.command)
        start_time = time()
        # Poll the child; each tick logs the elapsed time in red.
        while process.poll() is None:
            sleep(self.timer_interval)
            elapsed_time = (time() - start_time) * 1000
            log(red(str(timedelta(milliseconds=elapsed_time))))
        log("Finished", self.readable_command)
def howlong():
    """Console entry point: time the command supplied on argv."""
    HowLong().run()
if __name__ == "__main__": howlong()
|
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config import config
# Build the SQLite connection string from the configured on-disk path.
db_path = config["database"]["path"]
db_engine_str = "sqlite:///" + db_path
# NOTE(review): convert_unicode was deprecated and removed in SQLAlchemy 1.4 —
# confirm the pinned SQLAlchemy version still accepts it.
engine = create_engine(db_engine_str, convert_unicode=True)
# Thread-local session factory; commits and flushes are explicit.
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
Base = declarative_base()
# Let models run queries via Model.query.
Base.query = db_session.query_property()
def init_db():
    """Create all tables for every model registered on Base.metadata."""
    # import all modules here that might define models so that
    # they will be registered properly on the metadata. Otherwise
    # you will have to import them first before calling init_db()
    import models
    Base.metadata.create_all(bind=engine)
def check_db():
    """Create the database file (and its parent directory) on first use."""
    # make sure database exists before using it
    if not os.path.exists(db_path):
        parent_path, filename = os.path.split(db_path)
        # BUG FIX: guard against an empty parent (a db_path with no directory
        # component), which made os.makedirs('') raise FileNotFoundError.
        if parent_path and not os.path.exists(parent_path):
            os.makedirs(parent_path)
        init_db()


# Ensure the schema exists as soon as this module is imported.
check_db()
|
import sys
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
mail_server = 'smtp.gmail.com:587' # SMTP host:port used for alert mail
mail_account = '' # account name used to log in to the SMTP server
mail_password = '' # password for mail_account
addr_sender = '' # "From" address
addr_receiver = '' # "To" address
verbose_level = 2 # values > 2 enable chatty tracing in read_sensor()
debug_level = 0 # non-zero switches read_sensor() to a dummy random value
error_temp = -999 # sentinel returned when a sensor read fails its CRC
# dictionary for 1-wire sensors: [sensor name] -> [1-Wire device id]
# NOTE(review): "IN" and "OUT" currently map to the *same* device id —
# confirm whether that is intentional.
sensor_dict = { "IN": "28-031463314cff",
                "OUT" : "28-031463314cff",
              }
#-------------------------------------------------------------------------------------------
def read_sensor(Sensor):
if verbose_level > 2:
print "1++++ read_sensor()"
print "sensor:" , Sensor
if debug_level == 0:
# get 1-Wire id from dictionary
sensor_slave = str(sensor_dict.get(Sensor))
# Open 1-wire slave file
sensor_device = '/sys/bus/w1/devices/' + str(sensor_slave) + '/w1_slave'
if verbose_level > 2:
print "open: ", sensor_device
try:
file = open(sensor_device)
filecontent = file.read() # Read content from 1-wire slave file
file.close() # Close 1-wire slave file
stringvalue = filecontent.split("\n")[1].split(" ")[9] # Extract temperature string
if stringvalue[0].find("YES") > 0:
temp = error_temp
else:
temp = float(stringvalue[2:]) / 1000 # Convert temperature value
# temp=str(temp)
except IOError:
print "PANIC read_sensor - Cannot find file >" + sensor_slave + "< in /sys/bus/w1/devices/"
print "No sensor attached"
print "check with > cat /sys/devices/w1_bus_master1/w1_master_slaves"
temp=("Sensor not attached")
else:
# this is dummy function generating a random number
# ony used for testing purposes
temp = random.randrange(-10, 30, 2) + 0.3
# temp = Sensor + " " + str(temp)
return(temp) # exit function read_sensor
# --------------------------------------------------------------------------------
def send_mail(title,message):
debug_level = 0 # set to 1 to get more messages
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = title
msg['From'] = addr_sender
msg['To'] = addr_receiver
# Create the body of the message (a plain-text and an HTML version).
text = message
html = """\
"""
html += message
html += """\
This is a service provided by raspberry
"""
# print html
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
msg.attach(part1)
msg.attach(part2)
mailsrv = smtplib.SMTP('smtp.gmail.com' , 587)
mailsrv.starttls()
mailsrv.login("sendesrsmail@gmail.com", "senderspassword")
mailsrv.sendmail("sendersmail@gmail.com", "receiversmail@gmail.com", msg.as_string())
mailsrv.quit()
return()
try:
if debug_level > 0: print "smtplib.SMTP:", mail_server
mailsrv = smtplib.SMTP(mail_server) # Send the message via local SMTP server.
except:
print "Error: unable to send email - smtp server"
print "Server on ", mail_server, " cannot be reached or service is down"
return()
try:
if debug_level > 0: print "mailsrv.login:", mail_account, mail_password
mailsrv.login(mail_account,mail_password)
except:
print "Error: unable to send email - login failed"
print "login is not valid - check name and password:",mail_account,mail_password
return()
try:
# sendmail function takes 3 arguments: sender's address, recipient's address and message to send - here it is sent as one string.
if debug_level > 0: print "mailsrv.sendmail:", addr_sender, addr_receiver
mailsrv.sendmail(addr_sender, addr_receiver, msg.as_string())
mailsrv.quit()
print "Successfully sent email"
except:
print "Error: unable to send email - wrong address"
print "mail address for sender or receiver invalid:",addr_sender,addr_receiver
#---------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Read the outdoor sensor once and e-mail an alert if the temperature
    # is outside the configured band.
    alarm_hi = 3.4 # upper alarm level (deg C)
    alarm_lo = 0.0 # lower alarm level (deg C)
    cur_temp = read_sensor("OUT")
    print cur_temp, alarm_hi, alarm_lo
    if cur_temp == error_temp:
        # The sensor read failed its CRC check.
        print "read error - CRC = NO"
    else:
        if (cur_temp > alarm_hi) or (cur_temp < alarm_lo):
            # Out of range: raise an e-mail alert.
            subject = "Critical Warning Alert"
            message = "Temperature is: " + str(cur_temp)
            print subject, message
            send_mail(subject,message)
        else:
            print "o.k."
    sys.exit(0)
|
import math
def power(x, n, mod=1000000007):
    """Return (x ** n) % mod via binary exponentiation in O(log n).

    Generalized: `mod` defaults to 1e9+7 (the original hard-coded value),
    so existing two-argument callers are unaffected.
    """
    res = 1
    x = x % mod
    while n > 0:
        if n & 1:
            res = (res * x) % mod
        n //= 2
        x = (x * x) % mod
    return res
def inverse(n):
    """Modular inverse of n modulo 1e9+7 via Fermat's little theorem
    (n^(p-2) mod p for prime p)."""
    exponent = 1000000007 - 2
    return power(n, exponent)
def fermet(n, r):
    """Binomial coefficient C(n, r) modulo 1e9+7, computed from a factorial
    table and Fermat inverses. Returns 1 when r == 0."""
    if r == 0:
        return 1
    # Factorial table fact[i] = i! mod p, built iteratively.
    fact = [1] * (n + 1)
    for i in range(1, n + 1):
        fact[i] = (fact[i - 1] * i) % 1000000007
    numerator = fact[n]
    denom_inv = inverse(fact[r]) % 1000000007 * inverse(fact[n - r]) % 1000000007
    return (numerator * denom_inv) % 1000000007
# For each test case: with f = multiplicity of the maximum element, the answer
# is 2^(n-f) * (2^f - C(f, f/2)) mod 1e9+7 when f is even, else 2^(n-f) * 2^f.
# NOTE(review): contest-specific counting formula — inferred from the code,
# confirm against the original problem statement.
for _ in range(int(input())):
    n = int(input())
    a = list(map(int, input().split()))
    maxx = -1
    mp = dict()
    # Count occurrences of each value and track the maximum seen.
    for i in a:
        if i not in mp:
            mp[i] = 1
        else:
            mp[i] += 1
        maxx = max(maxx, i)
    f = mp[maxx]
    l = pow(2, n - f)
    r = pow(2, f)
    if (f % 2 == 0):
        # Subtract the "balanced split" subsets of the maximal elements.
        s = fermet(f, f // 2)
        r -= s
        r = r % 1000000007
    print(l * r % 1000000007)
|
from zope.interface import Interface
class IChat(Interface):
    """Interface for a chat service: registration, polling and messaging."""
    def initialize(self, username=None):
        """ Check if the user is registered, and register if not
        """
    def get_uncleared_messages(self, audience='*', mark_cleared=False):
        """ Retrieve the uncleared messages from the chat server.
        `audience` defaults to '*'; when `mark_cleared` is true the fetched
        messages are presumably flagged so they are not returned again —
        confirm with the implementation.
        """
    def poll(self, username):
        """ Poll the chat server to retrieve new online users and chat
        messages
        """
    def send_message(self, to, message, chat_type='chatbox'):
        """ Send a chat message
        """
    def clear_messages(self, audience):
        """ Mark the messages in a chat contact's messagebox as cleared.
        This means that they won't be loaded and displayed again next time
        that chat box is opened.
        """
class IChatBox(Interface):
    """Interface for the chat-box UI component."""
    def reverse_escape(self, html):
        """ Presumably undoes HTML escaping applied to *html* — confirm
        with the implementation. """
    def render_chat_box(self, box_id, user, contact):
        """ Render the chat box identified by *box_id* for *user* talking
        to *contact*. NOTE(review): inferred from the signature — verify. """
|
# Generated by Django 2.0 on 2018-11-23 04:55
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the 'admin' field from the
    # neighborhood model.
    dependencies = [
        ('hood', '0006_auto_20181122_2043'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='neighborhood',
            name='admin',
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
See http://stackoverflow.com/a/20824134/2401932
'''
import csv
from operator import itemgetter
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import scipy.optimize as optimize
import sys
sys.path.append('.')
from experiments.data import loadDataFile, loadTradesFromDB
def fit(x, a, b, c, d):
    """Sinusoid model: amplitude a, angular frequency b, phase c, offset d."""
    return d + a * np.sin(b * x + c)
def fit_with_gradient(x, a, b, c, d, e):
    """Sinusoid model plus a linear trend term e*x."""
    sinusoid = a * np.sin(b * x + c) + d
    trend = e * x
    return sinusoid + trend
FIT_FN = fit
PERIOD = 60*60*12   # analysis window: 12 hours of trades, in seconds
CUTOFF = 10000      # only the first CUTOFF seconds feed the fit
DAYSTART = 1388707200 + 11500 + 13000
trades = loadTradesFromDB(daystart=DAYSTART, seconds=PERIOD)
# Rebase timestamps so the window starts at t=0.
trades = [(ts-DAYSTART, price) for ts, price in trades]
# Select the subset of trades we'll use for prediction
trades_w = [(ts, price) for ts, price in trades if ts < CUTOFF]
xs, ys = zip(*trades_w)
# A linearly increasing sigma.
# NOTE(review): in curve_fit a *larger* sigma means *lower* weight, so this
# actually weights the earliest trades most — the original comment claimed
# the opposite; confirm the intent.
s = np.fromiter(range(1, len(xs)+1), int)
# Find our curve fit
fitting_parameters, covariance = optimize.curve_fit(FIT_FN, xs, ys, p0=None, sigma=s)
print("Fitting params: %s" % (fitting_parameters,))
fig, ax = plt.subplots()
ax.grid()
# Plot the full line
full_xs, full_ys = zip(*trades)
ax.plot(full_xs, full_ys)
# Then in a different colour plot the section we actually used for the curve fit
ax.plot(xs, ys)
# Plot the fitted curve, sampled roughly every 1000 seconds.
# BUG FIX: np.linspace requires an integer sample count; PERIOD/1000 is a
# float under Python 3 and raises TypeError in modern NumPy.
sample_xs = np.linspace(0, PERIOD, PERIOD // 1000)
sample_ys = FIT_FN(sample_xs, *fitting_parameters)
ax.plot(sample_xs, sample_ys)
plt.show()
|
import requests
from email.mime.text import MIMEText
from datetime import date
import smtplib, sys
from datetime import datetime
import time
import subprocess
dtime = datetime.now().strftime("%d-%m-%Y")
# Polling frequency (seconds) for the vaccine availability check
frequency = 60
# List of 6-digit PIN codes to watch
PINCODE = ["560010","560002","560008","560011","560020","560008","560003","560001","560076","560078","560066","560060","560041","560043","560064"]
SMTP_SERVER = "smtp.gmail.com"
SMTP_PORT = 587
# Personal gmail address; make sure "less secure apps" access is enabled:
#   go to the "Less secure app access" section of your Google Account
#   (sign in if needed) and turn "Allow less secure apps" ON.
SMTP_USERNAME = "********@gmail.com"
SMTP_PASSWORD = "********"
EMAIL_FROM = "*******@gmail.com"
# List of comma-separated recipient email addresses
EMAIL_TO = ["********@gmail.com"]
# Date format used in the e-mail subject
DATE_FORMAT = "%d/%m/%Y"
EMAIL_SPACE = ", "
DATA='This is the vaccine status check..'
def send_email(sub, emailTolist):
    """E-mail the fixed status text to every address in *emailTolist*
    through the configured gmail SMTP account."""
    msg = MIMEText(DATA)
    msg['Subject'] = sub + " %s" % (date.today().strftime(DATE_FORMAT))
    msg['To'] = EMAIL_SPACE.join(emailTolist)
    msg['From'] = EMAIL_FROM
    # STARTTLS handshake, then authenticate and deliver.
    server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
    server.starttls()
    server.login(SMTP_USERNAME, SMTP_PASSWORD)
    server.sendmail(EMAIL_FROM, emailTolist, msg.as_string())
    server.quit()
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
# Poll the public CoWIN calendar endpoint for every configured PIN code forever.
while (True):
    try:
        for eachpin in PINCODE:
            print ("Trying at cowin site at PINCODE : " + eachpin + "\nAt :: " + str(datetime.now().strftime("%H:%M:%S")))
            weburl = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode=" + eachpin + "&date=" + dtime
            r = requests.get(weburl, headers=headers)
            res = r.json()
            centers = res['centers']
            for cen in centers:
                for ses in cen['sessions']:
                    # Only sessions open to under-19 age limits (i.e. 18+) matter here.
                    if ses['min_age_limit'] < 19:
                        print ("Location----------------: " + cen['address'])
                        print ("available_capacity------: " + str(ses['available_capacity']))
                        if ses['available_capacity'] > 0:
                            # BUG FIX: 'Avaiable' -> 'Available' in the alert subject.
                            sub = "Covax update: Available at : " + cen['address']
                            send_email(sub, EMAIL_TO)
                            print ("Vaccine available, mail sent")
                            # SECURITY: `sub` contains server-supplied text and is
                            # interpolated into a shell command (shell=True) —
                            # pass an argument list with shell=False instead.
                            subprocess.call("osascript -e '{}'".format(sub), shell=True)
        print ("\n\nNo vaccine available.... trying after " + str(frequency) + " seconds\n\n")
        print ("---------------------------------------------------------------")
        time.sleep(frequency)
    # BUG FIX: the original bare `except:` also swallowed KeyboardInterrupt,
    # making the poller impossible to stop with Ctrl-C.
    except KeyboardInterrupt:
        raise
    except Exception:
        print ("\n....... Some exception occured.... continuing.. \n")
|
def reverseList(li):
    """Reverse *li* in place by swapping mirror-image pairs."""
    n = len(li)
    for i in range(n // 2):
        li[i], li[n - 1 - i] = li[n - 1 - i], li[i]
# Demo: reverse a sample list in place and show the result.
li = [2,4,5,12,53,57]
reverseList(li)
print(li)
def reverseList2(li):
    """Reverse *li* in place, addressing the mirror element with a
    negative index instead of computing length-1-i."""
    half = len(li) // 2
    for i in range(half):
        li[i], li[-i - 1] = li[-i - 1], li[i]
# Demo for the negative-index variant.
li = [32,13,53,25,76,21]
reverseList2(li)
print(li)
# Slicing builds a *new* reversed list rather than mutating in place.
li2 = [3,4,56,24,25,61,542,356]
li2 = li2[::-1]
print(li2)
import multiprocessing
import time
class ReadWriteLock(object):
    """ A lock that allows multiple readers and one writer.
    NOTE: written for Python 2 (print statements). Writers are preferred:
    readers wait while any writer is active *or waiting*.
    NOTE(review): the counters below are plain instance attributes — if this
    object is shared across processes (the Manager suggests that intent),
    each process may see its own copy; confirm how instances are shared.
    Members:
    print_lock - lock to print debug information
    lock - lock to make ReadWriteLock operations atomic
    readGo - Condition Variable that Readers wait on
    writeGo - Condition Variable that Writers wait on
    activeReaders - # of active Readers (must be >= 0)
    activeWriters - # of active Writers (must be >= 0 and <= 1)
    waitingReaders - # of waiting Readers (must be >= 0)
    waitingWriters - # of waiting Writers (must be >= 0)
    """
    def __init__(self):
        """Initialize a ReadWriteLock."""
        # The Manager makes the locks/conditions shareable across processes.
        manager = multiprocessing.Manager()
        self._print_lock = manager.Lock()
        self._lock = manager.Lock()
        # Both condition variables share self._lock so that waiting and
        # state changes are atomic with respect to each other.
        self._readGo = manager.Condition(self._lock)
        self._writeGo = manager.Condition(self._lock)
        self._activeReaders = 0
        self._activeWriters = 0
        self._waitingReaders = 0
        self._waitingWriters = 0
    def start_read(self):
        """
        Acquire RWLock
        Check if ok to Read else wait on cv
        Release RWLock
        """
        self._print_lock.acquire()
        print "Entered start_read"
        print time.time()
        self._print_lock.release()
        self._lock.acquire()
        self._print_lock.acquire()
        print "Got lock in start_read"
        print time.time()
        self._print_lock.release()
        self._waitingReaders += 1
        self.print_state_vars()
        # Mesa-style CV: re-check the predicate after every wakeup.
        while self._read_should_wait():
            self._readGo.wait()
        self._waitingReaders -= 1
        self._activeReaders += 1
        self.print_state_vars()
        self._lock.release()
    def done_read(self):
        """
        Acquire RWLock
        Decrement number of activeReaders
        Notify a waitingWriter
        Release RWLock
        """
        self._print_lock.acquire()
        print "Entered done_read"
        print time.time()
        self._print_lock.release()
        self._lock.acquire()
        self._print_lock.acquire()
        print "Got lock in done_read"
        print time.time()
        self._print_lock.release()
        self._activeReaders -= 1
        # NOTE(review): print_state_vars() returns None, so this prints an
        # extra "None" line — probably meant to be a bare call.
        print self.print_state_vars()
        # Last reader out wakes one waiting writer.
        if self._activeReaders == 0 and self._waitingWriters > 0:
            self._writeGo.notify()
        self._lock.release()
    def start_write(self):
        """
        Acquire RWLock
        Check if ok to Write else wait on cv
        Release RWLock
        """
        self._print_lock.acquire()
        print "Entered start_write"
        print time.time()
        self._print_lock.release()
        self._lock.acquire()
        self._print_lock.acquire()
        print "Got lock in start_write"
        print time.time()
        self._print_lock.release()
        self._waitingWriters += 1
        self.print_state_vars()
        # Mesa-style CV: re-check the predicate after every wakeup.
        while self._write_should_wait():
            self._writeGo.wait()
        self._waitingWriters -= 1
        self._activeWriters += 1
        self.print_state_vars()
        self._lock.release()
    def done_write(self):
        """
        Acquire RWLock
        Decrement activeWriters
        Wake a waitingWriters if any
        If no waitingWriters, wake a waitingReader if any
        Release RWLock
        """
        self._print_lock.acquire()
        print "Entered done_write"
        print time.time()
        self._print_lock.release()
        self._lock.acquire()
        self._print_lock.acquire()
        print "Got lock in done_write"
        print time.time()
        self._print_lock.release()
        self._activeWriters -= 1
        # NOTE(review): same stray "print None" as in done_read.
        print self.print_state_vars()
        # Writer preference: hand off to another writer first; otherwise
        # release the whole waiting reader cohort at once.
        if self._waitingWriters > 0:
            self._writeGo.notify()
        elif self._waitingReaders > 0:
            self._readGo.notify_all()
        self._lock.release()
    def _read_should_wait(self):
        """
        Read should wait if theres any active or waiting writers
        """
        return self._activeWriters > 0 or self._waitingWriters > 0
    def _write_should_wait(self):
        """
        Write should wait if there's any active writer or readers
        """
        return self._activeWriters > 0 or self._activeReaders > 0
    def print_state_vars(self):
        # Debug helper: dump the four state counters under the print lock.
        self._print_lock.acquire()
        print "Active Readers: {0}".format(self._activeReaders)
        print "Active Writers: {0}".format(self._activeWriters)
        print "Waiting Readers: {0}".format(self._waitingReaders)
        print "Waiting Writers: {0}".format(self._waitingWriters)
        self._print_lock.release()
|
"""A module that provides plotters for Vicon markers that have been tracked via biplane fluoroscopy to ascertain the
spatiotemporal syncing accuracy between the Vicon and biplane fluoroscopy systems ."""
import numpy as np
from typing import Sequence, List
import matplotlib.figure
import matplotlib.pyplot as plt
from biplane_kine.graphing.common_graph_utils import make_interactive
from biplane_kine.graphing.smoothing_graph_utils import marker_graph_init, marker_graph_add, marker_graph_title
from biplane_kine.graphing.vicon_accuracy_graph_utils import marker_diff_graph
from biplane_kine.vicon_biplane_diff import BiplaneViconDiff, BiplaneViconSmoothDiff
class ViconAccuracyPlotter:
    """Plotter that enables comparing marker position data as measured by Vicon (raw/labeled) and biplane
    fluoroscopy.
    Attributes
    ----------
    trial_name: str
        Trial identifier.
    marker_name: str
        Marker identifier.
    biplane_vicon_diff: biplane_kine.vicon_biplane_diff.BiplaneViconDiff
        Contains differences between marker position data as measured via Vicon and biplane fluoroscopy, and associated
        statistics.
    vicon_data_raw: numpy.ndarray, (N, 3)
        Raw (labeled) Vicon marker data positions.
    vicon_indices: numpy.ndarray, (N, )
        Zero-based indices for the Vicon frames.
    vicon_frames: numpy.ndarray, (N, )
        One-based indices for the Vicon frames - useful for plotting.
    biplane_data: numpy.ndarray, (N, )
        Marker data positions as measured via biplane fluoroscopy.
    """
    def __init__(self, trial_name: str, marker_name: str, biplane_vicon_diff: BiplaneViconDiff):
        self.trial_name = trial_name
        self.marker_name = marker_name
        self.biplane_vicon_diff = biplane_vicon_diff
        self.vicon_data_raw = biplane_vicon_diff.vmd_fluoro
        self.vicon_indices = np.arange(self.vicon_data_raw.shape[0])
        self.vicon_frames = self.vicon_indices + 1
        # NaN-fill the biplane positions over the full trial timeline, then
        # copy in the frames where the marker was actually tracked.
        self.biplane_data = np.full((self.vicon_data_raw.shape[0], 3), np.nan)
        self.biplane_data[biplane_vicon_diff.biplane_marker_data.indices, :] = \
            biplane_vicon_diff.biplane_marker_data.data
        # biplane_vicon_diff.raw_diff contains differences between Vicon and biplane only at the frames where the
        # marker was tracked via biplane fluoroscopy. This doesn't include the entire biplane fluoroscopy trial. This
        # is fine, but when graphing it's useful to see the entire trial timeline. Matplotlib doesn't graph NaN values
        # so we first create a matrix with all NaN values that encompasses the entire timeline of the biplane
        # fluoroscopy trial. Then copy the Vicon/biplane fluoroscopy differences to the frames where the marker was
        # measured via biplane fluoroscopy.
        self._diff_raw = np.full((self.vicon_data_raw.shape[0], 3), np.nan)
        self._diff_raw[biplane_vicon_diff.biplane_marker_data.indices, :] = biplane_vicon_diff.raw_diff
        self._diff_raw_scalar = np.full((self.vicon_data_raw.shape[0],), np.nan)
        self._diff_raw_scalar[biplane_vicon_diff.biplane_marker_data.indices] = biplane_vicon_diff.raw_diff_scalar
    def plot(self) -> List[matplotlib.figure.Figure]:
        """Plot figures quantifying differences in marker position as measured via Vicon versus biplane fluoroscopy.
        Figure 0: Trend plots of marker position tracked via biplane fluoroscopy vs Vicon (raw/labeled) overlayed
        Figure 1: Trend plots of the difference between marker position data as tracked via biplane fluoroscopy vs
        Vicon (raw/labeled)
        """
        title = self.trial_name + ' ' + self.marker_name
        figs = []
        # plot biplane and vicon marker data together
        acc_vicon_fig = self.plot_biplane_vicon(title, 0, 'vicon_data_raw', 'Raw')
        figs.append(acc_vicon_fig)
        # plot difference
        diff_fig = self.plot_diff(title, 1, ['_diff_raw', '_diff_raw_scalar'], 'raw')
        figs.append(diff_fig)
        return figs
    def plot_biplane_vicon(self, title: str, fig_num: int, vicon_field: str, vicon_type: str) \
            -> matplotlib.figure.Figure:
        """Plot overlayed marker position data as measured via Vicon and biplane fluoroscopy."""
        fig = plt.figure(num=fig_num)
        # Three stacked axes sharing the x (frame) axis — presumably one per
        # spatial coordinate; the component layout is decided by
        # marker_graph_init.
        ax = fig.subplots(3, 1, sharex=True)
        lines_vicon = marker_graph_init(ax, getattr(self, vicon_field), 'Distance (mm)', self.vicon_frames,
                                        color='limegreen', marker='.', lw=1, ms=2)
        lines_biplane = marker_graph_add(ax, self.biplane_data, self.vicon_frames, color='indigo', marker='.')
        fig.legend((lines_biplane[0], lines_vicon[0]), ('Biplane', vicon_type + ' Vicon'), 'upper right', ncol=3,
                   columnspacing=0.3, handlelength=1.0)
        marker_graph_title(fig, title)
        make_interactive()
        return fig
    def plot_diff(self, title: str, fig_num: int, vicon_fields: Sequence[str], diff_field: str) \
            -> matplotlib.figure.Figure:
        """Plot difference between marker position data as measured via Vicon and biplane fluoroscopy."""
        fig = plt.figure(num=fig_num)
        # Top axis: per-component (x/y/z) differences; bottom axis: scalar
        # distance, at half the height.
        ax = fig.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [2, 1]})
        lines_xyz = marker_diff_graph(ax[0], getattr(self, vicon_fields[0]), 'Distance (mm)', x_label=None,
                                      x_data=self.vicon_frames)
        line_scalar = marker_diff_graph(ax[1], getattr(self, vicon_fields[1]), 'Distance (mm)', None,
                                        self.vicon_frames, color='indigo')
        fig.legend(lines_xyz + line_scalar, ('X', 'Y', 'Z', '| |'), loc='lower center', ncol=4, columnspacing=0.3,
                   handlelength=1.0)
        fig.suptitle(title, fontsize=11, fontweight='bold')
        plt.tight_layout(pad=1.0, h_pad=0, rect=(0, 0.015, 1, 1))
        # add RMS, MAE, Max for each individual x, y, z
        # Text boxes are pinned to three figure corners, each outlined in the
        # matching line color from the active prop cycle.
        text_align = [(0.01, 0.99, 'left', 'top'), (0.99, 0.99, 'right', 'top'), (0.01, 0.01, 'left', 'bottom')]
        cc = plt.rcParams['axes.prop_cycle'].by_key()['color']
        for n in range(3):
            xyz_text = 'RMS: {:.2f} MAE: {:.2f} Max: {:.2f}'\
                .format(getattr(self.biplane_vicon_diff, diff_field + '_rms')[n],
                        getattr(self.biplane_vicon_diff, diff_field + '_mae')[n],
                        getattr(self.biplane_vicon_diff, diff_field + '_max')[n])
            ax[0].text(text_align[n][0], text_align[n][1], xyz_text, ha=text_align[n][2], va=text_align[n][3],
                       transform=fig.transFigure, fontweight='bold',
                       bbox=dict(ec=cc[n], fc='None', boxstyle='round', lw=2))
        # add RMS, MAE, Max for scalar
        scalar_text = 'RMS: {:.2f} MAE: {:.2f} Max: {:.2f}'\
            .format(getattr(self.biplane_vicon_diff, diff_field + '_rms_scalar'),
                    getattr(self.biplane_vicon_diff, diff_field + '_mae_scalar'),
                    getattr(self.biplane_vicon_diff, diff_field + '_max_scalar'))
        ax[0].text(0.99, 0.01, scalar_text, ha='right', va='bottom', transform=fig.transFigure, fontweight='bold',
                   bbox=dict(ec='indigo', fc='None', boxstyle='round', lw=2))
        make_interactive()
        return fig
class ViconAccuracySmoothingPlotter(ViconAccuracyPlotter):
    """Plotter that enables comparing marker position data as measured by Vicon (then smoothed) and biplane
    fluoroscopy.

    Attributes
    ----------
    vicon_data_smoothed: numpy.ndarray, (N, 3)
        Smoothed Vicon marker data position.
    """

    def __init__(self, trial_name: str, marker_name: str, biplane_vicon_smooth_diff: BiplaneViconSmoothDiff):
        super().__init__(trial_name, marker_name, biplane_vicon_smooth_diff)
        self.vicon_data_smoothed = biplane_vicon_smooth_diff.smoothed_vmd_fluoro
        # see comment in base class as to why the operation below is performed:
        # differences exist only at the frames the biplane system tracked, so
        # they are scattered into NaN-filled arrays spanning all Vicon frames.
        self._diff_smoothed = np.full((self.vicon_data_raw.shape[0], 3), np.nan)
        self._diff_smoothed[biplane_vicon_smooth_diff.biplane_marker_data.indices, :] = \
            biplane_vicon_smooth_diff.smoothed_diff
        self._diff_smoothed_scalar = np.full((self.vicon_data_raw.shape[0],), np.nan)
        self._diff_smoothed_scalar[biplane_vicon_smooth_diff.biplane_marker_data.indices] = \
            biplane_vicon_smooth_diff.smoothed_diff_scalar

    def plot(self) -> List[matplotlib.figure.Figure]:
        """Plot figures quantifying differences in marker position as measured via Vicon, Vicon (then smoothed), and
        biplane fluoroscopy.

        Figure 0: Trend plots of marker position tracked via biplane fluoroscopy vs Vicon (raw/labeled) overlayed
        Figure 1: Trend plots of the difference between marker position data as tracked via biplane fluoroscopy vs
        Vicon (raw/labeled)
        Figure 2: Trend plots of marker position tracked via biplane fluoroscopy vs Vicon (smoothed) overlayed
        Figure 3: Trend plots of the difference between marker position data as tracked via biplane fluoroscopy vs
        Vicon (smoothed)
        Figure 4: Figure 1 and 3 overlayed on top of each other
        """
        title = self.trial_name + ' ' + self.marker_name
        # Figures 0 and 1 are produced by the base class.
        figs = super().plot()
        # plot biplane and vicon marker data together
        acc_vicon_fig = self.plot_biplane_vicon(title, 2, 'vicon_data_smoothed', 'Smooth ')
        figs.append(acc_vicon_fig)
        # plot difference
        diff_fig = self.plot_diff(title, 3, ['_diff_smoothed', '_diff_smoothed_scalar'], 'smoothed')
        figs.append(diff_fig)
        # plot all differences in the same plot
        diff_all_fig = self.plot_all_diff(title, 4)
        figs.append(diff_all_fig)
        return figs

    def plot_all_diff(self, title: str, fig_num: int) -> matplotlib.figure.Figure:
        """Overlay plot differences in marker position as measured via Vicon and biplane fluoroscopy, and differences
        in marker position as measured Vicon (then smoothed) and biplane fluoroscopy."""
        fig = plt.figure(num=fig_num)
        ax = fig.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [2, 1]})
        # Raw differences are drawn with dashed/dotted linestyles ...
        lines_xyz_raw = marker_diff_graph(ax[0], self._diff_raw, 'Distance (mm)', x_label=None,
                                          x_data=self.vicon_frames, ls='--')
        line_scalar_raw = marker_diff_graph(ax[1], self._diff_raw_scalar, 'Distance (mm)', 'Frame Number',
                                            self.vicon_frames, color='indigo', ls=':')
        # reset colors so the smoothed traces reuse the same color per axis,
        # distinguished from raw only by linestyle
        ax[0].set_prop_cycle(None)
        lines_xyz_smooth = marker_diff_graph(ax[0], self._diff_smoothed, 'Distance (mm)', x_label=None,
                                             x_data=self.vicon_frames)
        line_scalar_smooth = marker_diff_graph(ax[1], self._diff_smoothed_scalar, 'Distance (mm)', 'Frame Number',
                                               self.vicon_frames, color='indigo')
        # Two legends (raw bottom-left, smooth bottom-right); the first must be
        # re-added via add_artist because the second fig.legend call replaces it.
        leg1 = fig.legend(lines_xyz_raw + line_scalar_raw, ('X (Raw)', 'Y', 'Z', '$\\mid \\mid$'), loc='lower left',
                          handletextpad=0.1, ncol=4, columnspacing=0.5, handlelength=1.0, bbox_to_anchor=(0.0, 0.0))
        fig.legend(lines_xyz_smooth + line_scalar_smooth, ('X (Smooth)', 'Y', 'Z', '$\\mid \\mid$'), loc='lower right',
                   handletextpad=0.1, ncol=4, columnspacing=0.5, handlelength=1.0, bbox_to_anchor=(1.0, 0.0))
        fig.add_artist(leg1)
        fig.suptitle(title, fontsize=11, fontweight='bold')
        plt.tight_layout(pad=1.0, h_pad=0, rect=(0, 0.015, 1, 1))
        # add RMS, MAE, Max summary boxes (raw top-left, smoothed top-right);
        # the raw box uses a dotted edge to match the raw scalar trace.
        raw_text = 'RMS: {:.2f} MAE: {:.2f} Max: {:.2f}'\
            .format(self.biplane_vicon_diff.raw_rms_scalar, self.biplane_vicon_diff.raw_mae_scalar,
                    self.biplane_vicon_diff.raw_max_scalar)
        smooth_text = 'RMS: {:.2f} MAE: {:.2f} Max: {:.2f}'\
            .format(self.biplane_vicon_diff.smoothed_rms_scalar, self.biplane_vicon_diff.smoothed_mae_scalar,
                    self.biplane_vicon_diff.smoothed_max_scalar)
        ax[0].text(0.01, 0.99, raw_text, ha='left', va='top', transform=fig.transFigure, fontweight='bold',
                   bbox=dict(ec='indigo', fc='None', boxstyle='round', ls=':', lw=2))
        ax[0].text(0.99, 0.99, smooth_text, ha='right', va='top', transform=fig.transFigure, fontweight='bold',
                   bbox=dict(ec='indigo', fc='None', boxstyle='round', lw=2))
        make_interactive()
        return fig
|
from Individual import Individual
from Population import Population
from TextProcessing import TextProcessing
import string
import random
import numpy as np
class GeneticAlgorithm:
    """Genetic algorithm that searches for the letter mapping of a
    monoalphabetic substitution cipher.

    Each ``Individual`` carries a candidate alphabet mapping; fitness is
    computed by ``Individual.calculate_fitness`` against a reference
    ("global") corpus and the ciphertext.
    """

    def __init__(self, population_size, number_of_generations, mutation_rate, cross_chance, encoded_text):
        self.population_size = population_size
        self.number_of_generations = number_of_generations
        self.mutation_rate = mutation_rate  # probability an offspring is mutated
        self.cross_chance = cross_chance    # probability a crossover happens per slot
        self.encoded_text = encoded_text    # ciphertext to decode

    @staticmethod
    def __crossover(individual1_passed, individual2_passed):
        """Uniform-style crossover producing two children whose mappings stay
        valid permutations of the alphabet.

        Duplicate target letters are first marked with the placeholder string
        'None' and later filled from the letters not yet used by that child.
        """
        child1 = dict.fromkeys(string.ascii_lowercase, 0)
        child2 = dict.fromkeys(string.ascii_lowercase, 0)
        used_alphabet_child1 = []
        used_alphabet_child2 = []
        for letter in string.ascii_lowercase:
            # With probability 0.5 swap the two parents so each child draws
            # genes from both.
            if random.uniform(0, 1) < 0.5:
                temp = individual1_passed
                individual1_passed = individual2_passed
                individual2_passed = temp
            if individual1_passed.mapped_alphabet[letter] not in child1.values():
                child1[letter] = individual1_passed.mapped_alphabet[letter]
                used_alphabet_child1.append(individual1_passed.mapped_alphabet[letter])
            else:
                # Target letter already taken -> mark slot for later repair.
                child1[letter] = 'None'
            if individual2_passed.mapped_alphabet[letter] not in child2.values():
                child2[letter] = individual2_passed.mapped_alphabet[letter]
                used_alphabet_child2.append(individual2_passed.mapped_alphabet[letter])
            else:
                child2[letter] = 'None'
        # Repair phase: fill placeholder slots with randomly chosen unused
        # letters so every child maps the alphabet one-to-one.
        unused_alphabet_child1 = list(set(string.ascii_lowercase) - set(used_alphabet_child1))
        unused_alphabet_child2 = list(set(string.ascii_lowercase) - set(used_alphabet_child2))
        for alphabet in string.ascii_lowercase:
            if child1[alphabet] == 'None':
                random_letter = random.randint(0, len(unused_alphabet_child1) - 1)
                child1[alphabet] = unused_alphabet_child1[random_letter]
                unused_alphabet_child1.remove(unused_alphabet_child1[random_letter])
            if child2[alphabet] == 'None':
                random_letter = random.randint(0, len(unused_alphabet_child2)-1)
                child2[alphabet] = unused_alphabet_child2[random_letter]
                unused_alphabet_child2.remove(unused_alphabet_child2[random_letter])
        return Individual(list(child1.values())), Individual(list(child2.values()))

    def mutation(self, individual):
        """With probability ``mutation_rate``, swap four random pairs of mapped
        letters (one index from each half of the alphabet; index 13 may fall on
        either side). Returns the (possibly mutated) individual."""
        if random.uniform(0, 1) < self.mutation_rate:
            for letter in range(4):
                random_letter_1 = random.randint(0, 13)
                random_letter_2 = random.randint(13, 25)
                temp_letter = individual.mapped_alphabet[string.ascii_lowercase[random_letter_1]]
                individual.mapped_alphabet[string.ascii_lowercase[random_letter_1]] = individual.mapped_alphabet[string.ascii_lowercase[random_letter_2]]
                individual.mapped_alphabet[string.ascii_lowercase[random_letter_2]] = temp_letter
        return individual

    def tournament(self, generation):
        """Pick 8 random contestants from *generation* (a list of individuals)
        and return the two fittest as parents."""
        parents = []
        for number_of_parent in range(8):
            parents.append(random.choice(generation[:int(self.population_size)]))
        parents.sort(key=lambda x: x.fitness, reverse=True)
        return parents[0], parents[1]

    def evolve(self):
        """Run the GA and return the ciphertext decoded with the best mapping found."""
        my_text_object_global = TextProcessing(text="", text_file_address="Attachment/global_text.txt")
        my_text_object_encoded = TextProcessing(text=self.encoded_text)
        text = my_text_object_encoded.text
        global_text = my_text_object_global.clean_text()
        encoded_text = my_text_object_encoded.clean_text()
        generation = Population(self.population_size, True)
        max_fitness = generation.find_max_fitness(encoded_text, global_text)
        for j in range(self.number_of_generations):
            # Stop early once the best individual reaches the maximum fitness.
            if generation.get_individuals()[0].fitness != max_fitness:
                for chromosome in generation.get_individuals():
                    chromosome.calculate_fitness(global_text, encoded_text)
                generation.get_individuals().sort(key=lambda x: x.fitness, reverse=True)
                # Elitism: carry the top 10% over unchanged.
                new_generation = generation.get_individuals()[:int(self.population_size * 0.1)]
                for i in range(int(self.population_size * 0.7)):
                    if random.uniform(0, 1) < self.cross_chance:
                        # Draw two distinct parents from the front of the pool.
                        while True:
                            parent1, parent2 = random.choices(new_generation[:int(self.population_size * 0.8)], k=2)
                            if parent1 != parent2:
                                break
                        child1, child2 = self.__crossover(parent1, parent2)
                        child1 = self.mutation(child1)
                        child2 = self.mutation(child2)
                        new_generation.append(child1)
                        new_generation.append(child2)
                generation.save_individuals(new_generation)
            else:
                break
        # Final ranking on the unclean texts before decoding with the winner.
        for chromosome in generation.get_individuals():
            chromosome.calculate_fitness(my_text_object_global.get_text(), my_text_object_encoded.get_text())
        generation.get_individuals().sort(key=lambda x: x.fitness, reverse=True)
        return generation.get_individuals()[0].decode_text(text)
|
from fastapi import FastAPI
cinema = FastAPI()

# Short per-genre listings: rank -> "Title (rating)". These strings are
# returned verbatim by the genre endpoints below.
movie_genre = {
    'horrors': {
        '1': 'Я - легенда! (8.5)',
        '2': 'Чужой (8.3)',
        '3': 'Мумия (8.3)'
    },
    'drama': {
        '1': 'Зеленая миля (9.1)',
        '2': 'FORD против FERRARI (9.0)',
        '3': '1+1 (9.0)'
    },
    'comedy': {
        '1': 'Круэлла (8.9)',
        '2': 'Король Лев (8.9)',
        '3': 'Душа (8.8)'
    },
    'war': {
        '1': 'По соображениям совести (8.5)',
        '2': 'Храброе сердце (8.5)',
        '3': 'Брестская крепость (8.4)'
    }
}
# Detailed record per genre and rank; keys mirror ``movie_genre`` so the
# detail endpoints can index by the same genre/number pair.
full_list = {
    'horrors': {
        '1': {
            'title': 'Я - легенда!',
            'Released': '2007',
            'Producer': 'Френсис Лоуренс',
            'Rating': '8.5',
            'Country': 'USA'
        },
        '2': {
            'title': 'Чужой',
            'Released': '1979',
            'Producer': 'Ридли Скотт',
            'Rating': '8.3',
            'Country': 'USA'
        },
        '3': {
            'title': 'Мумия',
            'Released': '1999',
            'Producer': 'Стивен Соммерс',
            'Rating': '8.3',
            'Country': 'USA'
        }
    },
    'drama': {
        '1': {
            'title': 'Зеленая миля',
            'Released': '1999',
            'Producer': 'Фрэнк Дарабонт',
            'Rating': '9.1',
            'Country': 'USA'
        },
        '2': {
            'title': 'FORD против FERRARI',
            'Released': '2019',
            'Producer': 'Джеймс Мэнголд',
            'Rating': '9.0',
            'Country': 'USA'
        },
        '3': {
            'title': '1+1',
            'Released': '2011',
            'Producer': 'Оливье Накаш, Эрик Толедано',
            'Rating': '9.0',
            'Country': 'France'
        }
    },
    'comedy': {
        '1': {
            'title': 'Круэлла',
            'Released': '2021',
            'Producer': 'Крэйг Гиллеспи',
            'Rating': '8.9',
            'Country': 'USA'
        },
        '2': {
            'title': 'Король Лев',
            'Released': '1994',
            'Producer': 'Роджер Аллерс',
            'Rating': '8.9',
            'Country': 'USA'
        },
        '3': {
            'title': 'Душа',
            'Released': '2020',
            'Producer': 'Пит Доктер',
            'Rating': '8.8',
            'Country': 'USA'
        }
    },
    'war': {
        '1': {
            'title': 'По соображениям совести',
            'Released': '2016',
            'Producer': 'Мэл Гибсон',
            'Rating': '8.5',
            'Country': 'USA'
        },
        '2': {
            'title': 'Храброе сердце',
            'Released': '1995',
            'Producer': 'Мэл Гибсон',
            'Rating': '8.5',
            'Country': 'USA'
        },
        '3': {
            'title': 'Брестская крепость',
            'Released': '2010',
            'Producer': 'Александр Котт',
            'Rating': '8.4',
            'Country': 'Russia'
        }
    }}
@cinema.get('/')
def main_page():
    """Root endpoint: greet the visitor and point them at /movies."""
    parts = (
        'Здравствуйте! Добро пожаловать на ',
        'главную страницу нашего сервиса.',
        ' Здесь мы можете найти самые лучшие ',
        'фильмы разных жанров! ',
        'help: переходите по /movies',
    )
    return ''.join(parts)
@cinema.get('/movies')
def genre_of_movie():
    """List the genre sub-endpoints the client can query next."""
    parts = (
        'Выберите жанр фильма, чтобы выполнить запрос:',
        ' /horrors - Ужасы, ',
        ' /drama - Драма,',
        ' /comedy - Комедия, ',
        ' /war - Военный, ',
    )
    return ''.join(parts)
# Display name of each genre section (keys mirror ``movie_genre``).
GENRE_TITLES = {
    'horrors': 'Ужасы',
    'drama': 'Драма',
    'comedy': 'Комедия',
    'war': 'Военный',
}


@cinema.get('/movies/{genre}')
def open_list(genre):
    """Show the ranked movie list for *genre*.

    The original had four copy-pasted branches that differed only in the
    section name; they are folded into a single lookup-driven template.
    Unknown genres get an apology message, exactly as before.
    """
    if genre not in movie_genre:
        return "К сожалению, на нашем сайте, пока нет фильмов по жанру %s (" % genre
    return "Вы находитесь в разделе '%s', " \
           "ниже список фильмов по рейтингу %s, " \
           "чтобы узнать подробности, переходите по соответствующему номеру" \
           % (GENRE_TITLES[genre], movie_genre[genre])
@cinema.get('/movies/horrors/{num}')
def open_horrors(num):
    """Return the detail record for horror movie number *num*."""
    if num not in movie_genre['horrors']:
        return 'В данном списке не нашлось фильма, по номеру %s' % num
    return "Ваш фильм по запросу: %s " % full_list['horrors'][num]
@cinema.get('/movies/drama/{num}')
def open_drama(num):
    """Return the detail record for drama movie number *num*."""
    if num not in movie_genre['drama']:
        return 'В данном списке не нашлось фильма, по номеру %s' % num
    return "Ваш фильм по запросу: %s " % full_list['drama'][num]
@cinema.get('/movies/comedy/{num}')
def open_comedy(num):
    """Return the detail record for comedy movie number *num*."""
    if num not in movie_genre['comedy']:
        return 'В данном списке не нашлось фильма, по номеру %s' % num
    return "Ваш фильм по запросу: %s " % full_list['comedy'][num]
@cinema.get('/movies/war/{num}')
def open_war(num):
    """Return the detail record for war movie number *num*."""
    if num not in movie_genre['war']:
        return 'В данном списке не нашлось фильма, по номеру %s' % num
    return "Ваш фильм по запросу: %s " % full_list['war'][num]
|
# -*- coding: utf-8 -*-
"""
绘制3d图形
"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def plot_3d(np_3d):
    """Render each 2-D slice of a 3-D array as a 3-D surface plot.

    Parameters
    ----------
    np_3d : numpy.ndarray
        3-D tensor; one figure is shown per slice along axis 0.
    """
    shape = np_3d.shape
    for i in range(shape[0]):
        # One figure per slice.
        fig = plt.figure()
        # Turn the figure into 3-D axes (one of the two ways to create them).
        ax = Axes3D(fig)
        # x spans the columns, y spans the rows of the slice.
        x = np.arange(0, shape[2], 1)
        y = np.arange(0, shape[1], 1)
        # Build the coordinate grid.
        X, Y = np.meshgrid(x, y)
        # Height values come straight from the i-th slice.
        Z = np_3d[i, :, :]
        print(Z.shape)
        # Draw the 3-D surface.
        # rstride/cstride: sampling stride across rows/columns.
        # (rcount/ccount set a point *count* instead and cannot be combined
        # with rstride/cstride.)
        # cmap picks the colour map; 'rainbow' or e.g. cm.coolwarm both work:
        #   ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm)
        ax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, cmap = plt.get_cmap('rainbow'))
        plt.show()
def plot_2d(np_2d):
    """Plot each row of a 2-D array as a separate line figure.

    Parameters
    ----------
    np_2d : numpy.ndarray
        2-D array; one figure is shown per row.
        (The original comment claimed a 3-D tensor — the code indexes
        ``np_2d[i, :]``, i.e. 2-D.)
    """
    shape = np_2d.shape
    for i in range(shape[0]):
        plt.figure(i)  # one numbered figure object per row
        plt.plot(np_2d[i, :])
        plt.show()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkga.endpoint import endpoint_data
class CreateEndpointGroupRequest(RpcRequest):
    """RPC request wrapper for the Ga (Global Accelerator) ``CreateEndpointGroup``
    API, version 2019-11-20.

    Generated-SDK style: every query parameter is exposed through a paired
    ``get_``/``set_`` accessor that reads/writes the underlying query-parameter
    dict; repeated (list) parameters are flattened to ``Name.N.Field`` keys.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Ga', '2019-11-20', 'CreateEndpointGroup','gaplus')
        self.set_method('POST')
        # Attach endpoint-resolution data when the installed core SDK supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_PortOverridess(self): # RepeatList
        return self.get_query_params().get('PortOverrides')

    def set_PortOverridess(self, PortOverrides): # RepeatList
        # Flatten to 1-based 'PortOverrides.N.<Field>' query keys.
        for depth1 in range(len(PortOverrides)):
            if PortOverrides[depth1].get('ListenerPort') is not None:
                self.add_query_param('PortOverrides.' + str(depth1 + 1) + '.ListenerPort', PortOverrides[depth1].get('ListenerPort'))
            if PortOverrides[depth1].get('EndpointPort') is not None:
                self.add_query_param('PortOverrides.' + str(depth1 + 1) + '.EndpointPort', PortOverrides[depth1].get('EndpointPort'))

    def get_HealthCheckEnabled(self): # Boolean
        return self.get_query_params().get('HealthCheckEnabled')

    def set_HealthCheckEnabled(self, HealthCheckEnabled): # Boolean
        self.add_query_param('HealthCheckEnabled', HealthCheckEnabled)

    def get_ClientToken(self): # String
        return self.get_query_params().get('ClientToken')

    def set_ClientToken(self, ClientToken): # String
        self.add_query_param('ClientToken', ClientToken)

    def get_HealthCheckIntervalSeconds(self): # Integer
        return self.get_query_params().get('HealthCheckIntervalSeconds')

    def set_HealthCheckIntervalSeconds(self, HealthCheckIntervalSeconds): # Integer
        self.add_query_param('HealthCheckIntervalSeconds', HealthCheckIntervalSeconds)

    def get_Description(self): # String
        return self.get_query_params().get('Description')

    def set_Description(self, Description): # String
        self.add_query_param('Description', Description)

    def get_HealthCheckProtocol(self): # String
        return self.get_query_params().get('HealthCheckProtocol')

    def set_HealthCheckProtocol(self, HealthCheckProtocol): # String
        self.add_query_param('HealthCheckProtocol', HealthCheckProtocol)

    def get_EndpointRequestProtocol(self): # String
        return self.get_query_params().get('EndpointRequestProtocol')

    def set_EndpointRequestProtocol(self, EndpointRequestProtocol): # String
        self.add_query_param('EndpointRequestProtocol', EndpointRequestProtocol)

    def get_ListenerId(self): # String
        return self.get_query_params().get('ListenerId')

    def set_ListenerId(self, ListenerId): # String
        self.add_query_param('ListenerId', ListenerId)

    def get_HealthCheckPath(self): # String
        return self.get_query_params().get('HealthCheckPath')

    def set_HealthCheckPath(self, HealthCheckPath): # String
        self.add_query_param('HealthCheckPath', HealthCheckPath)

    def get_EndpointConfigurationss(self): # RepeatList
        return self.get_query_params().get('EndpointConfigurations')

    def set_EndpointConfigurationss(self, EndpointConfigurations): # RepeatList
        # Flatten to 1-based 'EndpointConfigurations.N.<Field>' query keys.
        for depth1 in range(len(EndpointConfigurations)):
            if EndpointConfigurations[depth1].get('Type') is not None:
                self.add_query_param('EndpointConfigurations.' + str(depth1 + 1) + '.Type', EndpointConfigurations[depth1].get('Type'))
            if EndpointConfigurations[depth1].get('EnableClientIPPreservation') is not None:
                self.add_query_param('EndpointConfigurations.' + str(depth1 + 1) + '.EnableClientIPPreservation', EndpointConfigurations[depth1].get('EnableClientIPPreservation'))
            if EndpointConfigurations[depth1].get('Weight') is not None:
                self.add_query_param('EndpointConfigurations.' + str(depth1 + 1) + '.Weight', EndpointConfigurations[depth1].get('Weight'))
            if EndpointConfigurations[depth1].get('EnableProxyProtocol') is not None:
                self.add_query_param('EndpointConfigurations.' + str(depth1 + 1) + '.EnableProxyProtocol', EndpointConfigurations[depth1].get('EnableProxyProtocol'))
            if EndpointConfigurations[depth1].get('Endpoint') is not None:
                self.add_query_param('EndpointConfigurations.' + str(depth1 + 1) + '.Endpoint', EndpointConfigurations[depth1].get('Endpoint'))

    def get_EndpointGroupType(self): # String
        return self.get_query_params().get('EndpointGroupType')

    def set_EndpointGroupType(self, EndpointGroupType): # String
        self.add_query_param('EndpointGroupType', EndpointGroupType)

    def get_AcceleratorId(self): # String
        return self.get_query_params().get('AcceleratorId')

    def set_AcceleratorId(self, AcceleratorId): # String
        self.add_query_param('AcceleratorId', AcceleratorId)

    def get_Tags(self): # RepeatList
        return self.get_query_params().get('Tag')

    def set_Tags(self, Tag): # RepeatList
        # Flatten to 1-based 'Tag.N.Key' / 'Tag.N.Value' query keys.
        for depth1 in range(len(Tag)):
            if Tag[depth1].get('Key') is not None:
                self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
            if Tag[depth1].get('Value') is not None:
                self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))

    def get_TrafficPercentage(self): # Integer
        return self.get_query_params().get('TrafficPercentage')

    def set_TrafficPercentage(self, TrafficPercentage): # Integer
        self.add_query_param('TrafficPercentage', TrafficPercentage)

    def get_HealthCheckPort(self): # Integer
        return self.get_query_params().get('HealthCheckPort')

    def set_HealthCheckPort(self, HealthCheckPort): # Integer
        self.add_query_param('HealthCheckPort', HealthCheckPort)

    def get_ThresholdCount(self): # Integer
        return self.get_query_params().get('ThresholdCount')

    def set_ThresholdCount(self, ThresholdCount): # Integer
        self.add_query_param('ThresholdCount', ThresholdCount)

    def get_EndpointGroupRegion(self): # String
        return self.get_query_params().get('EndpointGroupRegion')

    def set_EndpointGroupRegion(self, EndpointGroupRegion): # String
        self.add_query_param('EndpointGroupRegion', EndpointGroupRegion)

    def get_Name(self): # String
        return self.get_query_params().get('Name')

    def set_Name(self, Name): # String
        self.add_query_param('Name', Name)
|
# /usr/bin/env python
# Download the twilio-python library from http://twilio.com/docs/libraries
import os
from twilio.rest import Client
# Find these values at https://twilio.com/user/account
# SECURITY: credentials were hard-coded in source (and `os` was imported but
# unused). Read them from the environment when set; the original literals are
# kept only as a backward-compatible fallback.
# NOTE(review): secrets committed to source control should be rotated.
account_sid = os.environ.get("TWILIO_ACCOUNT_SID", "ACe4e43683af664305fcc2094c6f181eea")
auth_token = os.environ.get("TWILIO_AUTH_TOKEN", "cfc35a637ea16d3ed060c412cd34a7c6")
client = Client(account_sid, auth_token)
# Send a single SMS; `message` holds the created message resource.
message = client.api.account.messages.create(to="+17345484922",
                                             from_="+17344362074",
                                             body="OFF WITH YOUR HEAAAAD!")
|
import shelve
from django.db.models import F
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from person.models import Person
import rtyaml
class Command(BaseCommand):
    help = 'Reports if any legislator twitter bios change.'
    # Shelf file persisting the last-seen serialized profile per legislator.
    storage_fn = 'data/misc/twitter_bios.shelf'

    def handle(self, *args, **options):
        # Imported lazily so the command module loads without twitter deps.
        from website.util import twitter_api_client
        tweepy_client = twitter_api_client()
        # (twitter screen name, GovTrack person id) for everyone with a Twitter id.
        screen_names = list(Person.objects.exclude(twitterid=None)
            .values_list("twitterid", "id"))
        # Lowercased lookup because Twitter screen names are case-insensitive.
        twitter_id_to_govtrack_id = dict((s.lower(), g) for (s, g) in screen_names)
        with shelve.open(self.storage_fn) as data:
            while len(screen_names) > 0:
                # Take a batch. (Presumably sized to the lookup endpoint's
                # 100-user limit — TODO confirm.)
                batch = screen_names[:100]
                screen_names[:len(batch)] = []
                for profile in tweepy_client.lookup_users(screen_names=[b[0] for b in batch]):
                    id = str(twitter_id_to_govtrack_id[profile.screen_name.lower()])
                    # Rebinding `profile` to its YAML serialization is
                    # intentional; the original object is not needed after this.
                    profile = rtyaml.dump({
                        "govtrack_id": id,
                        "id": profile.id,
                        "screen_name": profile.screen_name,
                        "name": profile.name,
                        "description": profile.description,
                        "location": profile.location,
                        "entities": profile.entities,
                        "verified": profile.verified,
                    })
                    # Report and store only when the serialized bio is new or changed.
                    if id not in data or data[id] != profile:
                        print(profile)
                        print()
                        data[id] = profile
|
from django.contrib import admin
from core.models import User, Type, Address, Phone
# Register the core models with the Django admin using stock ModelAdmin
# behaviour (no list_display/search customizations yet).
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    pass


@admin.register(Type)
class TypeAdmin(admin.ModelAdmin):
    pass


@admin.register(Address)
class AddressAdmin(admin.ModelAdmin):
    pass


@admin.register(Phone)
class PhoneAdmin(admin.ModelAdmin):
    pass
|
from AccessControl import Unauthorized
from AccessControl.Permissions import delete_objects
from plone.app.content.browser.contents import ContentsBaseAction
from plone.app.content.interfaces import IStructureAction
from plone.base import PloneMessageFactory as _
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.component import getMultiAdapter
from zope.component.hooks import getSite
from zope.i18n import translate
from zope.interface import implementer
import json
@implementer(IStructureAction)
class DeleteAction:
    """Structure action wiring the "Delete" button and its confirmation modal
    into the folder-contents toolbar."""

    template = ViewPageTemplateFile("templates/delete.pt")
    order = 4

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def get_options(self):
        """Return the button/modal configuration consumed by the JS frontend."""
        translated = {
            "tooltip": translate(_("Delete"), context=self.request),
            "title": translate(_("Delete selected items"), context=self.request),
            "yes": translate(_("Yes"), context=self.request),
            "no": translate(_("No"), context=self.request),
        }
        options = {
            "tooltip": translated["tooltip"],
            "id": "delete",
            "icon": "plone-delete",
            "context": "danger",
            "url": self.context.absolute_url() + "/@@fc-delete",
        }
        options["form"] = {
            "title": translated["title"],
            "submitText": translated["yes"],
            "submitContext": "danger",
            "template": self.template(),
            "closeText": translated["no"],
            "dataUrl": self.context.absolute_url() + "/@@fc-delete",
        }
        return options
class DeleteActionView(ContentsBaseAction):
    """Folder-contents action view that deletes the selected objects."""

    required_obj_permission = delete_objects
    success_msg = _("Successfully delete items")
    failure_msg = _("Failed to delete items")

    def __call__(self):
        """With ``render=yes``, return the confirmation dialog HTML as JSON;
        otherwise perform the deletion via the base class."""
        if self.request.form.get("render") != "yes":
            return super().__call__()
        confirm_view = getMultiAdapter(
            (getSite(), self.request), name="delete_confirmation_info"
        )
        selection = self.get_selection()
        catalog = getToolByName(self.context, "portal_catalog")
        # show_inactive so content outside its publication window is still found.
        brains = catalog(UID=selection, show_inactive=True)
        items = [i.getObject() for i in brains]
        self.request.response.setHeader(
            "Content-Type", "application/json; charset=utf-8"
        )
        return json.dumps({"html": confirm_view(items)})

    def action(self, obj):
        """Delete *obj* from its parent, recording lock/permission problems in
        ``self.errors`` instead of raising."""
        parent = obj.aq_inner.aq_parent
        title = self.objectTitle(obj)
        try:
            lock_info = obj.restrictedTraverse("@@plone_lock_info")
        except AttributeError:
            lock_info = None
        if lock_info is not None and lock_info.is_locked():
            self.errors.append(
                _("${title} is locked and cannot be deleted.", mapping={"title": title})
            )
            return
        try:
            parent.manage_delObjects(obj.getId())
        except Unauthorized:
            # Bug fix: this previously used ``self.objectTitle(self.dest)`` —
            # ``self.dest`` does not exist on the delete action (copied from a
            # move action) and raised AttributeError instead of reporting the
            # authorization failure. Use the title computed above.
            self.errors.append(
                _(
                    "You are not authorized to delete ${title}.",
                    mapping={"title": title},
                )
            )
|
from pytest import raises
from pathlib import Path
from scraper.item_scraper.item_scraper import parse
from scraper.item_scraper.validators import (
ValidationError,
validate_css_selector,
validate_file_path,
validate_task,
)
CACHE_DIR = Path(__file__).parent / 'data' / 'cache_dir'
def test_css_selector_validator():
    """A syntactically invalid selector must raise ValidationError."""
    raises(ValidationError, validate_css_selector, '|_|_')
def test_file_path_validator(tmp_path):
    """Nonsense paths raise; an existing file passes as Path or str."""
    existing = tmp_path / 'some_file.txt'
    existing.touch()
    with raises(ValidationError):
        validate_file_path('|_|_')
    for good_path in (existing, str(existing)):
        validate_file_path(good_path)
def test_parse():
    """parse() extracts every configured field from each card on the cached page."""
    task = {
        'item': 'div.card',
        'extract': {
            'image': 'img.card-img-top|src',
            'title': '.card-title|text',
            'description': 'p.card-text|text',
            'nonExistingField': 'p.blabla|text',
        }
    }
    html = (CACHE_DIR / 'https___deals_ellox_science_').read_text()
    items = parse(html, task)
    assert items
    assert len(items) == 8
    for card in items:
        assert card['image'] == '/static/generic.jpg'
        assert 'Super' in card['title']
        assert '99' in card['description']
        assert card['nonExistingField'] is None
|
"""Functions for reading and writing measured light curve data to
SALT2-format files"""
import os
import math
from collections import OrderedDict
import numpy as np
__all__ = ["read", "write", "readdir", "writedir"]

# Lookup tables translating between on-disk SALT2 names and the in-memory
# names used by this module, for both metadata and data columns.

# Reading Metadata
# ----------------
# Names are converted to lowercase, then the following lookup table is used.
SALTMETA_TO_META = {
    'redshift': 'z',
    'z_heliocentric': 'z_helio'}

# Metadata whose names (after above operations) match these are converted to
# float. Any others are left as strings.
FLOAT_META = ['z', 'z_helio', 'z_cmb', 'mwebv']

# Writing Metadata
# ----------------
# Names are converted to uppercase, then the following lookup table is used
# for both correct capitalization and translation.
META_TO_SALTMETA_OLD = {
    'Z': 'Redshift',  # 'Redshift' can actually be any case.
    'REDSHIFT': 'Redshift',
    'Z_HELIO': 'z_heliocentric',
    'Z_HELIOCENTRIC': 'z_heliocentric',
    'Z_CMB': 'z_cmb'}
META_TO_SALTMETA_NEW = {
    'Z': 'REDSHIFT',  # Not sure if this is used.
    'Z_HELIOCENTRIC': 'Z_HELIO',
    'MAGSYS': 'MagSys',
    'Z_SOURCE': 'z_source'}

# Reading Columns
# ---------------
# Names are converted to lowercase, then the following lookup table is used.
SALTCOLS_TO_COLS = {
    'filter': 'band'}

# Columns whose names match these (after conversion to lowercase and lookup
# table) are left as strings. All others are converted to float.
STRING_COLS = ['band', 'magsys', 'instrument', 'filter']

# Writing Columns
# ---------------
# Names are capitalized (lowercase except first
# letter capitalized) and then the following lookup table is used.
COLS_TO_SALTCOLS = {
    'Fluxpsf': 'FluxPsf',
    'Fluxpsferr': 'FluxPsferr',
    'Airmass': 'AirMass',
    'Zp': 'ZP',
    'Magsys': 'MagSys',
    'Band': 'Filter'}
def _rawmeta_to_meta(meta):
    """Normalize raw SALT metadata names and values.

    Keys are lowercased and translated via SALTMETA_TO_META; values of known
    numeric keys (FLOAT_META) are converted to float. Insertion order is
    preserved in the returned OrderedDict.
    """
    newmeta = OrderedDict()
    # .items() instead of the Python-2-only .iteritems(), which raises
    # AttributeError on Python 3 (.items() works on both).
    for key, val in meta.items():
        key = key.lower()
        if key in SALTMETA_TO_META:
            key = SALTMETA_TO_META[key]
        if key in FLOAT_META:
            val = float(val)
        newmeta[key] = val
    return newmeta
def _rawcols_to_cols(cols):
    """Normalize raw SALT column names; convert non-string columns to float.

    Names are lowercased and translated via SALTCOLS_TO_COLS; columns not in
    STRING_COLS have every value converted to float.
    """
    newcols = OrderedDict()
    for rawname in cols:
        name = rawname.lower()
        name = SALTCOLS_TO_COLS.get(name, name)
        if name in STRING_COLS:
            newcols[name] = cols[rawname]
        else:
            newcols[name] = [float(item) for item in cols[rawname]]
    return newcols
def _dict_to_ndarray(d):
    """Convert a dictionary of lists (of equal length) to a structured
    numpy.ndarray.

    Note: mutates *d* in place, replacing each list with a 1-d array (this
    lets numpy size string dtypes automatically).
    """
    # first convert all lists to 1-d arrays, in order to let numpy
    # figure out the necessary size of the string arrays.
    for key in d:
        d[key] = np.array(d[key])
    # Determine dtype of output array.
    dtypelist = []
    for key in d:
        dtypelist.append((key, d[key].dtype))
    # Initialize ndarray and then fill it.
    # next(iter(d)) replaces d.keys()[0], which fails on Python 3 because
    # dict views are not indexable; this form works on both 2 and 3.
    firstkey = next(iter(d))
    col_len = len(d[firstkey])
    result = np.empty(col_len, dtype=dtypelist)
    for key in d:
        result[key] = d[key]
    return result
def _read_lightfile(filename):
    """Read lightfile (deprecated format) and return dictionary of keywords.

    Each non-blank line must contain exactly one "key value" pair.
    Comment lines are not allowed.
    """
    metadata = OrderedDict()
    # Context manager (was a bare open/close) so the handle is released even
    # when a malformed line raises mid-parse.
    with open(filename, 'r') as lightfile:
        for line in lightfile:
            trimmedline = line.strip()
            if len(trimmedline) == 0:
                continue
            lineitems = trimmedline.split()
            # != 2 (was > 2): a single-token line previously slipped past the
            # check and crashed with an unhelpful unpacking ValueError instead
            # of this message.
            if len(lineitems) != 2:
                raise ValueError('expect key value pairs in lightfile: '
                                 '{}'.format(filename))
            key, val = lineitems
            metadata[key] = val
    return metadata
def _read_dictfile(filename):
    """Read a text data file with SALT-format metadata and header tags.

    Such a file has metadata on lines starting with '@' and column names
    on lines starting with '#' and containing a ':' after the column name.
    Data rows may only appear after the '#end' marker.

    Returns
    -------
    metadata : OrderedDict
        File metadata
    cols : OrderedDict of lists
        data columns
    """
    # Initialize output containers.
    metadata = OrderedDict()
    cols = OrderedDict()
    foundend = False
    # Context manager (was a bare open/close) so the file is closed even when
    # a formatting error raises mid-parse; iterating the handle directly also
    # avoids materializing the whole file via readlines().
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            # If a blank line, just skip it.
            if len(line) == 0:
                continue
            # If a metadata line:
            if line[0] == '@':
                pos = line.find(' ')  # Find first space in the line.
                # It must have a space, and it can't be right after the @.
                if pos in [-1, 1]:
                    raise ValueError('Incorrectly formatted metadata line: '
                                     '{}'.format(line))
                key = line[1:pos]
                val = line[pos:].strip()
                metadata[key] = val
            # Comment line, end flag, or column name.
            elif line[0] == '#':
                # Test for the end flag.
                if line[1:].strip(': ') == 'end':
                    foundend = True
                else:
                    pos = line.find(':')
                    if pos in [-1, 1]:
                        continue  # comment line
                    colname = line[1:pos].strip()
                    if colname == '' or ' ' in colname:
                        continue  # comment line
                    if foundend:
                        raise ValueError('Bizarre file format: header line (#) '
                                         'occuring after #end')
                    cols[colname] = []
            # Otherwise, we are probably reading a dataline.
            elif not foundend:
                raise ValueError('data line occuring before #end flag.')
            else:
                lineitems = line.split()
                if len(lineitems) != len(cols):
                    raise ValueError('line length does not match number of '
                                     'named columns in file header')
                # Append one value per declared column, in header order.
                for key, item in zip(cols, lineitems):
                    cols[key].append(item)
    return metadata, cols
def read(filename):
    """Read a new-style SALT2 photometry file.

    The new style is available in snfit version >= 2.3.0.

    Parameters
    ----------
    filename : str

    Returns
    -------
    photdata : `numpy.ndarray`
        Supernova photometric data in a structured array.
    metadata : OrderedDict
        Supernova metadata.
    """
    metadata, cols = _read_dictfile(filename)
    # Convert metadata names and values, and column names and values.
    metadata = _rawmeta_to_meta(metadata)
    cols = _rawcols_to_cols(cols)
    # Make sure there are columns called `band` and `magsys`.
    if not ('band' in cols and 'magsys' in cols):
        # Old-style files carried instrument/band/magsys in the header
        # metadata instead of per-row columns; point the user at readdir().
        if ('instrument' in metadata or 'band' in metadata or
                'magsys' in metadata):
            raise ValueError('Is this an old-style SALT file? '
                             'To read old-style files use readdir()')
        raise ValueError('some mandatory columns missing in file: ' +
                         filename)
    # (A stale 14-line commented-out block that re-parsed an obsolete
    # 'filter' column was removed here — the 'filter' values are already
    # handled by _rawcols_to_cols.)
    photdata = _dict_to_ndarray(cols)
    return photdata, metadata
def write(photdata, metadata, filename):
    """Write a new-style SALT2 photometry file.

    The new style is available in snfit version >= 2.3.0.

    Parameters
    ----------
    photdata : `numpy.ndarray` or dictionary of lists
        Photometric data, in either a structured array or lists
        representing columns.
    metadata : dict
        Supernova metadata.
    filename : str
        Name of file to write to.

    Raises
    ------
    ValueError
        If `photdata` is not a dict/structured array or lacks the
        mandatory filter and magsys fields.
    """
    # Photometry data: infer the input data type and convert to ndarray.
    # BUG FIX: validation now happens *before* the file is opened, so a
    # bad input no longer leaves a partially written file behind.
    if isinstance(photdata, dict):
        photdata = _dict_to_ndarray(photdata)
    if not isinstance(photdata, np.ndarray):
        raise ValueError('Invalid data type {0} for photometry data'
                         .format(type(photdata)))

    # Photometry data: map column names to their SALT spellings.
    colnames = []
    for name in photdata.dtype.names:
        name = name.capitalize()
        if name in COLS_TO_SALTCOLS:
            name = COLS_TO_SALTCOLS[name]
        colnames.append(name)

    # Check that the photometry data contain 'filter' and 'magsys' fields.
    if not ('Filter' in colnames and 'MagSys' in colnames):
        raise ValueError('photometry data missing required field(s)')

    # BUG FIX: use a context manager so the file is closed even if a
    # write fails (the old code leaked the handle on any exception).
    with open(filename, 'w') as outfile:
        # Write metadata, mapping keys to their SALT spellings.
        # BUG FIX: .items() replaces Python-2-only .iteritems().
        for key, val in metadata.items():
            key = key.upper()
            if key in META_TO_SALTMETA_NEW:
                key = META_TO_SALTMETA_NEW[key]
            outfile.write('@{} {}\n'.format(key, val))

        # Write the column headers.
        for colname in colnames:
            outfile.write('#{} :\n'.format(colname))
        outfile.write('#end :\n')

        # Write the data itself, one whitespace-separated row per line.
        for i in range(len(photdata)):
            for name in photdata.dtype.names:
                outfile.write('{} '.format(photdata[i][name]))
            outfile.write('\n')
def readdir(dirname, filenames=None):
    """Read old-style SALT2 photometry files for a single supernova.

    A file named `lightfile` must be in the directory.

    Parameters
    ----------
    dirname : str
        The directory containing the files.
    filenames : list of str (optional)
        Only try to read the given filenames as photometry files. If `None`
        (default), will try to read all files in directory.

    Returns
    -------
    photdata : `numpy.ndarray`
        Structured array containing photometric data.
    metadata : OrderedDict
        Supernova metadata.

    Raises
    ------
    IOError
        If `dirname` is not a directory or has no `lightfile`.
    ValueError
        If a file lacks the required global keys or its columns differ
        from the other files'.
    """
    if dirname[-1] == '/': dirname = dirname[0:-1]
    if not (os.path.exists(dirname) and os.path.isdir(dirname)):
        raise IOError("Not a directory: '{}'".format(dirname))
    dirfilenames = os.listdir(dirname)

    # Get metadata from lightfile.
    if 'lightfile' not in dirfilenames:
        # BUG FIX: was `name`, an undefined variable (NameError).
        raise IOError("no 'lightfile' in directory: '{}'".format(dirname))
    metadata = _read_lightfile(dirname + '/lightfile')
    metadata = _rawmeta_to_meta(metadata)

    # Get list of filenames to read.
    if filenames is None:
        filenames = dirfilenames
    if 'lightfile' in filenames:
        filenames.remove('lightfile')  # We already read the lightfile.
    fullfilenames = [dirname + '/' + f for f in filenames]

    # Read photdata from files.
    allcols = None
    for filename in fullfilenames:
        filemeta, cols = _read_dictfile(filename)

        # Check that all necessary metadata was described.
        if not ('INSTRUMENT' in filemeta and
                'BAND' in filemeta and
                'MAGSYS' in filemeta):
            raise ValueError('not all necessary global keys are defined')

        # Add the metadata to columns, in anticipation of aggregating it
        # with other files.
        # FIX: next(iter(...)) replaces cols.keys()[0], which fails on
        # Python 3 (dict views are not indexable).
        firstkey = next(iter(cols))
        clen = len(cols[firstkey])  # Length of all cols.
        cols['Filter'] = \
            ['{}::{}'.format(filemeta['INSTRUMENT'], filemeta['BAND'])] * clen
        # BUG FIX: was `col_len`, an undefined variable (NameError).
        cols['MagSys'] = [filemeta['MAGSYS']] * clen

        # Convert column names and values.
        cols = _rawcols_to_cols(cols)

        # If this is the first file, initialize data lists.
        if allcols is None:
            allcols = cols
        # Otherwise, if keys match, append lists...
        elif set(allcols.keys()) == set(cols.keys()):
            for key in allcols: allcols[key].extend(cols[key])
        # and if they do not match, raise Error.
        else:
            raise ValueError('column names do not match between files')

    # Now we have all our data in lists in `allcols`. Convert this to
    # a structured numpy array.
    photdata = _dict_to_ndarray(allcols)
    return photdata, metadata
def writedir(photdata, metadata, dirname):
    """Save photometry data and metadata to old-style SALT files
    in a directory.

    Parameters
    ----------
    photdata : `numpy.ndarray` or dict
        structured array or dictionary of equal-length lists
        containing (at least) fields named 'instrument', 'band', 'magsys'
        (or some capitalization thereof).
    metadata : dict
        Dictionary containing metadata to be written to lightfile.
    dirname : string
        Path to directory.

    Raises
    ------
    ValueError
        If `photdata` has the wrong type, lacks 'band'/'magsys' fields,
        or a band value is not 'INSTRUMENT::BAND' formatted.
    """
    # Photometry data: infer the input data type and convert to ndarray.
    if isinstance(photdata, dict):
        photdata = _dict_to_ndarray(photdata)
    if not isinstance(photdata, np.ndarray):
        raise ValueError('Invalid data type {0} for photometry data'
                         .format(type(photdata)))

    # Make the target directory if it doesn't exist.
    if not os.path.exists(dirname): os.makedirs(dirname)

    # Write metadata to the "lightfile".
    # FIXES: .items() replaces Py2-only .iteritems(); `with` guarantees
    # the file is closed on error.
    with open(dirname + '/lightfile', 'w') as outfile:
        for key, val in metadata.items():
            key = key.upper()
            if key in META_TO_SALTMETA_OLD:
                key = META_TO_SALTMETA_OLD[key]
            outfile.write('{} {}\n'.format(key, val))

    # Photometry data:
    # require that the photometry data contain the fields 'band' and 'magsys'.
    if not ('band' in photdata.dtype.names and
            'magsys' in photdata.dtype.names):
        raise ValueError('photometry data missing required field(s)')

    # On output, each SALT photometry file has a single (band, magsys)
    # combination. Find the unique combinations of these in the table.
    index_data = photdata[['band', 'magsys']]
    unique_combos = np.unique(index_data)

    # Get a list of fields besides (band, magsys). These will have data columns.
    # BUG FIX: `dtype.names` is a tuple; deepcopy of a tuple is still a
    # tuple, which has no .remove() (AttributeError). Build a list instead.
    fieldnames = list(photdata.dtype.names)
    fieldnames.remove('band')
    fieldnames.remove('magsys')

    # Convert remaining colnames to their SALT spellings.
    colnames = []
    for name in fieldnames:
        name = name.capitalize()
        if name in COLS_TO_SALTCOLS:
            name = COLS_TO_SALTCOLS[name]
        colnames.append(name)

    # Create a photometry file for each unique combo.
    for band, magsys in unique_combos:
        # `band` should be formatted like INSTRUMENT::BAND. Parse it.
        try:
            instrument, oldband = band.split('::')
        except ValueError:
            raise ValueError('band must be formatted with a double colon (::)')

        filename = '{}/{}_{}_{}.dat'.format(dirname, instrument, oldband,
                                            magsys)
        with open(filename, 'w') as photfile:
            # Write header of file.
            photfile.write('@INSTRUMENT {}\n'.format(instrument))
            photfile.write('@BAND {}\n'.format(oldband))
            photfile.write('@MAGSYS {}\n'.format(magsys))
            for colname in colnames:
                photfile.write('#{} :\n'.format(colname))
            photfile.write('#end :\n')

            # Find indices of table rows matching this combo.
            idx = ((photdata['band'] == band) &
                   (photdata['magsys'] == magsys))
            matchedrows = photdata[idx]  # Just the rows for this file.
            for i in range(len(matchedrows)):
                for key in fieldnames:
                    photfile.write('{} '.format(matchedrows[i][key]))
                photfile.write('\n')
|
import pygame as pg
from .. import tools, prepare
from ..components.labels import Label
from ..components.course_hole import HOLE_INFO, CourseHole
from ..components.ball import Ball
from ..components.scorecard import Scorecard
class ViewScorecard(tools._State):
    """Game state showing the player's scorecard between holes.

    Any mouse-button release advances to the next hole (wrapping back to
    hole 1 and clearing scores after the last one); closing the window
    saves the player before quitting.
    """

    def __init__(self):
        super(ViewScorecard, self).__init__()

    def startup(self, persistent):
        """Pull shared objects out of `persistent` and build the card."""
        self.persist = persistent
        self.player = persistent["player"]
        self.music_handler = persistent["music handler"]
        self.card = Scorecard(self.player)

    def _advance_hole(self):
        """Move the player to the next hole and stage the HOLESTART state."""
        self.player.hole_num += 1
        if self.player.hole_num > max(HOLE_INFO.keys()):
            # Past the final hole: wipe the scores and restart at hole 1.
            self.player.scores = {}
            self.player.hole_num = 1
        next_hole = CourseHole(self.player.hole_num)
        self.persist["hole"] = next_hole
        self.persist["ball"] = Ball(next_hole.ball_pos)
        self.done = True
        self.next = "HOLESTART"

    def get_event(self, event):
        """Forward events to the music handler; handle quit and clicks."""
        self.music_handler.get_event(event)
        if event.type == pg.QUIT:
            self.quit = True
            self.player.save()
        elif event.type == pg.MOUSEBUTTONUP:
            self._advance_hole()

    def update(self, dt):
        self.music_handler.update()

    def draw(self, surface):
        surface.blit(self.card.image, self.card.rect)
# Hopper Disassembler script to insert x86/x64 instructions to return
# an integer from the current procedure. Should handle both 64-bit and
# 32-bit values. Automatically inserts function prologue if its epilogue
# remains unchanged - to avoid inserting the prologue, run this at the
# very beginning of the function so that the epilogue is overwritten.
#
# By Kimmo Kulovesi <http://arkku.com/>, 2015
# Locate the current procedure and the instruction under the cursor.
doc = Document.getCurrentDocument()
seg = doc.getCurrentSegment()
adr = doc.getCurrentAddress()
proc = seg.getProcedureAtAddress(adr)
entry = proc.getEntryPoint() if proc != None else Segment.BAD_ADDRESS
ins = seg.getInstructionAtAddress(adr)
arch = ins.getArchitecture()

# arch 1/2 are the x86 variants handled below (2 = 64-bit, 1 = 32-bit).
if arch in [1, 2]:
    suffix = ":" if arch == 2 else " (L suffix forces 64-bit):"
    s = Document.ask("Integer value to return"+suffix)
    if s != None:
        valueSize = 4
        # A trailing L/l forces a 64-bit immediate.
        if s[-1] == 'L' or s[-1] == 'l':
            valueSize = 8
            s = s[:-1]
        i = int(s, 0)
        # Find the end of the procedure so leftover bytes can be NOP-padded.
        endProc = adr + 1
        while seg.getTypeAtAddress(endProc) == Segment.TYPE_NEXT:
            endProc += 1
        if (arch == 2 or valueSize < 8) and (i == 1 or i == 0):
            # xor eax, eax -> 0
            seg.writeByte(adr, 0x31)
            seg.writeByte(adr + 1, 0xC0)
            seg.markAsCode(adr)
            adr += 2
            if i == 1:
                # inc eax -> 1
                seg.writeByte(adr, 0xFF)
                seg.writeByte(adr + 1, 0xC0)
                seg.markAsCode(adr)
                adr += 2
        else:
            offset = 0
            valueChunk = 4
            if i > 4294967295 or i < -2147483648:
                # 64-bit value
                valueSize = 8
                if arch == 2:
                    # REX.W prefix for a 64-bit mov rax, imm64.
                    seg.writeByte(adr, 0x48)
                    valueChunk = 8
                    offset = 1
            # mov eax/rax, imm (little-endian immediate bytes follow).
            seg.writeByte(adr + offset, 0xB8)
            offset += 1
            for pos in range(offset, offset + valueChunk):
                seg.writeByte(adr + pos, (i & 0xFF))
                i >>= 8
            seg.markAsCode(adr)
            adr += offset + valueChunk
            if valueSize > valueChunk:
                # 64-bit value on 32-bit architecture: high half in edx.
                seg.writeByte(adr, 0xBA)
                for pos in range(offset, offset + valueChunk):
                    seg.writeByte(adr + pos, (i & 0xFF))
                    i >>= 8
                seg.markAsCode(adr)
                adr += offset + valueChunk
        # Re-insert the function epilogue if the prologue is still intact.
        if entry != Segment.BAD_ADDRESS:
            if seg.readByte(entry) == 0x55:  # push rbp/ebp
                if seg.readByte(entry + 1) == 0x48 and \
                   seg.readByte(entry + 2) == 0x89 and \
                   seg.readByte(entry + 3) == 0xE5:
                    # mov rsp, rbp
                    seg.writeByte(adr, 0x48)
                    seg.writeByte(adr + 1, 0x89)
                    seg.writeByte(adr + 2, 0xEC)
                    seg.markAsCode(adr)
                    adr += 3
                elif seg.readByte(entry + 1) == 0x89 and \
                     seg.readByte(entry + 2) == 0xE5:
                    # mov esp, ebp
                    seg.writeByte(adr, 0x89)
                    seg.writeByte(adr + 1, 0xEC)
                    seg.markAsCode(adr)
                    adr += 2
                # pop rbp/ebp
                seg.writeByte(adr, 0x5D)
                seg.markAsCode(adr)
                adr += 1
            elif seg.readByte(entry) == 0xC8:  # enter
                # leave
                # BUG FIX: was `seq.writeByte(...)` — a typo for `seg`
                # that raised NameError whenever this branch was taken.
                seg.writeByte(adr, 0xC9)
                seg.markAsCode(adr)
                adr += 1
        # ret, then NOP-fill the rest of the procedure.
        seg.writeByte(adr, 0xC3)
        seg.markAsCode(adr)
        adr += 1
        while adr < endProc:
            seg.writeByte(adr, 0x90)
            seg.markAsCode(adr)
            adr += 1
        if entry != Segment.BAD_ADDRESS:
            seg.markAsProcedure(entry)
else:
    print("Unsupported architecture!")
|
from ..Core.commands import Commands
from ..Core.registers import Registers
from ..Runtime.base import Base
from ..Runtime.atoi import Atoi
from .write import Write
class Read(Base):
    """Runtime helper that provides the assembly `read` routine.

    The routine body is loaded into the output program only once,
    tracked by the class-level `is_loaded` flag; every instance can
    still emit calls to it.
    """

    is_loaded = False

    def __init__(self, compiler):
        Base.__init__(self, compiler)
        if Read.is_loaded:
            # Routine already present in the program; nothing more to do.
            return
        self.load('read.asm', 'read')
        Read.is_loaded = True
        # `read` relies on the `write` routine, so make sure it is loaded.
        Write(compiler)

    def call(self):
        """Emit the call sequence: read input, echo chars 62/32, atoi, push.

        The MOV/CALL-write pairs output character codes 62 and 32
        (presumably '>' and ' ' — verify against write's contract).
        """
        code = self.compiler.code
        code.add(Commands.CALL, ['read'])
        Atoi(self.compiler)
        code.add(Commands.MOV, [Registers.EAX, 62])
        code.add(Commands.CALL, ['write'])
        code.add(Commands.MOV, [Registers.EAX, 32])
        code.add(Commands.CALL, ['write'])
        # Convert the read string to an integer and push the result.
        code.add(Commands.CALL, ['atoi'])
        code.add(Commands.PUSH, Registers.EAX)
|
# coding:utf-8
# __author__ = 'BianXuesheng'
# __data__ = '2016/07/12_13:48 '
from django.conf.urls import url,include
from django.contrib import admin
from webuser.views import index,register,weblogin,weblogout
# URL routes for the webuser app: site root plus session management.
urlpatterns = [
    url(r'^$', index),              # site index page
    url(r'^register/$', register),  # account creation
    url(r'^login/$', weblogin),     # session login
    url(r'^logout/$', weblogout),   # session logout
]
|
import cv2
import time

'图像插值'
# Image-interpolation demo: upscale the same image with every
# interpolation mode OpenCV offers and show the results side by side.
img = cv2.imread("../images/1.jpg")
rows, cols, channels = img.shape
# FIX: ndarray.shape is (rows, cols, channels) = (height, width, c);
# the old comment claimed [w, h, c].
print(rows, cols, channels)

time_start = time.time()  # start timing the resize calls

'图片缩放:resize()'
# Note: cv2.resize takes dsize as (width, height) = (cols, rows).
resize1 = cv2.resize(img, dsize=(cols * 2, rows * 2))  # explicit target size
resize2 = cv2.resize(img, dsize=(0, 0), fx=2, fy=2)  # by scale factors

'几种插值方式'
'1. 双线性插值(默认)'
# BUG FIX: this call had dsize=(rows * 2, cols * 2) — width/height
# swapped relative to every other call in this script.
linear = cv2.resize(img, dsize=(cols * 2, rows * 2), interpolation=cv2.INTER_LINEAR)
'2. 最邻近插值'
nearest = cv2.resize(img, dsize=(cols * 2, rows * 2), interpolation=cv2.INTER_NEAREST)
'3. 基于4x4像素邻域内的三次样条插值'
cubic = cv2.resize(img, dsize=(cols * 2, rows * 2), interpolation=cv2.INTER_CUBIC)
'4. 基于8x8像素邻域内的Lanczos插值'
lanczos = cv2.resize(img, dsize=(cols * 2, rows * 2), interpolation=cv2.INTER_LANCZOS4)
'5. 给予局部像素的重采样'
area = cv2.resize(img, dsize=(cols * 2, rows * 2), interpolation=cv2.INTER_AREA)

# FIX: the timer was started but its value was never used; report it.
print("resize operations took {:.4f} s".format(time.time() - time_start))

cv2.imshow("resize1", resize1)
cv2.imshow("resize2", resize2)
cv2.imshow("linear", linear)
cv2.imshow("nearest", nearest)
cv2.imshow("cubic", cubic)
cv2.imshow("lanczos", lanczos)
cv2.imshow("area", area)
cv2.waitKey(0)
cv2.destroyAllWindows()
"""
几种常用插值法的效率是:最近邻插值>双线性插值>双立方插值>Lanczos插值
但是效率和结果成反比,所以根据自己的情况斟酌使用
"""
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 10:28:37 2019
Modified on 16/07/2020
@author: luisf
"""
import requests
from bs4 import BeautifulSoup as bs
import time
import pandas as pd
class scrape_anime_bruh:
    """Paginated scraper: fetches `url_pattern % i` for i = 0, 50, 100, ...

    Parameters
    ----------
    url_pattern : str
        URL template with a single % placeholder for the page offset.
    pages_to_scrape : int
        Exclusive upper bound for the page offset (stepped by 50).
    sleep_interval : float
        Seconds to pause between requests; values <= 0 disable the pause.
    content_parser : callable
        Applied to each response body; its return value is collected.
        NOTE(review): no default parser is provided — scrape_url will
        raise TypeError if this is left as None.
    """

    def __init__(self, url_pattern, pages_to_scrape=1, sleep_interval=-1, content_parser=None):
        self.url_pattern = url_pattern
        self.pages_to_scrape = pages_to_scrape
        self.sleep_interval = sleep_interval
        self.content_parser = content_parser

    def scrape_url(self, url):
        """Fetch `url`, run it through the parser, and return the result."""
        response = requests.get(url)
        result = self.content_parser(response.content)
        print(result)
        return result

    def kickstart(self):
        """Scrape every page offset (step 50) and return the parsed pages."""
        scraped = []
        for i in range(0, self.pages_to_scrape, 50):
            try:
                scraped.append(self.scrape_url(self.url_pattern % i))
                # BUG FIX: time.sleep() raises ValueError for negative
                # values (the default -1); only sleep for positive ones.
                if self.sleep_interval > 0:
                    time.sleep(self.sleep_interval)
            except Exception as err:
                # BUG FIX: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit and hid the actual error.
                print('Something bad is happening: {}'.format(err))
        return scraped
def html_links_parser(content):
    """Extract link targets from a MyAnimeList listing page.

    Anchors are matched by their exact class string; each link is pulled
    out of the tag's string form as the 4th double-quote-delimited token
    (presumably the href attribute — verify against the site's markup).
    """
    soup = bs(content, 'html.parser')
    anchors = soup.find_all('a', {'class': 'hoverinfo_trigger fl-l fs14 fw-b'})
    return [str(tag).split('"')[3] for tag in anchors]
def web_scrapping(lst):
    """Scrape one MyAnimeList anime page per URL in `lst`.

    Returns a list of dicts, one per anime, with title, type, airing
    dates, producers, genres, score, etc.

    NOTE(review): this is position-sensitive HTML scraping — it depends
    on the exact ordering of the sidebar 'spaceit' blocks, which differs
    between TV shows and other media (hence the repeated `typo == 'TV'`
    branches). Any site redesign will break the string splitting below.
    """
    info_animes = []
    for e in lst:
        response = requests.get(e, verify=False)
        res_text = response.text
        soupa = bs(res_text, 'html.parser')
        #Title:
        title_string = soupa.select('h1 span')
        title_string=str(title_string)
        time.sleep(2) #You saved my life (crude rate limiting between requests)
        title_string=title_string.split(">")
        print(title_string)
        title_string=title_string[2].replace("</span", "")
        title_string=title_string.replace("<br/", "")
        #Type (TV, Movie, OVA, ...) — first anchor after a span label.
        typo = soupa.select('span + a')
        typo = typo[0]
        typo =str(typo)
        typo =typo.split(">")
        typo = typo[1].replace("</a", "")
        print(typo)
        #####################################################
        ###############      Misc       #####################
        #####################################################
        # Sidebar blocks; their order drives all the indexing below.
        soupa_varios=soupa.findAll('div', {'class' :'spaceit'})
        #Episodes
        ep=str(soupa_varios[0])
        ep=ep.split("\n ")
        ep=ep[1].replace(" ","")
        #Status
        status=soupa.find("span", text="Status:").nextSibling
        status=str(status).split("\n")[1].strip()
        #Airing — "start to end" when a range, otherwise still airing.
        aired=str(soupa_varios[1])
        aired=aired.split("\n ")
        if 'to' in aired[1]:
            aired=aired[1].split("to")
            start_airing=aired[0].strip()
            end_airing=aired[1].strip()
        else:
            start_airing= aired[1].strip()
            end_airing='-'
        #Starting season (only present for TV shows)
        if typo == 'TV':
            sseason=soupa.find("span", text="Premiered:").nextSibling.nextSibling
            #if '>' in sseason:
            sseason=str(sseason).split(">")[1].split(" ")[0]
        else:
            sseason = '-'
        #Broadcast (TV-only block; shifts the sidebar indices below)
        if typo == 'TV':
            broad=str(soupa_varios[2])
            broad=broad.split("\n ")
            broad=broad[1].strip()
        else:
            broad = '-'
        #Licensors
        if typo == 'TV':
            licensor=str(soupa_varios[3])
            licensor=licensor.split(">")
            licensor=licensor[4].replace('</a','')
        else:
            licensor=str(soupa_varios[2])
            licensor=licensor.split(">")
            licensor=licensor[4].replace('</a','')
        #Sources
        if typo == 'TV':
            source=str(soupa_varios[4])
            source = source.split("\n ")
            source = source[1].strip()
        else:
            source=str(soupa_varios[3])
            source = source.split("\n ")
            source = source[1].strip()
        #Duration
        if typo == 'TV':
            duration=str(soupa_varios[5])
            duration=duration.split("\n ")
            duration=duration[1].strip()
        else:
            duration=str(soupa_varios[4])
            duration=duration.split("\n ")
            duration=duration[1].strip()
        #Members
        if typo == 'TV':
            members=str(soupa_varios[7])
            members=members.split("\n ")
            members=members[1].replace('\n</div>','').strip()
        else:
            members=str(soupa_varios[6])
            members=members.split("\n ")
            members=members[1].replace('\n</div>','').strip()
        #Producers and Studio — producer links; last one taken as studio.
        temp_ps=soupa.find('td')
        temp_ps=temp_ps.find_all('a')
        lst_ps = []
        # NOTE(review): this inner `e` shadows the URL loop variable above.
        for e in temp_ps:
            e=str(e)
            if '/anime/producer' in e:
                lst_ps.append(e)
        lst_ps=[e.split('>')[1].replace('</a','') for e in lst_ps]
        if len(lst_ps) == 0:
            producers = '-'
            studio = '-'
        else:
            producers = ','.join(lst_ps[:len(lst_ps)-1])
            studio = lst_ps[-1]
        #Genres — genre links, joined into one comma-separated string.
        temp_genres=soupa.find('td')
        temp_genres=temp_genres.find_all('a')
        lst_g=[]
        for e in temp_genres:
            e=str(e)
            if '/anime/genre' in e:
                lst_g.append(e)
        lst_g=[e.split('>')[1].replace('</a','') for e in lst_g]
        genres=','.join(lst_g)
        #Rating
        rating=soupa.find("span", text="Rating:").nextSibling
        rating=str(rating).split("\n")[1].strip()
        #Score
        score=soupa.find("span", text="Score:").nextSibling.nextSibling
        score=str(score).split(">")[1].replace("</span",'')
        #Scored by — actually the rank number after '#' under "Ranked:".
        scored_by=soupa.find("span", text="Ranked:").nextSibling
        scored_by = scored_by.split("#")[1]
        #Favorites
        favorites=soupa.find("span", text="Favorites:").nextSibling
        favorites= str(favorites).split("\n")[1].strip()
        #Description
        description = soupa.find('span', {'itemprop':'description'})
        description=str(description).replace('<span itemprop="description">','').replace('<br/>\n<br/>\r\n','').replace('</span>','')
        ###### Build the record dict #########
        dic = {'Title':title_string,
               'Type': typo,
               'Episodes': ep,
               'Status':status,
               'Start_Airing': start_airing,
               'End_Airing': end_airing,
               'Starting season':sseason,
               'Broadcast time': broad,
               'Producers':producers,
               'Licensors': licensor,
               'Studios':studio,
               'Sources': source,
               'Genres':genres,
               'Duration': duration,
               'Rating':rating,
               'Score':score,
               'Scored by': scored_by,
               'Members': members,
               'Favorites':favorites,
               'Description':description
               }
        info_animes.append(dic)
    return info_animes
def dic_to_df(dic):
    """Build a DataFrame from scraped anime records with a fixed column order.

    Parameters
    ----------
    dic : list of dict
        Records as produced by `web_scrapping`.
    """
    column_order = ['Title', 'Type', 'Episodes', 'Status', 'Start_Airing',
                    'End_Airing', 'Starting season', 'Broadcast time',
                    'Producers', 'Licensors', 'Studios', 'Sources', 'Genres',
                    'Duration', 'Rating', 'Score', 'Scored by', 'Members',
                    'Favorites', 'Description']
    return pd.DataFrame(dic)[column_order]
__all__ = ['click']
# from
# http://martyalchin.com/2008/jan/10/simple-plugin-framework/
from .start import ActionProvider , PluginMount
def list():
    """Return the registered ActionProvider plugin classes.

    NOTE: intentionally shadows the builtin ``list``; renaming it would
    break callers using ``<package>.list()``.
    """
    return ActionProvider.plugins
import os
from os.path import dirname

# Auto-import every plugin module in this package so each one registers
# itself (via the PluginMount machinery) as a side effect of import.
_package_dir = dirname(__file__)
for _filename in os.listdir(_package_dir):
    # Only consider .py modules; skip private ones and the framework
    # module(s) starting with "start".
    if not _filename.endswith('.py'):
        continue
    if _filename.startswith('_') or _filename.startswith('start'):
        continue
    try:
        _modname = _filename.split('.')[0]
        __import__(__name__ + "." + _modname)
        print(_modname)
    except Exception as _err:
        print("FAIL ", _filename, _err)
|
from trainers.logger import BaseLogger
from trainers.base import BaseTrainer
from trainers.nae import NAETrainer, NAELogger
def get_trainer(cfg):
    """Construct the trainer selected by the config.

    The optional ``trainer`` field picks the trainer class; anything
    other than 'nae' (including a missing field) falls back to
    `BaseTrainer`.

    Parameters
    ----------
    cfg : dict
        Experiment config; must contain 'training' and 'device'.

    Returns
    -------
    BaseTrainer or NAETrainer
    """
    # Get trainer by the specified `trainer` field (typo "speficied" fixed).
    trainer_type = cfg.get('trainer', None)
    device = cfg['device']
    # FIX: removed the unused `arch = cfg['model']['arch']`, which also
    # crashed (KeyError) for configs without a 'model' section.
    if trainer_type == 'nae':
        trainer = NAETrainer(cfg['training'], device=device)
    else:
        trainer = BaseTrainer(cfg['training'], device=device)
    return trainer
def get_logger(cfg, writer):
    """Return the logger matching cfg['logger'] ('nae' or default base)."""
    if cfg['logger'] == 'nae':
        return NAELogger(writer)
    return BaseLogger(writer)
|
import random
import sys
class Animal:
    """A named animal with getter/setter access to its private name."""
    # Class-level default; name mangling stores this as `_Animal__name`.
    __name = ""
    def __init__(self, name):
        """Create an animal with the given name."""
        self.__name = name
    def setName(self, name):
        """Replace the animal's name."""
        self.__name = name
    def getName(self):
        """Return the animal's current name."""
        return self.__name
# Smoke test of the Animal accessors: prints "Tommy".
cat = Animal("Jack")
cat.setName("Tommy")
print(cat.getName())
class dog(Animal):
    """An Animal that additionally tracks its owner."""
    # Class-level default; name mangling stores this as `_dog__owner`.
    __owner = ""

    def __init__(self, owner, name):
        self.__owner = owner
        super(dog, self).__init__(name)

    def setOwner(self, owner):
        """Replace the dog's owner."""
        self.__owner = owner

    def setName(self, name):
        """Replace the dog's name.

        BUG FIX: the old body did ``self.__name = name``, which name
        mangling turned into ``_dog__name`` — it never touched Animal's
        private ``_Animal__name``, so renames were silently ignored and
        getName() kept returning the constructor name. Delegate to the
        parent class instead.
        """
        super(dog, self).setName(name)

    def getOwner(self):
        """Return the dog's owner."""
        return self.__owner

    def toString(self):
        """Return a human-readable owner/name summary."""
        return "{} is the owner and {} is dog's name".format(self.getOwner(), self.getName())
# Subclass demo: prints "Vivek is the owner and Jimmy is dog's name".
nai = dog("Vivek", "Jimmy")
print(nai.toString())
'''
print( range(0,6))
print("What is your name? ")
name = sys.stdin.readline();
print("Hai", name)
str2 = [0,1,2,3]
str3 = "abcd1efgh";
'''
'''
list1 = [[5,6], [3,1000], [10]];
print(max(list1))
tuple1 = (5,6,9,5)
print(tuple(list(tuple1)))
tuple1 = (55,566)
print((tuple1.bit_length()))
print(len(tuple1))
dict1 = {'key1':'value1', 'key2':'value2'}
print(len(dict1))
list3 = list(dict1.values());
print(list3[0])
print(list(dict1.items())[0:1])
if 5 > 2:
print('Yes, the value is greater....')
print("\n\ncheck")
for i in range(0,len(list1)):
print(list1)
for k in (0,5):
print(k, ' ' , end='')
''' |
# coding: utf-8
import sys
from setuptools import setup, find_packages
# FIX: distribution names may not contain spaces (PEP 508); use a dash.
NAME = "etl-app"
VERSION = "1.0.0"

# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools

REQUIRES = [
    "connexion"
]

setup(
    name=NAME,
    version=VERSION,
    description="etl web app",
    author_email="",
    url="",
    keywords=["Swagger", "etl app"],
    install_requires=REQUIRES,
    packages=find_packages(),
    package_data={"": ["swagger/swagger.yaml"]},
    include_package_data=True,
    # BUG FIX: an empty-string console_scripts entry ("") is malformed
    # and breaks installation; declare no scripts instead.
    entry_points={"console_scripts": []},
    long_description="""This is an example development of RESTful API.""",
)
|
"""This module contains the halo abundance calculations.
"""
import numpy as np
from pyccl.halos.hmfunc import MassFuncTinker08
import scipy.integrate as intg
import scipy.interpolate as interp
from scipy.special import erfc
import scipy.stats as stats
from lensing_haloes.cosmo.cosmo import cosmology, dVdz
from pdb import set_trace
def z2a(z):
    """Return the scale factor ``a = 1 / (1 + z)`` for redshift ``z``."""
    scale_factor = 1.0 / (1 + z)
    return scale_factor
def a2z(a):
    """Return the redshift ``z = 1/a - 1`` for scale factor ``a``."""
    redshift = 1.0 / a - 1
    return redshift
def dndlog10mdz(z, log10_m200m, cosmo=cosmology(), MassFunc=MassFuncTinker08):
    """Return the differential comoving number density of haloes at
    redshifts z for masses m200m for the given cosmology, cosmo.

    (Unlike dNdlog10mdz, no survey area enters here — this is a density
    per unit volume.)

    Parameters
    ----------
    z : array-like
        redshifts
    log10_m200m : float or array
        log10 of the mass [M_sun / h]
    cosmo : pyccl.Cosmology object
        cosmology
        NOTE(review): the default is evaluated once at import time and
        shared across calls.
    MassFunc : pyccl.halos.hmfunc.MassFunc object
        mass function to use

    Returns
    -------
    dndlog10mdz : array-like, with (z, m) along axes [h^3 / Mpc^3]
    """
    z = np.atleast_1d(z)
    m200m = np.atleast_1d(10 ** log10_m200m)
    # initialize MassFunc
    hmf = MassFunc(cosmo)
    # pyccl works without h units
    # -> take out mass scaling
    # -> add back in scaling in final result
    dndlg10mdz = (
        np.array(
            [
                hmf.get_mass_function(cosmo=cosmo, M=m200m / cosmo._params.h, a=a)
                for a in z2a(z)
            ]
        )
        * (1.0 / cosmo._params.h) ** 3
    )
    return dndlg10mdz
def dndlog10mdz_mizi(z, log10_m200m, cosmo=cosmology(), MassFunc=MassFuncTinker08):
    """Return the differential comoving number density of haloes for each
    paired (z, m200m) sample for the given cosmology, cosmo.

    Unlike `dndlog10mdz`, z and log10_m200m are matched element-wise
    rather than forming a (z, m) grid.

    Parameters
    ----------
    z : array-like
        redshifts
    log10_m200m : float or array
        log10 of the mass [M_sun / h]; must match the shape of `z`
    cosmo : pyccl.Cosmology object
        cosmology
    MassFunc : pyccl.halos.hmfunc.MassFunc object
        mass function to use

    Returns
    -------
    dndlog10mdz : array-like [h^3 / Mpc^3]

    Raises
    ------
    ValueError
        If z and m200m do not have the same shape.
    """
    z = np.atleast_1d(z)
    m200m = np.atleast_1d(10 ** log10_m200m)
    if z.shape != m200m.shape:
        raise ValueError("z and m200m need to have the same shape.")
    # initialize MassFunc
    hmf = MassFunc(cosmo)
    # pyccl works without h units
    # -> take out mass scaling
    # -> add back in scaling in final result
    dndlg10mdz = (
        np.array(
            [
                hmf.get_mass_function(cosmo=cosmo, M=m / cosmo._params.h, a=a)
                for (m, a) in zip(m200m, z2a(z))
            ]
        )
        * (1 / cosmo._params.h) ** 3
    )
    return dndlg10mdz
def dNdlog10mdz(
    z, log10_m200m, cosmo=cosmology(), A_survey=2500, MassFunc=MassFuncTinker08
):
    """Return the differential number of haloes at redshifts z for masses
    m200m for the given cosmology, cosmo, and survey area, A_survey.

    Parameters
    ----------
    z : array-like
        redshifts
    log10_m200m : float or array
        log10 of the mass [M_sun / h]
    cosmo : pyccl.Cosmology object
        cosmology
    A_survey : float
        survey area [deg^2]
    MassFunc : pyccl.halos.hmfunc.MassFunc object
        mass function to use

    Returns
    -------
    dNdlog10mdz : array-like, with (z, m) along axes [h^3 / Mpc^3]
    """
    z = np.atleast_1d(z)
    # Comoving number density per dex in mass, shape (z, m).
    number_density = dndlog10mdz(
        z=z, log10_m200m=log10_m200m, cosmo=cosmo, MassFunc=MassFunc
    )
    # Survey volume per unit redshift at each z, scaled by the survey area.
    volume = A_survey * dVdz(
        z=z, omega_m=cosmo._params.Omega_m, h=cosmo._params.h, w0=cosmo._params.w0
    )
    # Broadcast the per-redshift volume over the mass axis.
    return number_density * volume.reshape(-1, 1)
def dNdlog10mdz_mizi(
    z, log10_m200m, cosmo=cosmology(), A_survey=2500, MassFunc=MassFuncTinker08
):
    """Return the differential number of haloes for each (z, m200m) pair
    for the given cosmology, cosmo, and survey area, A_survey.

    Parameters
    ----------
    z : array-like
        redshifts
    log10_m200m : float or array
        log10 of the mass [M_sun / h]; must match the shape of `z`
    cosmo : pyccl.Cosmology object
        cosmology
    A_survey : float
        survey area [deg^2]
    MassFunc : pyccl.halos.hmfunc.MassFunc object
        mass function to use

    Returns
    -------
    dNdlog10mdz : array-like [h^3 / Mpc^3]

    Raises
    ------
    ValueError
        If z and m200m do not have the same shape.
    """
    z = np.atleast_1d(z)
    m200m = np.atleast_1d(10 ** log10_m200m)
    if z.shape != m200m.shape:
        raise ValueError("z and m200m need to have the same shape.")
    # Element-wise number density for the (z, m) pairs.
    number_density = dndlog10mdz_mizi(
        z=z, log10_m200m=log10_m200m, cosmo=cosmo, MassFunc=MassFunc
    )
    # Survey volume per unit redshift at each z, scaled by the survey area.
    volume = A_survey * dVdz(
        z=z, omega_m=cosmo._params.Omega_m, h=cosmo._params.h, w0=cosmo._params.w0
    )
    return number_density * volume.reshape(-1)
def dNdlog10mdz_integral(
    z_min=0.25,
    z_max=10,
    n_z=100,
    log10_m200m_min=np.log10(3e14),
    log10_m200m_max=18,
    log10_mobs_min=None,
    log10_mobs_max=None,
    n_m=400,
    cosmo=cosmology(),
    A_survey=2500,
    MassFunc=MassFuncTinker08,
    sigma_log10_mobs=None,
    sigma_log10_mobs_dist=None,
    **sigma_log10_mobs_dist_kwargs
):
    """Return the integral of the total number of objects expected in a
    survey of area A_survey [deg^2]

    Parameters
    ----------
    z_min : float
        lowest redshift in sample
    z_max : float
        maximum redshift in integration
        [Default: 10]
    n_z : int
        number of redshifts to sample
    log10_m200m_min : float
        log10 of the minimum mass in the survey [M_sun / h]
    log10_m200m_max : float
        log10 of the maximum mass in the integration [M_sun / h]
        [Default: 18]
    log10_mobs_min : float
        log10 of the observed mass bin [M_sun / h]
        [Default : None]
    log10_mobs_max : float
        log10 of the observed mass bin [M_sun / h]
        [Default : None]
    sigma_log10_mobs : array-like or float
        range of sigma_log10_m_obs
        [Default : None]
    sigma_log10_mobs_dist : callable
        distribution for sigma_log10_mobs
        [Default : None]
    n_m : int
        number of masses to sample
    cosmo : pyccl.Cosmology object
        cosmology
    A_survey : float
        survey area [deg^2]
    MassFunc : pyccl.halos.hmfunc.MassFunc object
        mass function to use

    Returns
    -------
    N : float
        number of expected objects in survey
    """
    if sigma_log10_mobs_dist is not None:
        # BUG FIX: the original used `or`, which rejected any object that
        # is not an rv_continuous *instance* even if it provides a .pdf
        # method (e.g. frozen scipy distributions) — contradicting the
        # error message. Either condition should suffice, so reject only
        # when *both* fail. Also dropped the placeholder-free f-string.
        if (
            not isinstance(sigma_log10_mobs_dist, stats.rv_continuous)
            and not hasattr(sigma_log10_mobs_dist, 'pdf')
        ):
            raise TypeError('sigma_log10_mobs_dist should be stats.rv_continuous or should have pdf method')

    z_range = np.linspace(z_min, z_max, n_z)
    # create initial range to calculate hmf that will not crash
    log10_m = np.linspace(log10_m200m_min, 16, n_m)
    dNdlg10mdz_range = dNdlog10mdz(
        z=z_range,
        log10_m200m=log10_m,
        A_survey=A_survey,
        cosmo=cosmo,
        MassFunc=MassFunc,
    )
    # now create interpolator that will linearly extrapolate the result
    # (in log space) out to log10_m200m_max
    interp_func = interp.interp1d(
        log10_m,
        np.log10(dNdlg10mdz_range),
        kind="linear",
        fill_value="extrapolate",
        axis=1,
    )
    log10_m_full = np.linspace(log10_m200m_min, log10_m200m_max, n_m)

    if sigma_log10_mobs is None or log10_mobs_max is None or log10_mobs_min is None:
        # No observed-mass scatter requested: no convolution factor.
        conv_obs = 1.
    else:
        sigma_log10_mobs = np.atleast_2d(sigma_log10_mobs)
        # P(m_obs in [mobs_min, mobs_max] | m_true) for log-normal scatter
        # of width sigma_log10_mobs, expressed via erfc.
        xi = (
            (log10_mobs_min - log10_m_full)[..., None]
            / (2 * sigma_log10_mobs ** 2)**0.5
        )
        xiplusone = (
            (log10_mobs_max - log10_m_full)[..., None]
            / (2 * sigma_log10_mobs ** 2)**0.5
        )
        erfc_term = 0.5 * (erfc(xi) - erfc(xiplusone))
        if sigma_log10_mobs.shape[-1] == 1:
            # Single scatter value: use the probability directly.
            conv_obs = erfc_term.reshape(-1)
        else:
            # Marginalize over the scatter distribution.
            conv_obs = intg.simps(
                y=erfc_term * sigma_log10_mobs_dist.pdf(
                    sigma_log10_mobs, **sigma_log10_mobs_dist_kwargs
                ),
                x=sigma_log10_mobs, axis=-1
            )

    dNdlg10mdz_full = 10 ** interp_func(log10_m_full) * conv_obs
    # the interpolator returns nan for np.log10(0), these values should be 0
    dNdlg10mdz_full[np.isnan(dNdlg10mdz_full)] = 0.0

    # now integrate the m and z dimensions
    Nz = intg.simps(y=dNdlg10mdz_full, x=log10_m_full, axis=1)
    N = intg.simps(y=Nz, x=z_range)
    return N
def N_in_bins(
    z_bin_edges,
    m200m_bin_edges,
    n_z=50,
    n_m=1000,
    cosmo=cosmology(),
    A_survey=2500,
    MassFunc=MassFuncTinker08,
    pool=None,
    sigma_log10_mobs=None,
    sigma_log10_mobs_dist=None,
    **kwargs
):
    """Return the expected number of objects in each (z, m) bin for a
    survey of area A_survey [deg^2].

    Parameters
    ----------
    z_bin_edges : (z,) array
        redshift bins
    m200m_bin_edges : (m,) array
        mass bins
    n_z : int
        number of redshifts to sample
    n_m : int
        number of masses to sample
    cosmo : pyccl.Cosmology object
        cosmology
    A_survey : float
        survey area [deg^2]
    MassFunc : pyccl.halos.hmfunc.MassFunc object
        mass function to use
    pool : multiprocessing pool or None
    sigma_log10_mobs : array-like or float
        uncertainty on the mass
    sigma_log10_mobs_dist : callable
        scipy.stats distribution for sigma_log10_mobs
        [Default : None]

    Returns
    -------
    N : (z, m) array
        number of expected objects in survey
    """
    # Corner grids for every (z, m) bin; meshgrid places z along columns
    # and m along rows, hence the final transpose of the result.
    z_mins, m_mins = np.meshgrid(z_bin_edges[:-1], m200m_bin_edges[:-1])
    z_maxs, m_maxs = np.meshgrid(z_bin_edges[1:], m200m_bin_edges[1:])

    # prepare coordinates to be passed to N: one row per bin, columns
    # (z_min, z_max, m_min, m_max).
    coords = np.concatenate(
        [
            z_mins.ravel().reshape(-1, 1),
            z_maxs.ravel().reshape(-1, 1),
            m_mins.ravel().reshape(-1, 1),
            m_maxs.ravel().reshape(-1, 1),
        ],
        axis=-1,
    )

    def N(edges):
        # Integrate the expected halo counts over a single (z, m) bin.
        z_min, z_max, m_min, m_max = edges
        if sigma_log10_mobs is None:
            log10_m200m_min = np.log10(m_min)
            log10_m200m_max = np.log10(m_max)
            log10_mobs_min = None
            log10_mobs_max = None
        else:
            # Widen the true-mass integration range by 2.5 * max(sigma)
            # so the observed-mass scatter convolution does not miss
            # haloes that scatter across the bin edges.
            log10_m200m_min = np.log10(m_min) - 2.5 * np.max(sigma_log10_mobs)
            log10_m200m_max = np.log10(m_max) + 2.5 * np.max(sigma_log10_mobs)
            log10_mobs_min = np.log10(m_min)
            log10_mobs_max = np.log10(m_max)
        return dNdlog10mdz_integral(
            z_min=z_min,
            z_max=z_max,
            n_z=n_z,
            log10_m200m_min=log10_m200m_min,
            log10_m200m_max=log10_m200m_max,
            log10_mobs_min=log10_mobs_min,
            log10_mobs_max=log10_mobs_max,
            sigma_log10_mobs=sigma_log10_mobs,
            sigma_log10_mobs_dist=sigma_log10_mobs_dist,
            n_m=n_m,
            cosmo=cosmo,
            A_survey=A_survey,
            MassFunc=MassFunc,
            **kwargs
        )

    if pool is not None:
        map_fn = pool.map
    else:
        map_fn = map
    # NOTE(review): `N` is a closure, so a standard multiprocessing pool
    # cannot pickle it — verify the pool supports closures before use.
    return np.asarray(list(map_fn(N, coords))).reshape(z_mins.shape).T
|
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
# pylint: disable=deprecated-module
import imp
import logging
import os
import pdb
import sys
import traceback
# Avoid expensive imports here as load times directly add to runs.
from guild import exit_code
from guild import op_util
from guild import util
log = None # initialized in _init_logging
__argv0 = sys.argv
NO_FLAGS_DEST = object()
class Debugger(pdb.Pdb):
    """A Pdb variant that single-steps only through the ``__main__`` module."""

    def __init__(self):
        super().__init__()
        # Pdb documents `skip` as a list of module-name globs. We have no
        # such list — any truthy value is enough to make Pdb consult
        # is_skipped_module(), which holds the real skip decision.
        self.skip = True

    def is_skipped_module(self, module_name):
        """Skip every module except the script under debug."""
        return module_name != "__main__"
class ModuleInfo:
    """Location of a module to execute.

    mod_path: file system path of the module (or its package __main__ file).
    package: dotted name of the owning package ('' when the module is
        top-level).
    """
    def __init__(self, mod_path, package):
        self.mod_path = mod_path
        self.package = package
def main():
    """Entry point: run under cProfile when the PROFILE env var is set."""
    entry = _profile_main if os.getenv("PROFILE") else _main
    entry()
def _profile_main():
    """Run _main under cProfile and dump the stats to a temp file."""
    import cProfile
    import tempfile

    profiler = cProfile.Profile()
    sys.stderr.write("Profiling operation\n")
    profiler.enable()
    try:
        _main()
    finally:
        profiler.disable()
        # Always write stats, even when the operation fails.
        _fd, stats_path = tempfile.mkstemp(prefix="guild-op-profile-")
        sys.stderr.write(f"Writing guild.op_main profile stats to {stats_path}\n")
        profiler.dump_stats(stats_path)
        sys.stderr.write(
            f"Use 'python -m pstats {stats_path}' or 'snakeviz {stats_path}' to view stats\n"
        )
def _main():
    """Run the requested module: init environment, parse args, execute."""
    _init_sys_path()
    _init_logging()
    _init_warnings()
    log.debug("cwd: %s", os.getcwd())
    log.debug("sys.path: %s", os.path.pathsep.join(sys.path))
    # arg1 is the module spec; everything after it is passed to the module.
    arg1, rest_args = _parse_args()
    _apply_plugins()
    _try_module(arg1, rest_args)
def _init_sys_path():
if os.getenv("SCRIPT_DIR") is not None:
sys.path[0] = os.getenv("SCRIPT_DIR")
elif os.getenv("NO_SYS_PATH_MODIFY") != "1":
# Remove cwd from Python path to rely on `PYTHONPATH` config
# exclusively
del sys.path[0]
def _init_logging():
    """Configure run logging and publish the module-level `log` logger."""
    op_util.init_logging()
    # `log` is declared as None at module level; bind the real logger here.
    globals()["log"] = logging.getLogger("guild")
def _init_warnings():
    """Suppress known-noisy warnings unless debug logging is enabled."""
    if log.getEffectiveLevel() <= logging.DEBUG:
        return
    warnings.simplefilter("ignore", Warning)
    for message in ("numpy.dtype size changed", "numpy.ufunc size changed"):
        warnings.filterwarnings("ignore", message=message)
def _parse_args():
if len(sys.argv) < 2:
_internal_error("missing required arg\n")
return sys.argv[1], sys.argv[2:]
def _apply_plugins():
plugins = os.getenv("GUILD_PLUGINS")
if not plugins:
return
_fix_guild_model_finder()
for name in plugins.split(","):
_apply_plugin(name)
def _fix_guild_model_finder():
    """Preemptively loads `guild.model` without registering a model finder.

    For reasons that date back to early development, Guild uses
    Python's `sys.path_hooks` to register a model finder that resolved
    Guild file operations via Python's module loading mechanism. This
    facility is slated for removal, however, in the meantime there is
    an issue running Python operations that load packages (submodules)
    that contain Guild files.

    The workaround is to load `guild.model` with the environment
    variable `NO_GUILD_MODEL_FINDER` set to `1`.
    """
    # util.Env temporarily applies the env var for the duration of the import.
    with util.Env({"NO_GUILD_MODEL_FINDER": "1"}):
        import guild.model as _
def _apply_plugin(name):
    """Resolve the named plugin and let it patch the environment."""
    plugin = _plugin_for_name(name)
    log.debug("patching env with plugin %r", name)
    plugin.patch_env()
def _plugin_for_name(name):
    """Look up a plugin object by name (defers the costly plugin import)."""
    from guild import plugin  # expensive
    return plugin.for_name(name)
def _try_module(arg1, args):
    """Resolve the module spec in `arg1` and execute it with `args`."""
    # Specs may use OS path separators; normalize to '/' for parsing.
    module_spec = arg1.replace(os.path.sep, "/")
    package_path, module = _parse_module_spec(module_spec)
    if package_path:
        # This move is controversial - see *Python path and sub-directories* in
        # [pythonpath.md](../tests/pythonpath.md) for behavior and a note on
        # why we might want to rethink this.
        package_path = _try_resolve_package_path(package_path)
        log.debug("using package path '%s'", package_path)
        sys.path.insert(1, package_path)
    log.debug("finding module '%s'", module)
    try:
        module_info = _find_module(module)
    except ImportError as e:
        _error(str(e))
    else:
        _dispatch_module_exec(_flags_interface(args), module_info)
def _parse_module_spec(spec):
parts = spec.rsplit("/", 1)
if len(parts) == 2:
return parts[0], parts[1]
return None, parts[0]
def _try_resolve_package_path(package_path):
for path in sys.path:
maybe_resolved = os.path.join(path, package_path)
if os.path.exists(maybe_resolved):
return maybe_resolved
return package_path
def _find_module(module):
    """Find module using imp.find_module.

    While imp is deprecated, it provides a Python 2/3 compatible
    interface for finding a module. We use the result later to load
    the module with imp.load_module with the '__main__' name, causing
    it to execute.

    The non-deprecated method of using importlib.util.find_spec and
    loader.execute_module is not supported in Python 2.

    The _find_module implementation uses a novel approach to bypass
    imp.find_module's requirement that package directories contain
    __init__.py/__init__.pyc markers. This lets users specify
    namespace packages in main modules, which are not otherwise
    supported by imp.find_module.

    Returns a ModuleInfo; raises ImportError when the module cannot be
    found anywhere on sys.path.
    """
    parts = module.split(".")
    module_path = parts[0:-1]
    package = ".".join(module_path)
    module_name_part = parts[-1]
    # See function docstring for the rationale of this algorithm.
    for sys_path_item in sys.path:
        # Probe the package directory directly rather than letting
        # imp.find_module walk the package chain (namespace-package support).
        cur_path = os.path.join(sys_path_item, *module_path)
        try:
            f, path, _desc = imp.find_module(module_name_part, [cur_path])
        except ImportError:
            pass
        else:
            if f:
                f.close()
            else:
                # `module` is a package directory — execute its __main__.
                path = _find_package_main(path)
                if path is None:
                    raise ImportError(
                        f"No module named {module}.__main__ ('{module}' is "
                        "a package and cannot be directly executed)"
                    )
            return ModuleInfo(path, package)
    raise ImportError(f"No module named {module}")
def _find_package_main(mod_path):
names = ["__main__.py", "__main__.pyc"]
for name in names:
path = os.path.join(mod_path, name)
if os.path.exists(path):
return path
return None
def _flags_interface(args):
    """Choose the flags destination per the FLAGS_DEST env var.

    Returns the (dest, base_args, flags) triple produced by the matching
    handler; unknown values degrade to the no-flags behavior.
    """
    dest = os.getenv("FLAGS_DEST", "args")
    if dest == "args" or dest.startswith("args:"):
        return _args_dest(args)
    if dest == "globals":
        return _globals_dest(args)
    for prefix, handler in (
        ("global:", _global_dict_dest),
        ("dict:", _global_dict_dest),
        ("namespace:", _global_simple_namespace_dest),
    ):
        if dest.startswith(prefix):
            return handler(args, dest[len(prefix):])
    if dest == "none":
        return _no_dest_args(args)
    return _unknown_dest(dest, args)
def _args_dest(args):
    """Flags travel as command-line args; flag args are moved after base args."""
    # Strip last occurring `--` from args
    flag_args, base_args = op_util.split_args_for_flags(args)
    return "args", base_args + flag_args, {}
def _globals_dest(args):
    """Flags become module globals; returns ("globals", base args, flag map)."""
    base_args, flags = _base_args_and_flags_for_globals(args)
    return "globals", base_args, flags
def _base_args_and_flags_for_globals(args):
    """Split argv into (non-flag args, parsed flag dict)."""
    flags, other_args = op_util.args_to_flags(args)
    return other_args, flags
def _global_dict_dest(args, global_name):
    """Collect all flags under a single global dict named `global_name`."""
    base_args, flags = _base_args_and_flags_for_globals(args)
    global_config = {}
    # Expand dotted flag names into nested dicts before wrapping.
    util.apply_nested_config(flags, global_config)
    global_dest = op_util.global_dest(global_name, global_config)
    return "globals", base_args, global_dest
def _global_simple_namespace_dest(args, global_name):
    """Like the dict dest, but exposes the flags as a SimpleNamespace."""
    _dest, base_args, global_dest = _global_dict_dest(args, global_name)
    _convert_global_dict_to_namespace(global_dest)
    return "globals", base_args, global_dest
def _convert_global_dict_to_namespace(global_dest):
    """Replace the single dict value in `global_dest` with a namespace.

    `global_dest` must contain exactly one entry; it is mutated in place.
    """
    assert len(global_dest) == 1, global_dest
    global_name = next(iter(global_dest))
    global_dest[global_name] = _dict_to_simple_namespace(global_dest[global_name])
def _dict_to_simple_namespace(d):
    """Recursively convert dict `d` into Guild's _SimpleNamespace."""
    from guild import python_util
    kw = {name: _dict_to_simple_namespace_or_val(val) for name, val in d.items()}
    return python_util._SimpleNamespace(kw)
def _dict_to_simple_namespace_or_val(val):
if isinstance(val, dict):
return _dict_to_simple_namespace(val)
return val
def _no_dest_args(args):
    """Flags are dropped entirely; only the base args reach the module."""
    _flag_args, base_args = op_util.split_args_for_flags(args)
    return NO_FLAGS_DEST, base_args, {}
def _unknown_dest(dest, args):
    """Unrecognized FLAGS_DEST value: log it and fall back to no-flags."""
    log.debug("guild.op_main ignoring flags dest %r", dest)
    return _no_dest_args(args)
def _dispatch_module_exec(flags_interface, module_info):
    """Execute the module, injecting flags as globals when so configured."""
    _maybe_test_internal_error()
    dest, args, flags = flags_interface
    if dest == "globals":
        _exec_module(module_info, args, flags)
    elif dest in ("args", NO_FLAGS_DEST):
        _exec_module(module_info, args)
    else:
        assert False, flags_interface
def _maybe_test_internal_error():
    # Simulate an internal error by checking env for a special
    # variable. This is used by Guild tests to verify internal error
    # handling: setting __GUILD_OP_MAIN_INTERNAL_ERROR=1 makes this
    # assert fail on purpose.
    assert os.getenv("__GUILD_OP_MAIN_INTERNAL_ERROR") != "1"
def _exec_module(module_info, args, globals=None):
    """Execute the target module as '__main__' with `args` as its argv.

    `globals`, when given, is a dict of flag values injected into the
    module's global namespace (FLAGS_DEST=globals mode). The name shadows
    the builtin but renaming would break positional/keyword callers.
    """
    from guild import python_util
    _set_argv_for_module_with_args(module_info, args)
    def exec_script():
        mod_name = _module_name_for_info(module_info)
        log.debug("loading module from '%s'", module_info.mod_path)
        python_util.exec_script(module_info.mod_path, globals, mod_name=mod_name)
    _gen_exec(exec_script, module_info)
def _gen_exec(exec_cb, module_info):
    """Run `exec_cb`, under the debugger when PDB break commands exist."""
    pdb_commands = _pdb_commands(module_info)
    if not pdb_commands:
        exec_cb()
        return
    debugger = Debugger()
    debugger.rcLines.extend(pdb_commands)
    debugger.runcall(exec_cb)
def _pdb_commands(module_info):
try:
encoded_breaks = os.environ["PDB_BREAKS"]
except KeyError:
return []
else:
unresolved_breaks = util.shlex_split(encoded_breaks)
commands = [
_pdb_break_cmd(_resolve_break(b, module_info)) for b in unresolved_breaks
]
commands.append(_pdb_continue_cmd())
return commands
def _pdb_break_cmd(location):
return f"break {location}"
def _pdb_continue_cmd():
return "continue"
def _resolve_break(b, module_info):
import re
if re.match(r"[0-9]+", b):
return _module_break(module_info.mod_path, int(b))
if re.match(r".+?:[0-9]$", b):
path, line = b.rsplit(":", 2)
return _module_break(path, int(line), module_info.mod_path)
return b
def _module_break(path, want_line, main_mod=None):
    """Return a 'path:line' break location snapped to a breakable line.

    When `want_line` itself is not breakable, falls back to the first
    breakable line; when none can be found, returns the requested
    location unchanged.
    """
    from guild import python_util
    if not os.path.isabs(path):
        path = _resolve_path_for_break(path, main_mod)
    try:
        next_line = python_util.next_breakable_line(path, want_line)
    except TypeError:
        if want_line > 1:
            # Try first available breakpoint
            return _module_break(path, 1)
        return f"{path}:{want_line}"
    else:
        return f"{path}:{next_line}"
def _resolve_path_for_break(path, main_mod):
debugger = pdb.Pdb()
debugger.mainpyfile = main_mod or ''
return debugger.lookupmodule(path)
def _set_argv_for_module_with_args(module_info, args):
    """Rewrite sys.argv so the executed module sees itself as the script."""
    sys.argv = [module_info.mod_path] + args
    log.debug("argv: %s", sys.argv)
def _module_name_for_info(module_info):
"""Returns module name for module info.
If module info contains a package, returns `<package.__main__`,
otherwise returns `__main__`.
"""
if module_info.package:
return f"{module_info.package}.__main__"
return "__main__"
def _internal_error(msg):
    """Report an internal (Guild bug) error and exit with INTERNAL_ERROR."""
    sys.stderr.write(f"guild.op_main: {msg}\n")
    sys.exit(exit_code.INTERNAL_ERROR)
def _error(msg):
    """Report a user-facing error and exit with DEFAULT_ERROR."""
    sys.stderr.write(f"guild: {msg}\n")
    sys.exit(exit_code.DEFAULT_ERROR)
if __name__ == "__main__":
    try:
        main()
    except (Exception, KeyboardInterrupt) as e:
        # In debug mode show the full, unfiltered traceback.
        if log.getEffectiveLevel() <= logging.DEBUG:
            raise
        exc_lines = traceback.format_exception(*sys.exc_info())
        if len(exc_lines) < 3 or len(__argv0) < 2:
            # Assertion failure, but we want to be defensive in
            # deference to the actual error.
            raise
        # Print exception start with mod (argv[0])
        filtered_exc_lines = []
        this_dir = os.path.dirname(__file__)
        # Drop leading traceback frames that originate in this directory so
        # the user sees the failure starting at their own module.
        for line in exc_lines[1:]:
            if filtered_exc_lines or this_dir not in line:
                filtered_exc_lines.append(line)
        if not filtered_exc_lines:
            raise
        sys.stderr.write(exc_lines[0])
        for line in filtered_exc_lines:
            sys.stderr.write(line)
        if os.getenv("BREAK_ON_ERROR") == "1":
            sys.stderr.write("Entering post mortem debug session\n")
            pdb.post_mortem()
        # NOTE(review): this local deliberately shadows the imported
        # `exit_code` module; it is only read on the next line.
        exit_code = e.code if isinstance(e, SystemExit) else 1
        sys.exit(exit_code)
|
import tensorflow as tf
import random
from DeepQNet.readData import *
class Env:
    """Trading environment over a price series for a DQN agent.

    The observation is a sliding window of `input_size` normalized values.
    The reward for an action is the next price change scaled by the action's
    position value, minus a transaction cost when the position changes.
    `time_series` is a (price_diff, price) pair of aligned arrays.
    """

    def __init__(self, normalized: np.ndarray, time_series: tuple, actions: np.ndarray,
                 input_size: int=50, transaction_cost = 0.0005):
        self.name = "env1"
        self.normalized = normalized
        self.price_diff = time_series[0]
        self.price = time_series[1]
        self.length = len(time_series[0])
        self.input_size = input_size
        self.index = 0
        self.actions = actions
        # Starts at index 1 — presumably the "neutral" position; confirm
        # against the actions array used by callers.
        self.prev_action = 1
        self.tc = transaction_cost

    def state(self):
        """Return (observation window, price-diff window, price window)."""
        window = slice(self.index, self.index + self.input_size)
        return self.normalized[window], self.price_diff[window], self.price[window]

    def reset(self):
        """Rewind to the start of the series and return the first observation."""
        self.index = 0
        observation, _diffs, _prices = self.state()
        return observation

    def step(self, action):
        """Advance one tick; return (next_state, reward, done)."""
        self.index += 1
        done = self.index + self.input_size + 1 > self.length
        next_state, diffs, prices = self.state()
        position = self.actions[action]
        prev_position = self.actions[self.prev_action]
        reward = diffs[-1] * position
        if position != prev_position:
            # Charge a proportional transaction cost on the traded size and
            # remember the action that produced the new position.
            reward -= (self.tc * prices[-1]) * abs(position - prev_position)
            self.prev_action = action
        return next_state, reward, done

    def action_sample(self) -> int:
        """Uniformly random action index."""
        return np.random.randint(0, len(self.actions))
class Env2:
    """Variant of Env whose observation also carries the current position.

    Identical dynamics to Env, but `state()` appends the value of the
    previously taken action to the normalized window, so the agent can see
    its own position. `time_series` is a (price_diff, price) pair.
    """

    def __init__(self, normalized: np.ndarray, time_series: tuple, actions: np.ndarray,
                 input_size: int=128, transaction_cost=0.0005):
        self.name = "env2"
        self.normalized = normalized
        self.price_diff = time_series[0]
        self.price = time_series[1]
        self.length = len(time_series[0])
        self.input_size = input_size
        self.index = 0
        self.actions = actions
        # Starts at index 1 — presumably the "neutral" position; confirm
        # against the actions array used by callers.
        self.prev_action = 1
        self.tc = transaction_cost

    def state(self):
        """Return (observation + current position, diff window, price window)."""
        window = slice(self.index, self.index + self.input_size)
        observation = np.append(self.normalized[window], self.actions[self.prev_action])
        return observation, self.price_diff[window], self.price[window]

    def reset(self):
        """Rewind to the start of the series and return the first observation."""
        self.index = 0
        observation, _diffs, _prices = self.state()
        return observation

    def step(self, action):
        """Advance one tick; return (next_state, reward, done).

        Note: next_state is built before prev_action is updated, so its
        trailing position element reflects the position held *before* this
        step (matching the original implementation).
        """
        self.index += 1
        done = self.index + self.input_size + 1 > self.length
        next_state, diffs, prices = self.state()
        position = self.actions[action]
        prev_position = self.actions[self.prev_action]
        reward = diffs[-1] * position
        if position != prev_position:
            # Charge a proportional transaction cost on the traded size and
            # remember the action that produced the new position.
            reward -= (self.tc * prices[-1]) * abs(position - prev_position)
            self.prev_action = action
        return next_state, reward, done

    def action_sample(self) -> int:
        """Uniformly random action index."""
        return np.random.randint(0, len(self.actions))
|
num=int(input("Enter a number:"))
if(num%3)==0:
print("Enter a number is even")
else:
print("Enter a number is odd") |
from etcetera import api
import argparse
import logging
def main():
    """Command-line entry point for the `etc` dataset-management tool.

    Builds the sub-command parser, configures logging per -v/--verbose,
    then dispatches to the matching `api` call.
    """
    parser = argparse.ArgumentParser(prog='etc', description='etcetera: managing cloud-hosted datasets')
    parser.add_argument('-v', '--verbose', action='store_true', help='Print debug info')
    subparsers = parser.add_subparsers(dest='cmd', help='command')
    parser_ls = subparsers.add_parser('ls', help='List datasets')
    parser_ls.add_argument('-r', '--remote', action='store_true', help='List remote repository')
    parser_register = subparsers.add_parser('register', help='Register directory as a dataset')
    parser_register.add_argument('directory', help='dataset directory')
    # BUG FIX: a positional argument ignores `default` unless it is made
    # optional; nargs='?' lets `name` be omitted and default to None as the
    # original `default=None` clearly intended.
    parser_register.add_argument('name', nargs='?', default=None, help='dataset name')
    parser_register.add_argument('-f', '--force', action='store_true', help='Force override if local dataset exists')
    parser_pull = subparsers.add_parser('pull', help='Pull dataset from repository')
    parser_pull.add_argument('name', help='Dataset name')
    parser_pull.add_argument('-f', '--force', action='store_true', help='Force download even if local dataset exists')
    parser_push = subparsers.add_parser('push', help='Push dataset to the repository')
    parser_push.add_argument('name', help='Dataset name')
    parser_push.add_argument('-f', '--force', action='store_true', help='Force upload even if dataset exists in the repository')
    parser_purge = subparsers.add_parser('purge', help='Purge local dataset')
    parser_purge.add_argument('name', help='Dataset name')
    parser_create = subparsers.add_parser('create', help='Create empty local dataset with "train" and "test" partitions.')
    parser_create.add_argument('name', help='Dataset name')
    parser_create.add_argument('-f', '--force', action='store_true', help='If local dataset with the same name already exists, purge it.')
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    # Dispatch to the API call matching the chosen sub-command; no
    # sub-command (args.cmd is None) falls through to parser.error.
    if args.cmd == 'ls':
        for x in api.ls(remote=args.remote):
            print(x)
    elif args.cmd == 'register':
        api.register(args.directory, args.name, force=args.force)
    elif args.cmd == 'pull':
        api.pull(args.name, force=args.force)
    elif args.cmd == 'push':
        api.push(args.name, force=args.force)
    elif args.cmd == 'purge':
        api.purge(args.name)
    elif args.cmd == 'create':
        api.create(args.name, force=args.force)
    else:
        parser.error('Unknown command')
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 18 13:38:16 2021
@author: 1999a
"""
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, utils
from dataload import loadPreprocess
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
import numpy as np
# Load features/labels, one-hot encode the labels, and split the data into
# train / validation / test partitions (fixed seeds for reproducibility).
features, labels = loadPreprocess()
enc = OneHotEncoder()
labels = enc.fit_transform(np.array(labels).reshape(-1,1)).toarray()
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state = 42)
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.2, random_state = 16)
# Conv2D expects a trailing channel axis: (height, width, 1).
x_train = tf.expand_dims(x_train, axis=-1)
x_valid = tf.expand_dims(x_valid, axis=-1)
x_test = tf.expand_dims(x_test, axis=-1)
# Simple CNN: three conv blocks followed by a dense 31-way classifier head.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(20, 172, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(31, activation="softmax"))
# BUG FIX: the final layer already applies softmax, so the loss must be
# computed from probabilities. With from_logits=True the loss would apply a
# second, implicit softmax on top of the output and distort training.
model.compile(optimizer='adam',
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=100,
                    validation_data=(x_valid, y_valid))
# Evaluate: compare argmax predictions with true classes on the test set.
test = np.argmax(model.predict(x_test), axis=-1)
print(metrics.accuracy_score(np.argmax(y_test, axis=-1), test))
import csv
import sys
import time
import datetime
###
# SEARCH FOR THE INTERACTORS, CREATES THE INTERACTOME AND THE LIST OF INTERACTORS
# by Paolo Tieri, CNR, paolo.tieri@cnr.it, 2017-2021
# ported from Python 2 to Python 3 on May 2021
###
### print instructions
print('\n'+"###########################################################################################"+
'\n'+"# TORS SEARCHES FOR BINARY INTERACTION IN PPI DATASETS STARTING FROM A LIST OF SEED GENES #"+'\n'+
"###########################################################################################"+'\n'+'\n'+
"* * * * * * *"+'\n'+'\n'+
"Instructions:"+'\n'+'\n'+
"Step 1: enter the file name of the PPI dataset containing all interaction pairs,"+'\n'+
"the dataset must be at least two columns (interactor A - interactor B) and" +'\n'+
"space-delimited format (delimiters can be changed in the code)"+'\n'+'\n'+
"Step 2: enter the column number related to the first interactor ID in the PPI dataset"+'\n'+'\n'+
"Step 3: enter the column number related to the second interactor ID in the PPI dataset"+'\n'+'\n'+
"Step 4: enter the file name containing the list of the seed genes, it must be a single column"+'\n'+'\n'+
"Step 5: a report named 'user-input seed gene list name'+'interactions_results.txt' is generated" +'\n'+'\n'+
"* * * * * * *" +'\n'+'\n'
)
### end print instructions
###
### reading inputs
# enter *** reference interactome *** filename and then the *** columns to be read (1°, 2°, etc) ***
file_to_read_from = input("Enter reference interactome filename: ")
# convert the 1-based column numbers typed by the user to 0-based indices
c1 = int(input("Enter interactor A column to read (1,2,3...): "))
c1=c1-1 # this because python starts counting from 0, not 1
c2 = int(input("Enter interactor B column to read (1,2,3...): "))
c2=c2-1
# enter *** seed list *** filename:
read_list = input("Enter seed list filename: ")
# calculating elapsed computing time
start_time = time.time()
###
### reading datasets and computation
# NOTE(review): this handle is never closed; consider a `with` block.
list_to_read = open(read_list, 'r') # list of seed nodes
seed_list = list_to_read.readlines() # uploads the list in memory line by line
# strips away newlines:
converted_seed_list = []
for element in seed_list:
    converted_seed_list.append(element.strip())
### initializing as many data lists as the columns you want (not all):
col1, col2, seedfound, interactorsfound = [],[],[],[]
### read given columns with given delimiter in file and memorize
### WARNING ------- ALWAYS CHECK THE DELIMITER ---------- WARNING ###
with open(file_to_read_from, 'r') as file_in:
    reader = csv.reader(file_in, delimiter=' ') # can as well be ',' or '\t' (tab) or ';' or '\s' or ' ' (space) etc
    for row in reader:
        col1.append(row[c1]) # assuming col 1 in the file is one of the 2 you want
        col2.append(row[c2]) # assuming col 2 in the file is one of the 2 you want
### save stdout to file:
# everything print()ed from here on goes into the results file until
# sys.stdout is restored near the end of the script
orig_stdout = sys.stdout
sys.stdout = open(read_list+'_interactions_results.txt', 'w')
print("***** FILE START *****")
print("")
print("Interaction dataset: " + file_to_read_from)
print("")
print("Seed genes dataset: " + read_list)
print("")
### search for seed genes in the two columns, count and write them out
number_of_seed_genes = str(len(converted_seed_list))
print('\n' + "#########################" +'\n')
print('\n'+"** List of original " + number_of_seed_genes + " SEED GENES"+'\n')
print(converted_seed_list)
print('\n'+"** List of SEED GENES found in the interaction dataset " + file_to_read_from + "(counter - gene name)" + '\n')
counter_seed = 0
# scan both interactor columns; `seedfound` keeps each seed gene only once
for x in range(len(col1)):
    if (col1[x] in converted_seed_list):
        if (col1[x] not in seedfound):
            counter_seed = counter_seed + 1
            seedfound.append(col1[x])
            print (counter_seed,col1[x])
        else:
            pass
    else:
        pass
for x in range(len(col2)):
    if (col2[x] in converted_seed_list):
        if (col2[x] not in seedfound):
            counter_seed = counter_seed + 1
            seedfound.append(col2[x])
            print (counter_seed,col2[x])
        else:
            pass
    else:
        pass
number_seed_found = str(counter_seed)
print('\n'+ "** No." + number_seed_found + " SEED GENES found in " + file_to_read_from + " out of " + number_of_seed_genes + " original seed genes" + '\n')
### search if there are interactions *** among and only among nodes in the list *** and print them
print('\n' + "#########################" +'\n')
print('\n'+"** List of Interactions among SEED GENES THEMSELVES only " + "(counter - line in file - gene A - gene B)" + '\n')
counter=0 # line counter (number of interactions detected)
# check if col1 entry and col2 entry are into list, print on screen and write on file
for x in range(len(col1)):
    if (col1[x] in converted_seed_list and col2[x] in converted_seed_list):
        counter=counter+1
        print (counter,x,col1[x],col2[x])
#        new_interactome.write(str(counter)+' '+str(x)+' ' +str(col1[x])+' '+str(col2[x])+'\n')
    else:
        pass
### searches and counts interactors of the seed genes:
# an "interactor" is a non-seed gene directly linked to at least one seed gene
print('\n' + "#########################" +'\n')
print('\n'+"** List of INTERACTORS (non seed genes) found in the dataset " + "(counter - gene name)" + '\n')
counter_interactors = 0
for x in range(len(col1)):
    if (col1[x] in converted_seed_list and col2[x] not in interactorsfound and col2[x] not in converted_seed_list):
        counter_interactors = counter_interactors + 1
        interactorsfound.append(col2[x])
        print (counter_interactors,col2[x])
    else:
        pass
for x in range(len(col2)):
    if (col2[x] in converted_seed_list and col1[x] not in interactorsfound and col1[x] not in converted_seed_list):
        counter_interactors = counter_interactors + 1
        interactorsfound.append(col1[x])
        print (counter_interactors,col1[x])
    else:
        pass
print('\n' + "Interactors found in dataset: " + str(counter_interactors) + '\n')
### search interactions among seed genes and interactors in the database and print them
print('\n' + "#########################" +'\n')
print('\n'+"** List of interactions among SEED GENES and THEIR DIRECT INTERACTORS " + "(counter - line in file - gene A - gene B)"+ '\n')
counter_interactions = 0 # line counter (number of interactions detected)
# check if col1 entry or col2 entry are into list, print on screen and write on file
for x in range(len(col1)):
    if (col1[x] in converted_seed_list or col2[x] in converted_seed_list):
        counter_interactions = counter_interactions+1
        print (counter_interactions, x, col1[x], col2[x])
#        new_interactome.write(str(counter_interactions)+' '+str(x)+' ' +str(col1[x])+' '+str(col2[x])+'\n')
    else:
        pass
### search all interactions seed genes-interactors and interactors-interactors and print them
print('\n' + "#########################" +'\n')
print('\n'+"** List of interactions among INTERACTORS only " + "(counter - line in file - gene A - gene B)"+ '\n')
counter_nonseeds = 0 # line counter (number of interactions detected)
# check if col1 entry and col2 entry are into converted_seed_list, print on screen and write on file
for x in range(len(col1)):
    if (col1[x] in interactorsfound and col2[x] in interactorsfound):
        counter_nonseeds = counter_nonseeds + 1
        print (counter_nonseeds, x, col1[x], col2[x])
#        new_interactome.write(str(counter)+' '+str(x)+' ' +str(col1[x])+' '+str(col2[x])+'\n')
    else:
        pass
elapsed_time = time.time() - start_time
### summary — first written to the results file, then repeated on the console
print('\n' + "#########################" +'\n')
print("")
print("* * * * * * *" +'\n')
print("SUMMARY" + '\n')
print("* * * * * * *" +'\n')
print('\n' + "Uploaded interaction dataset file: " + file_to_read_from)
print('\n' + "Uploaded seed genes dataset file: " + read_list)
print('\n'+ "SEED GENES found in interaction dataset: "+ number_seed_found + " out of " + number_of_seed_genes + " original seed genes")
print('\n'+"INTERACTORS of seed genes found in interaction dataset: " + str(counter_interactors))
print('\n'+"INTERACTIONS involving SEED GENES only: " + str(counter))
print('\n'+"INTERACTIONS involving SEED GENES and THEIR INTERACTORS: " + str(counter_interactions))
print('\n'+"INTERACTIONS involving INTERACTORS only: " + str(counter_nonseeds))
print('\n' + "Computed on", datetime.datetime.now(), "from seed dataset", read_list, "and interaction dataset", file_to_read_from)
print('\n' + "Elapsed computing time: " + str(elapsed_time) + " seconds" + '\n')
print("* * * * * * *" +'\n')
print("***** END OF FILE *****")
# new_interactome.close() # close the file
# restore the console stdout saved before the redirect
sys.stdout.close()
sys.stdout = orig_stdout
print('\n' + "########################################################")
print('\n' + "Results have been saved in file " + read_list +"_interactions_results.txt")
print('\n' + "Uploaded interaction dataset file: " + file_to_read_from)
print('\n' + "Uploaded seed genes dataset file: " + read_list)
print('\n'+ "SEED GENES found in interaction dataset: "+ number_seed_found + " out of " + number_of_seed_genes + " original seed genes")
print('\n'+"INTERACTORS of seed genes found in interaction dataset: " + str(counter_interactors))
print('\n'+"INTERACTIONS involving SEED GENES only: " + str(counter))
print('\n'+"INTERACTIONS involving SEED GENES and THEIR INTERACTORS: " + str(counter_interactions))
print('\n'+"INTERACTIONS involving INTERACTORS only: " + str(counter_nonseeds))
print('\n' + "Computed on", datetime.datetime.now(), "from seed dataset", read_list, "and interaction dataset", file_to_read_from)
print('\n' + "Elapsed computing time: " + str(elapsed_time) + " seconds" + '\n')
|
import collections
# def _read_file():
# for row in open("input"):
# yield row
def only_unique(words):
    """Return True when no word occurs more than once in `words`.

    An empty sequence counts as valid (nothing is duplicated).
    """
    counts = collections.Counter(words)
    # BUG FIX: most_common(1)[0] raised IndexError on blank lines; guard
    # the empty case explicitly.
    if not counts:
        return True
    # most_common(1) yields the single highest (word, count) pair; a top
    # count of 1 means every word is unique.
    return counts.most_common(1)[0][1] <= 1


def count_valid_readable_code(file, anagram):
    """Count lines of `file` whose words are all unique.

    With anagram=True, words that are anagrams of each other also count
    as duplicates, because each word is normalized to its sorted letters.
    """
    valid = 0
    # FIX: close the file deterministically instead of leaking the handle.
    with open(file) as rows:
        for row in rows:
            words = row.split()
            if anagram:
                words = [''.join(sorted(word)) for word in words]
            if only_unique(words):
                valid += 1
    return valid


def count_valid_list_comprehension(file):
    """One-expression variant of the count (no anagram support)."""
    with open(file) as rows:
        # generator expression: no throwaway list needed
        return sum(1 for row in rows if only_unique(row.split()))


def main(file):
    """Print the three pass-phrase validity counts for `file`."""
    print("Duplicated passwords readable: ", count_valid_readable_code(file, anagram=False))
    print("Duplicated passwords list comprehension: ", count_valid_list_comprehension(file))
    print("Duplicated passwords readable: ", count_valid_readable_code(file, anagram=True))


if __name__ == "__main__":
    main("input")
|
#!/usr/bin/python3
import asyncio
import logging
import sys
from sqlalchemy.exc import ProgrammingError
import similar_songs
import song_creator
import tiered_song_creator
from database.Session import get_session
# Make packages installed under /opt/ importable (deployment layout).
sys.path.append("/opt/")
# Module-level logger; reconfigured (and shadowed) inside _main().
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def _main():
    """CLI entry point: parse arguments and run the requested song jobs."""
    import argument_parser
    # Log to both a file and the console.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[logging.FileHandler("log1.log"),
                  logging.StreamHandler()])
    # NOTE(review): this local logger shadows the module-level one defined
    # above; both resolve to the same named logger.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    args = argument_parser.get_args()
    # --copy short-circuits everything else: copy songs between targets.
    if (args.copy != None):
        import copy_songs
        # NOTE(review): logged at error level although args.copy is not an
        # error — presumably for visibility; confirm intent.
        logger.error(args.copy)
        copy_songs.connect(args.copy[0], args.copy[1])
        return
    session = get_session(timeout=args.wait)()
    # A negative args.number means "no limit".
    limit = float('inf')
    if args.number >= 0:
        limit = args.number
    if args.seed:
        raise Exception('Elastic search functions disabled')
    if args.continue_songs:
        if args.number < 0: # -1 and -2 are create all here
            logger.info("Creating songs")
            song_creator.batch_all(session)
            logger.info("Created songs")
        else:
            song_creator.create_in_batch(session, args.number)
    if args.merge:
        similar_songs.handle_close_songs(
            session, skip_user_input=args.user_input_disabled, limit=limit)
    if args.force_create:
        similar_songs.handle_close_songs(session,
                                         skip_user_input=False,
                                         limit=limit,
                                         force_create_new_songs=True)
    if args.tier:
        tiered_song_creator.SongCreator(session).batch_all()
    if args.elastic_tier:
        # Imported lazily: only needed when elastic tiering is requested.
        import tiered_song_elastic
        tiered_song_elastic.SongCreator(session).batch_all()
    logger.info("done")
if __name__ == "__main__":
    _main()
import sys
sys.path.append('../../python')
import inject
inject.configure()
import uuid
import json
import datetime
import logging
from model.registry import Registry
from model.connection.connection import Connection
from model.assistance.utils import Utils
from model.assistance.assistance import AssistanceModel
from model.assistance.justifications.status import Status
from model.assistance.justifications.informedAbsenceJustification import InformedAbsenceJustification, InformedAbsenceJustificationDAO
from model.assistance.justifications.compensatoryJustification import CompensatoryJustification, CompensatoryJustificationDAO
from model.assistance.justifications.outTicketJustification import OutTicketWithoutReturnJustification, OutTicketWithReturnJustification, OutTicketJustificationDAO
from model.assistance.justifications.art102Justification import Art102Justification, Art102JustificationDAO
from model.assistance.justifications.preExamJustification import UniversityPreExamJustification, SchoolPreExamJustification, PreExamJustificationDAO
from model.assistance.justifications.summerBreakJustification import SummerBreakJustification, SummerBreakJustificationDAO
from model.assistance.justifications.taskJustification import TaskWithoutReturnJustification, TaskWithReturnJustification, TaskJustificationDAO
from model.assistance.justifications.holidayJustification import HolidayJustification, HolidayJustificationDAO
from model.assistance.justifications.strikeJustification import StrikeJustification, StrikeJustificationDAO
from model.assistance.justifications.birthdayJustification import BirthdayJustification, BirthdayJustificationDAO
from model.assistance.justifications.bloodDonationJustification import BloodDonationJustification, BloodDonationJustificationDAO
from model.assistance.justifications.evaluationJustification import EvaluationJustification, EvaluationJustificationDAO
from model.assistance.justifications.scheduleJustification import ScheduleJustification, ScheduleJustificationDAO
from model.assistance.justifications.weatherJustification import WeatherJustification, WeatherJustificationDAO
from model.assistance.justifications.librarianDayJustification import LibrarianDayJustification, LibrarianDayJustificationDAO
from model.assistance.justifications.trainingJustification import TrainingJustification, TrainingJustificationDAO
from model.assistance.justifications.lateArrivalJustification import LateArrivalJustification, LateArrivalJustificationDAO
from model.assistance.justifications.authorityJustification import AuthorityJustification, AuthorityJustificationDAO
from model.assistance.justifications.resolution638Justification import Resolution638Justification, Resolution638JustificationDAO
from model.assistance.justifications.shortDurationJustification import ShortDurationJustification, ShortDurationJustificationDAO
from model.assistance.justifications.longDurationJustification import LongDurationJustification, LongDurationJustificationDAO
from model.assistance.justifications.familyAttentionJustification import FamilyAttentionJustification, FamilyAttentionJustificationDAO
from model.assistance.justifications.mourningJustification import MourningFirstGradeJustification, MourningSecondGradeJustification, MourningRelativeJustification, MourningJustificationDAO
from model.assistance.justifications.artJustification import ARTJustification, ARTJustificationDAO
from model.assistance.justifications.prenatalJustification import PrenatalJustification, PrenatalJustificationDAO
from model.assistance.justifications.winterBreakJustification import WinterBreakJustification, WinterBreakJustificationDAO
from model.assistance.justifications.paternityJustification import PaternityJustification, PaternityJustificationDAO
from model.assistance.justifications.maternityJustification import MaternityJustification, MaternityJustificationDAO
from model.assistance.justifications.marriageJustification import MarriageJustification, MarriageJustificationDAO
from model.assistance.justifications.leaveWithoutSalaryJustification import LeaveWithoutSalaryJustification, LeaveWithoutSalaryJustificationDAO
from model.assistance.justifications.suspensionJustification import SuspensionJustification, SuspensionJustificationDAO
from model.assistance.justifications.travelJustification import TravelJustification, TravelJustificationDAO
from model.assistance.justifications.medicalCertificateJustification import MedicalCertificateJustification, MedicalCertificateJustificationDAO
"""
UNDEFINED = 0
PENDING = 1
APPROVED = 2
REJECTED = 3
CANCELED = 4
"""
status = ["UNDEFINED","PENDING","APPROVED","REJECTED","CANCELED"]
def getStatus(s):
return status.index(s)
def setStatus(con, j):
    """Attach every recorded status change of the legacy request to
    justification *j* (oldest first) and persist it."""
    cur = con.cursor()
    try:
        cur.execute('select * from assistance.justifications_requests_status where request_id = %s order by created asc',(j.id,))
        for row in cur:
            logging.info('obteniendo estado : {}:{}'.format(row['status'], row['request_id']))
            st = Status(row['user_id'], row['created'])
            st.status = getStatus(row['status'])
            j.setStatus(st)
        # persist once, after the full status history is attached
        j.persist(con)
    finally:
        cur.close()
def _isContiguos(date1, date2):
"""
date.weekday() ==> 0 > Lunes, 1 > Martes, 2 > Miércoles, 3 > Jueves, 4 > Viernes, 5 > Sábado, 6 > Domingo
"""
diff = abs((date2 - date1).days)
if (diff <= 1):
return True
""" por si es fin de semana """
if (diff <= 3 and date1.weekday() == 4):
return True
return False
class SingleJustificationMigrate():
    """
    Base migrator for single-day justifications.

    Subclasses must define the class attributes:
      id    -- UUID of the justification type in the legacy schema
      dao   -- DAO class used to create the destination table
      clazz -- justification class instantiated for each migrated row
    """
    @classmethod
    def migrateAll(cls, con):
        # run the migration of every registered subclass
        for c in cls.__subclasses__():
            c.migrate(con)
    @classmethod
    def createJustification(cls, date, userId, ownerId):
        # build (but do not persist) a justification instance for one row
        j = cls.clazz(date, userId, ownerId)
        logging.info('migrando {}'.format(j.getIdentifier()))
        return j
    @classmethod
    def getStock(cls, con):
        """Return the legacy stock rows (user_id, stock, calculated) for
        this justification type, or [] when there are none."""
        cur = con.cursor()
        try:
            cur.execute('select user_id, stock, calculated from assistance.justifications_stock where justification_id = %s',(cls.id,))
            if cur.rowcount <= 0:
                return []
            else:
                return cur.fetchall()
        finally:
            cur.close()
    @classmethod
    def updateStock(cls, con):
        # no-op by default; overridden by subclasses that track stock
        pass
    @classmethod
    def migrate(cls, con):
        """Create the new table and copy every legacy request of this type,
        skipping rows whose id already exists in the new schema."""
        cur = con.cursor()
        try:
            # create the destination table/schema
            cls.dao._createSchema(con)
            cls.updateStock(con)
            cur.execute('select id, user_id, requestor_id, jbegin from assistance.justifications_requests where justification_id = %s',(cls.id,))
            if cur.rowcount <= 0:
                return
            for jr in cur:
                logging.info('obteniendo justificacion : {}:{}'.format(jr['id'], jr['requestor_id']))
                userId = jr['user_id']
                ownerId = jr['requestor_id']
                date = jr['jbegin']
                just = cls.createJustification(date, userId, ownerId)
                just.id = jr['id']
                # only persist when this id has not been migrated already
                if (len(just.findById(con,[just.id])) <= 0):
                    setStatus(con, just)
        finally:
            cur.close()
# Concrete single-day migrators: each binds the legacy justification type
# UUID to the DAO that creates the new table and to the justification class
# instantiated per migrated row.
class InformedAbsenceMigrate(SingleJustificationMigrate):
    id = 'e0dfcef6-98bb-4624-ae6c-960657a9a741'
    dao = InformedAbsenceJustificationDAO
    clazz = InformedAbsenceJustification
class CompensatoryMigrate(SingleJustificationMigrate):
    id = '48773fd7-8502-4079-8ad5-963618abe725'
    dao = CompensatoryJustificationDAO
    clazz = CompensatoryJustification
    @classmethod
    def updateStock(cls, con):
        # compensatory days keep a per-user stock; copy it to the new schema
        result = cls.getStock(con)
        for r in result:
            cls.clazz.updateStock(con, r['user_id'], r['stock'], r['calculated'])
class Art102Migrate(SingleJustificationMigrate):
    id = "4d7bf1d4-9e17-4b95-94ba-4ca81117a4fb"
    dao = Art102JustificationDAO
    clazz = Art102Justification
class HolidayMigrate(SingleJustificationMigrate):
    id = "5ec903fb-ddaf-4b6c-a2e8-929c77d8256f"
    dao = HolidayJustificationDAO
    clazz = HolidayJustification
class StrikeMigrate(SingleJustificationMigrate):
    id = "874099dc-42a2-4941-a2e1-17398ba046fc"
    dao = StrikeJustificationDAO
    clazz = StrikeJustification
class BirthdayMigrate(SingleJustificationMigrate):
    id = "b309ea53-217d-4d63-add5-80c47eb76820"
    dao = BirthdayJustificationDAO
    clazz = BirthdayJustification
class BloodDonationMigrate(SingleJustificationMigrate):
    id = "e8019f0e-5a70-4ef3-922c-7c70c2ce0f8b"
    dao = BloodDonationJustificationDAO
    clazz = BloodDonationJustification
class EvaluationMigrate(SingleJustificationMigrate):
    id = "5289eac5-9221-4a09-932c-9f1e3d099a47"
    dao = EvaluationJustificationDAO
    clazz = EvaluationJustification
class ScheduleMigrate(SingleJustificationMigrate):
    id = "3fb52f24-3eff-4ca2-8133-c7a3abfc7262"
    dao = ScheduleJustificationDAO
    clazz = ScheduleJustification
class WeatherMigrate(SingleJustificationMigrate):
    id = "3d486aa0-745a-4914-a46d-bc559853d367"
    dao = WeatherJustificationDAO
    clazz = WeatherJustification
class LibrarianDayMigrate(SingleJustificationMigrate):
    id = "5c548eab-b8fc-40be-bb85-ef53d594dca9"
    dao = LibrarianDayJustificationDAO
    clazz = LibrarianDayJustification
class TrainingMigrate(SingleJustificationMigrate):
    id = "508a9b3a-e326-4b77-a103-3399cb65f82a"
    dao = TrainingJustificationDAO
    clazz = TrainingJustification
class LateArrivalMigrate(SingleJustificationMigrate):
    id = "7e180d9d-0ef1-48a7-9f3f-26a0170cc2f7"
    dao = LateArrivalJustificationDAO
    clazz = LateArrivalJustification
class AuthorityMigrate(SingleJustificationMigrate):
    id = "c32eb2eb-882b-4905-8e8f-c03405cee727"
    dao = AuthorityJustificationDAO
    clazz = AuthorityJustification
class RangedJustificationMigrate():
    """
    Base migrator for multi-day (ranged) justifications.

    Subclasses define: id (legacy type UUID), dao (destination table
    creator) and clazz (a justification class taking start date + day
    count). Consecutive one-day legacy rows of the same user are
    collapsed into a single ranged justification.
    """
    @classmethod
    def migrateAll(cls, con):
        # run the migration of every registered subclass
        for c in cls.__subclasses__():
            c.migrate(con)
    @classmethod
    def createJustification(cls, userId, ownerId, start, days):
        # build (but do not persist) one ranged justification
        j = cls.clazz(start, days, userId, ownerId)
        logging.info('migrando {}'.format(j.getIdentifier()))
        return j
    @classmethod
    def migrate(cls, con):
        """Group contiguous legacy rows per user and persist one ranged
        justification per group."""
        cur = con.cursor()
        try:
            # create the destination table
            cls.dao._createSchema(con)
            cur.execute('select id, user_id, requestor_id, jbegin from assistance.justifications_requests where justification_id = %s order by user_id, jbegin asc',(cls.id,))
            if cur.rowcount <= 0:
                return
            userId = None
            days = 0
            for jr in cur:
                logging.info('obteniendo justificacion : {}:{}'.format(jr['id'], jr['requestor_id']))
                # first row seen: open the first group
                if userId is None:
                    userId = jr['user_id']
                    ownerId = jr['requestor_id']
                    start = jr['jbegin'].date()
                    end = jr['jbegin'].date()
                """ si cambio de usuario o los dias no son contiguos persisto los datos """
                # user changed or the dates are not contiguous: close and
                # persist the group accumulated so far
                if userId != jr['user_id'] or not _isContiguos(end, jr['jbegin'].date()):
                    days = (end - start).days + 1
                    just = cls.createJustification(userId, ownerId, start, days)
                    # NOTE(review): the persisted group is given the id of
                    # the row that STARTS the *next* group, not one of its
                    # own rows — confirm this is intended.
                    just.id = jr["id"]
                    if (len(just.findById(con,[just.id])) <= 0):
                        setStatus(con, just)
                    """ inicializo los datos """
                    # reset the accumulator for the new group
                    userId = jr['user_id']
                    ownerId = jr['requestor_id']
                    start = jr['jbegin'].date()
                end = jr['jbegin'].date()
            """ persisto el ultimo que me quedo """
            # persist the last open group (uses jr from the final iteration)
            days = (end - start).days + 1
            just = cls.createJustification(userId, ownerId, start, days)
            just.id = jr["id"]
            if (len(just.findById(con,[just.id])) <= 0):
                setStatus(con, just)
        finally:
            cur.close()
# Concrete ranged migrators: legacy type UUID -> DAO + justification class.
# NOTE(review): MourningMigrate always migrates as MourningFirstGrade and
# PreExamMigrate as UniversityPreExam even though second-grade / school
# variants are imported — confirm the legacy data never needs them.
class ShortDurationMigrate(RangedJustificationMigrate):
    id = 'f9baed8a-a803-4d7f-943e-35c436d5db46'
    dao = ShortDurationJustificationDAO
    clazz = ShortDurationJustification
class LongDurationMigrate(RangedJustificationMigrate):
    id = "a93d3af3-4079-4e93-a891-91d5d3145155"
    dao = LongDurationJustificationDAO
    clazz = LongDurationJustification
class FamilyAttentionMigrate(RangedJustificationMigrate):
    id = "b80c8c0e-5311-4ad1-94a7-8d294888d770"
    dao = FamilyAttentionJustificationDAO
    clazz = FamilyAttentionJustification
class MourningMigrate(RangedJustificationMigrate):
    id = "0cd276aa-6d6b-4752-abe5-9258dbfd6f09"
    dao = MourningJustificationDAO
    clazz = MourningFirstGradeJustification
class ARTMigrate(RangedJustificationMigrate):
    id = "70e0951f-d378-44fb-9c43-f402cbfc63c8"
    dao = ARTJustificationDAO
    clazz = ARTJustification
class PrenatalMigrate(RangedJustificationMigrate):
    id = "aa41a39e-c20e-4cc4-942c-febe95569499"
    dao = PrenatalJustificationDAO
    clazz = PrenatalJustification
class WinterBreakMigrate(RangedJustificationMigrate):
    id = "f7464e86-8b9e-4415-b370-b44b624951ca"
    dao = WinterBreakJustificationDAO
    clazz = WinterBreakJustification
class PaternityMigrate(RangedJustificationMigrate):
    id = "e249bfce-5af3-4d99-8509-9adc2330700b"
    dao = PaternityJustificationDAO
    clazz = PaternityJustification
class MaternityMigrate(RangedJustificationMigrate):
    id = "68bf4c98-984d-4b71-98b0-4165c69d62ce"
    dao = MaternityJustificationDAO
    clazz = MaternityJustification
class MarriageMigrate(RangedJustificationMigrate):
    id = "30a249d5-f90c-4666-aec6-34c53b62a447"
    dao = MarriageJustificationDAO
    clazz = MarriageJustification
class LeaveWithoutSalaryMigrate(RangedJustificationMigrate):
    id = "1c14a13c-2358-424f-89d3-d639a9404579"
    dao = LeaveWithoutSalaryJustificationDAO
    clazz = LeaveWithoutSalaryJustification
class SuspensionMigrate(RangedJustificationMigrate):
    id = "bfaebb07-8d08-4551-b264-85eb4cab6ef1"
    dao = SuspensionJustificationDAO
    clazz = SuspensionJustification
class TravelMigrate(RangedJustificationMigrate):
    id = "7747e3ff-bbe2-4f2e-88f7-9cc624a242a9"
    dao = TravelJustificationDAO
    clazz = TravelJustification
class MedicalCertificateMigrate(RangedJustificationMigrate):
    id = "478a2e35-51b8-427a-986e-591a9ee449d8"
    dao = MedicalCertificateJustificationDAO
    clazz = MedicalCertificateJustification
class PreExamMigrate(RangedJustificationMigrate):
    id = 'b70013e3-389a-46d4-8b98-8e4ab75335d0'
    dao = PreExamJustificationDAO
    clazz = UniversityPreExamJustification
class LAOMigrate(RangedJustificationMigrate):
    id = '76bc064a-e8bf-4aa3-9f51-a3c4483a729a'
    dao = SummerBreakJustificationDAO
    clazz = SummerBreakJustification
class Resolution638Migrate(RangedJustificationMigrate):
    id = '50998530-10dd-4d68-8b4a-a4b7a87f3972'
    dao = Resolution638JustificationDAO
    clazz = Resolution638Justification
def createBS(con):
    """
    Migrate the "Boleta de Salida" (exit-ticket) justifications.

    Rows without an end time become OutTicketWithoutReturnJustification;
    rows with one become OutTicketWithReturnJustification. Rows already
    present in the new schema are skipped.
    """
    cur = con.cursor()
    try:
        logging.info("Migrando las Boleta de Salida")
        # create the destination table
        OutTicketJustificationDAO._createSchema(con)
        # legacy id of the "Boleta de Salida" justification type
        id = 'fa64fdbd-31b0-42ab-af83-818b3cbecf46'
        cur.execute('select id, user_id, requestor_id, jbegin, jend from assistance.justifications_requests where justification_id = %s',(id,))
        for jr in cur:
            logging.info('obteniendo justificacion : {}:{}'.format(jr['id'], jr['requestor_id']))
            userId = jr['user_id']
            ownerId = jr['requestor_id']
            date = jr['jbegin']
            end = jr['jend']
            if end is None:
                just = OutTicketWithoutReturnJustification(date, userId, ownerId)
            else:
                just = OutTicketWithReturnJustification(date, end, userId, ownerId)
            just.id = jr['id']
            # only persist when this id has not been migrated already
            if (len(just.findById(con,[just.id])) <= 0):
                setStatus(con, just)
    finally:
        cur.close()
def createTask(con):
    """
    Migrate the "Boleta en Comisión" (task/commission ticket) justifications.

    When the legacy row has no end time, the end of the user's work
    schedule for that day is used instead (see getEndSchedule).
    """
    cur = con.cursor()
    try:
        logging.info("Migrando las Boleta en Comisión")
        # create the destination table
        TaskJustificationDAO._createSchema(con)
        # legacy id of the "Boleta en Comisión" justification type
        id = 'cb2b4583-2f44-4db0-808c-4e36ee059efe'
        cur.execute('select id, user_id, requestor_id, jbegin, jend from assistance.justifications_requests where justification_id = %s',(id,))
        for jr in cur:
            logging.info('obteniendo justificacion : {}:{}'.format(jr['id'], jr['requestor_id']))
            userId = jr['user_id']
            ownerId = jr['requestor_id']
            start = jr['jbegin']
            end = jr['jend']
            if end is None:
                # no return recorded: fall back to the scheduled end of day
                end = getEndSchedule(userId, start)
                just = TaskWithoutReturnJustification(start, end, userId, ownerId)
            else:
                just = TaskWithReturnJustification(start, end, userId, ownerId)
            just.id = jr['id']
            # only persist when this id has not been migrated already
            if (len(just.findById(con,[just.id])) <= 0):
                setStatus(con, just)
    finally:
        cur.close()
def getEndSchedule(userId, date):
    """Return the end of the user's work schedule on *date*, localized
    when naive.

    Raises Exception when the user has no schedule for that date.
    NOTE(review): relies on the module-level `con` bound inside the
    __main__ block (and on `assistance`/`Utils` imported elsewhere) —
    only callable while the migration script is running.
    """
    # fetch the work periods covering the requested day
    wps = assistance.getWorkPeriods(con, [userId], date, date)
    wpsList = wps[userId]
    if len(wpsList) <= 0:
        raise Exception('No tiene un horario para la fecha ingresada')
    # take the end of the first schedule; localize if it is naive
    end = wpsList[0].getEndDate()
    end = Utils._localizeLocal(end) if Utils._isNaive(end) else end
    return end
# model singleton used by getEndSchedule()
assistance = inject.instance(AssistanceModel)
if __name__ == '__main__':
    # entry point: migrate every justification type in one transaction
    logging.getLogger().setLevel(logging.INFO)
    reg = inject.instance(Registry)
    conn = Connection(reg.getRegistry('dcsys'))
    try:
        con = conn.get()
        createBS(con)
        createTask(con)
        SingleJustificationMigrate.migrateAll(con)
        RangedJustificationMigrate.migrateAll(con)
        # commit only after every migrator succeeded
        con.commit()
    finally:
        conn.put(con)
|
def print_board(board):
    """Print each row of *board* on its own line."""
    for row in board:
        print(row)
def check_diagonal(row, col, N, board):
    '''
    Helper to is_safe: scan both left-hand diagonals of (row, col).
    Returns False as soon as a queen (1) is found, True otherwise.
    '''
    # up-left diagonal
    r, c = row - 1, col - 1
    while r >= 0 and c >= 0:
        if board[r][c] == 1:
            return False
        r, c = r - 1, c - 1
    # down-left diagonal
    r, c = row + 1, col - 1
    while r < N and c >= 0:
        if board[r][c] == 1:
            return False
        r, c = r + 1, c - 1
    return True
def is_safe(row, col, N, board):
    '''
    Return True when no queen to the left attacks square (row, col).
    '''
    # a queen already placed on this row, in an earlier column?
    if any(board[row][c] == 1 for c in range(col)):
        return False
    # a queen on either left diagonal?
    return check_diagonal(row, col, N, board)
def place(col, N, board):
    '''
    Fill *board* with one N-queens solution, column by column.
    Returns the (mutated) board on success, False when no queen fits.
    '''
    if col == N:
        # every column holds a queen: done
        return board
    for row in range(N):
        if not is_safe(row, col, N, board):
            continue
        board[row][col] = 1              # tentatively place a queen
        if place(col + 1, N, board) == board:
            return board
        board[row][col] = 0              # dead end: backtrack
    return False
def place_2(col, N, board, solutions):
    '''
    Collect every N-queens solution into *solutions* (mutated in place).
    Returns *solutions* for convenience.
    '''
    if col == N:
        # store a deep copy: *board* keeps being mutated by backtracking
        solutions.append([row[:] for row in board])
        return solutions
    for i in range(N):
        if is_safe(i, col, N, board):
            board[i][col] = 1
            # recurse; results accumulate in *solutions* via mutation
            # (the old code computed `solutions + place_2(...)` and threw
            # the concatenated list away — pure wasted work)
            place_2(col + 1, N, board, solutions)
            board[i][col] = 0
    return solutions
def solve(N):
    """Return every solution to the N-queens problem as a list of boards."""
    empty_board = [[0] * N for _ in range(N)]
    return place_2(0, N, empty_board, [])
def get_pos(array):
    '''
    Helper to format_func: translate a 0/1 board into 1-based queen row
    numbers, reading column by column.
    '''
    size = len(array)
    return [r + 1
            for c in range(size)
            for r in range(size)
            if array[r][c] == 1]
def format_func(solutions, N):
    '''
    Render the solutions as '[r1 r2 ... ] [...] ' strings, the layout
    geeksforgeeks expects. (*N* is unused but kept for compatibility.)
    '''
    pieces = []
    for sol in solutions:
        rows = get_pos(sol)
        pieces.append('[' + ''.join(str(r) + ' ' for r in rows) + '] ')
    return ''.join(pieces)
# Read t test cases; for each board size N print all N-queens solutions
# (or -1 when none exist).
t = int(input())
for i in range(t):
    N = int(input())
    ans = solve(N)
    if ans == []:
        print('-1')
    else:
        print(format_func(ans, N))
|
# Read the 'unidade' file: skip a fixed-size header, then advance line by
# line until the requested block number is reached, and print that line.
# The file is now closed deterministically via a context manager (the old
# code leaked the handle).
with open('unidade', 'r') as f:
    # jump past the fixed header region (offset in bytes)
    f.seek(121509 + 24300 + 3)
    n = int(input('Qual bloco?'))
    block = -1
    conteudo = ''
    while (block != n):
        conteudo = f.readline()
        block += 1
print('conteudo da linha:')
print(conteudo)
"""Reverse_Tool URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from . import view
urlpatterns = [
    # Django admin site
    path('admin/', admin.site.urls),
    # site root
    url(r'^$', view.hello),
    # NOTE(review): the patterns below are not anchored with '^', so they
    # match anywhere inside the requested path — confirm this is intended.
    url(r'common/', include('common.urls')),
    url(r'formattree/', view.getFormatTree),
    url(r'fileUpload/', view.fileUpload),
    url(r'queryFileNum/', view.getFileNum),
    url(r'queryFileSize', view.getFileSize),
    url(r'queryFileLists', view.getFileLists)
]
|
'''
A dual-core CPU: both cores work in parallel, each processes 1 kb per
second and runs one task at a time. Given n task lengths (each a multiple
of 1024 kb), schedule them on the two cores so the overall completion
time is minimal, and print that time.

This is the classic partition problem solved as a 0/1 knapsack: find the
largest total load not exceeding half of the overall work; the other core
carries the remainder, which determines the completion time.

Fixes over the original: `sum(w) / 2 + 1` is a float in Python 3 (TypeError
inside range), and the script printed the knapsack value (the *smaller*
core load) instead of the completion time (the larger one) — wrong for
inputs such as [7168, 3072].
'''
def min_time(lengths):
    """Return the minimal completion time, in 1024-kb units, for
    scheduling *lengths* (kb, multiples of 1024) on two cores."""
    sizes = [x // 1024 for x in lengths]   # scale to 1024-kb units
    total = sum(sizes)
    half = total // 2                      # capacity: best balanced load
    # 1-D 0/1 knapsack: best[c] = max load <= c achievable with the tasks
    best = [0] * (half + 1)
    for s in sizes:
        for cap in range(half, s - 1, -1):
            best[cap] = max(best[cap], best[cap - s] + s)
    # one core carries best[half]; the other, total - best[half] (the max)
    return total - best[half]

w = [0, 3072, 3072, 7168, 3072, 1024]  # task sizes in kb
print(min_time(w))
|
# /usr/bin
# Kirk Boyer : 2016 Google Code Jam
# Counting Sheep Problem : Qualifying Round
import sheepSolvers
import sys
import os.path
if __name__ == "__main__":
# pick solver based on input
input_name = sys.argv[2]
solver_name = sys.argv[1]
solver = getattr(sheepSolvers, solver_name)
prefix = os.path.splitext(input_name)[0]
output_name = prefix + ".out"
# get input list from file
with open(input_name, 'r') as f:
num_cases = int(f.readline())
cases = [solver(int(line)) for line in f]
with open(output_name, "w+") as fout:
for i in range(num_cases):
cases[i].solve()
# print("Case #{num}: {ans}".format(num=i, ans=cases[i].answer))
fout.write("Case #{num}: {ans}\n".format(
num=i+1,
ans=cases[i].answer))
|
import re, csv, json, os, requests
from lxml import etree
from pprint import pprint
from dateutil import parser
from rdflib import URIRef, Literal, Namespace, Graph
from rdflib.namespace import RDF, OWL, SKOS, DCTERMS, XSD, RDFS, FOAF
from config import EXIST, GRAPHDB
# Ontology namespaces referenced when building triples.
ELI = Namespace("http://data.europa.eu/eli/ontology#")
OIR = Namespace("http://oireachtas.ie/ontology#")
CEN = Namespace("http://www.metalex.eu/metalex/2008-05-02#")
FRBR= Namespace("http://purl.org/vocab/frbr/core#")
ORG = Namespace("http://www.w3.org/ns/org#")
DBO = Namespace("http://dbpedia.org/ontology/")
TIME = Namespace("http://www.w3.org/2006/time#")
# Base namespaces for minted resource URIs.
oir = Namespace("http://oireachtas.ie/")
eli = Namespace("http://oireachtas.ie/eli/ie/oir/")
DATADIR = "../data"
# XML namespace map for Akoma-Ntoso XPath queries.
NS = {'akn': "http://docs.oasis-open.org/legaldocml/ns/akn/3.0/CSD13"}
def initialize_graph(g):
    """Bind the prefixes (oir, dcterms) used by newly created graphs."""
    g.bind("oir", OIR)
    g.bind("dcterms", DCTERMS)
def debate_triples(g, fp):
    """Convert one Akoma-Ntoso debate-record XML file (*fp*) into RDF
    triples on graph *g*.

    Emits the debate record, its debate/question sections, the speeches
    and questions inside each section, and their paragraphs, linked with
    oir:part / oir:partOf.
    """
    root = etree.parse(fp).getroot()
    date = root.find(".//{*}FRBRWork/{*}FRBRdate").attrib['date']
    house = root.find(".//{*}FRBRWork/{*}FRBRauthor").attrib['href'][1:]
    debate_uri = root.find(".//{*}FRBRWork/{*}FRBRuri").attrib['value'][1:].replace("akn/ie", "ie/oireachtas")
    g.add((oir[debate_uri], RDF.type, OIR.DebateRecord))
    g.add((oir[debate_uri], DCTERMS.date, Literal(date, datatype=XSD.date)))
    g.add((oir[debate_uri], OIR.inChamber, oir[house]))
    for dbsect in root.xpath(".//akn:debateSection[@name='debate']|.//akn:debateSection[@name='question']", namespaces=NS):
        dbs_uri = debate_uri + "/" + dbsect.attrib['eId']
        heading = dbsect.find("./{*}heading").text
        debate_type = dbsect.attrib['name']
        g.add((oir[dbs_uri], RDF.type, OIR.DebateSection))
        g.add((oir[dbs_uri], OIR.partOf, oir[debate_uri]))
        g.add((oir[debate_uri], OIR.part, oir[dbs_uri]))
        g.add((oir[dbs_uri], OIR.debateType, OIR[debate_type]))
        g.add((oir[dbs_uri], DCTERMS.title, Literal(heading)))
        try:
            bill_uri = dbsect.attrib['refersTo'][1:].replace(".", "/")
            g.add((oir[dbs_uri], OIR.subject, oir[bill_uri]))
            g.add((oir[bill_uri], OIR.subjectOf, oir[dbs_uri]))
        except KeyError:
            # this section does not refer to a bill
            pass
        for spk in dbsect.xpath(".//akn:speech|.//akn:question", namespaces=NS):
            contrib_uri = debate_uri + "/" + spk.attrib['eId']
            pId = spk.attrib['by'][1:]
            contrib_type = spk.tag.split("}")[-1].title()
            # BUG FIX: was OIR.contrib_type, which minted the literal term
            # 'contrib_type' and ignored the computed class name
            # (Speech / Question).
            g.add((oir[contrib_uri], RDF.type, OIR[contrib_type]))
            g.add((oir[contrib_uri], OIR.partOf, oir[dbs_uri]))
            g.add((oir[dbs_uri], OIR.part, oir[contrib_uri]))
            if len(pId)>0:
                member = root.find(".//{*}TLCPerson[@eId='"+pId+"']").attrib['href'][1:]
                g.add((oir[contrib_uri], OIR.madeBy, oir[member]))
                g.add((oir[member], OIR.made, oir[contrib_uri]))
            if "as" in spk.attrib.keys():
                # NOTE(review): `role` is resolved but never added to the
                # graph — confirm whether a triple was intended here.
                role = root.find(".//{*}TLCRole[@eId='"+spk.attrib['as'][1:]+"']").attrib['href'][1:]
            if "to" in spk.attrib.keys():
                questionee = root.find(".//{*}TLCRole[@eId='"+spk.attrib['to'][1:]+"']").attrib['href'][1:]
                g.add((oir[contrib_uri], OIR.questionTo, oir[questionee]))
            if "refersTo" in spk.attrib and spk.attrib['refersTo'].startswith("#pq"):
                pq_uri = debate_uri + "/" + spk.attrib['refersTo'][1:]
                g.add((oir[contrib_uri], OIR.answerTo, oir[pq_uri]))
                # BUG FIX: parentheses were misplaced —
                # g.add((oir[pq_uri, OIR.answer, oir[contrib_uri]])) indexed
                # the namespace with a tuple instead of adding a triple.
                g.add((oir[pq_uri], OIR.answer, oir[contrib_uri]))
            for para in spk.findall(".//{*}p"):
                p_uri = debate_uri + "/" + para.attrib['eId']
                g.add((oir[p_uri], RDF.type, OIR.DebateParagraph))
                g.add((oir[p_uri], OIR.partOf, oir[contrib_uri]))
                g.add((oir[contrib_uri], OIR.part, oir[p_uri]))
def get_debate_records():
    """Load the master debate-record index and keep only Dáil records."""
    index_root = etree.parse('../data/debateRecords_1919-2015.xml').getroot()
    return index_root.xpath("debateRecord[@house='dail']")
def serialize_graph(g, file_name):
    """Write graph *g* as Turtle to DATADIR/file_name."""
    destination = os.path.join(DATADIR, file_name)
    g.serialize(destination=destination, format="turtle")
def main():
    """Convert every Dáil debate XML file to Turtle and push it to GraphDB.

    Stops at the first file GraphDB does not accept (non-202 response).
    """
    meta = "../data_2016-06-05/dail"
    for fp in os.listdir(meta):
        g= Graph()
        initialize_graph(g)
        debate_triples(g, os.path.join(meta, fp))
        # NOTE(review): file_name already starts with DATADIR, and
        # serialize_graph() joins DATADIR again; this only resolves
        # correctly because '../data/../data/...' collapses back to
        # '../data/...'. Same for the file:/// URL below — confirm.
        file_name = os.path.join(DATADIR, "debatesRDF/dail", fp.replace(".xml", ".ttl"))
        serialize_graph(g, file_name)
        import_path = "data/import/url/oireachtas"
        data = {"context":"urn:debates",
                "url": "file:///"+os.path.join(DATADIR, file_name),
                "baseURI": "http://oireachtas.ie",
                "dataFormat":"turtle"}
        r = requests.post(GRAPHDB+import_path, data=data)
        # 202 Accepted is GraphDB's normal answer; abort on anything else
        if r.status_code != 202:
            print(fp, "\n", r.text)
            break
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
__author__ = "Lee.le"
from Views.tools import *
from django.shortcuts import render, HttpResponse
import json
def progress(request):
    """
    Render the test-case page: which cases are done and which are pending.

    :param request: Django HTTP request; POST is currently ignored
    :return: rendered case.html with done/undone case lists and counts
    """
    if request.method == 'POST':
        pass
    config = configparser.ConfigParser()
    config_Path = (os.path.abspath(os.path.join(os.getcwd(), "../multi_processframe/config.ini")))  # configuration file path
    config.read(config_Path, encoding='utf-8-sig')
    # config.read(path, encoding='GBK')
    temp = config.get('config', "progress").split(",")
    donelist = []
    undonelist = []
    # split the known cases into done / pending using the recorded progress
    for k, v in case_translate().items():
        if v in temp:
            donelist.append(k)
        else:
            undonelist.append(k)
    return render(request, "case.html", {'addr': get_addr(), "donelist": donelist, 'data': case_translate(), 'undonelist':undonelist, 'donenum': len(donelist), 'undonenum': len(undonelist)})
def get_case(request):
    """Polling endpoint: report test progress while reports are generated.

    Returns the done/pending lists as JSON while any report is missing,
    or {"notdo": 404} once everything is finished (frontend stops polling).
    """
    config = configparser.ConfigParser()
    config_Path = (os.path.abspath(os.path.join(os.getcwd(), "../multi_processframe/config.ini")))  # configuration file path
    config.read(config_Path, encoding='utf-8-sig')
    # config.read(path, encoding='GBK')
    temp = config.get('config', "progress").split(",")
    donelist = []
    undonelist = []
    # split the known cases into done / pending using the recorded progress
    for k, v in case_translate().items():
        if v in temp:
            donelist.append(k)
        else:
            undonelist.append(k)
    reportlist, htmlname = getreport()  # fetch all generated reports
    temp = 0  # flag for the frontend: 1 = keep partial-loading, 0 = done
    for i in htmlname:  # if any report html is still missing, keep polling
        file_Path = os.path.join(os.getcwd(), f"static/Report/{i}/{i}.html")  # expected report location
        if not os.path.exists(file_Path):
            temp = 1
            break
    if temp == 1:  # some report still pending: return progress to the frontend
        return HttpResponse(json.dumps({"donelist": donelist, 'undonelist':undonelist, 'donenum': len(donelist), 'undonenum': len(undonelist)}))
    else:  # nothing pending: 404 sentinel tells the frontend to stop polling
        notdo = 404
        return HttpResponse(json.dumps({"notdo": notdo}))
|
__author__ = 'Mansoori'
from subprocess import call
from prism_trim import prism_trim
import ConfigParser
confige_file = './config.cfg'
class prism_trim_parser():
    """Trim pathways into PRISM models and collect node probabilities.

    NOTE(review): the bare `prism_trim;` statement below is a no-op (it
    merely evaluates the imported name) — likely a leftover declaration.
    The class-level list/dict attributes are shared across instances;
    __init__ rebinds prism_trim and pathway_prob per instance.
    """
    prism_trim;
    prism_path = '.\data\prism output';
    pathway_prob = [];
    all_gene_pathway_prob = {}
    def __init__(self):
        # per-instance trimmer and probability accumulator (shadow the
        # shared class-level attributes)
        self.prism_trim = prism_trim();
        self.pathway_prob = [];
def var_print(self, vars=[], svar=None, firsts=[], relations={}):
if(svar not in vars):
vars.append(svar);
seconds = relations.keys();
for fvar in firsts:
if (fvar in seconds) and (fvar not in vars) :
fs = relations[fvar];
new_firsts = [];
for fsi in fs :
new_firsts.append(fs[fsi]['first']);
self.var_print(vars, fvar, new_firsts, relations);
for fvar in firsts :
if (fvar not in vars):
vars.append(fvar);
def find_path(self,root, pvars=[], relations={}):
if(root not in pvars):
pvars.append(root);
seconds = relations.keys();
if(root in seconds):
root_relations = relations[root];
for fri in root_relations:
fr = root_relations[fri]['first'];
if (fr not in pvars):
self.find_path(fr, pvars, relations);
def remove_unwanted_vars(self, all_vars=[], firsts={}, seconds={}):
for k in seconds.keys():
if(k not in all_vars):
ss = seconds[k];
for s in ss:
fi = ss[s]['first'];
if (fi in firsts):
ff = firsts[fi];
ks = ff.keys();
for cnt in ks:
if(ff[cnt]['second']==k):
firsts[fi].pop(cnt);
if(fi in firsts and (firsts[fi]) == {}) :
firsts.pop(fi);
seconds.pop(k);
    def calculate_score(self, pathway=None, degs=None, all_genes=None):
        """Iteratively trim *pathway* and accumulate the probabilities of
        its end points; return their sum.

        degs      -- differentially expressed genes (dict)
        all_genes -- background gene set (dict)
        NOTE(review): Python 2 code (uses the `print '.',` statement).
        """
        relations = pathway.get_relations();
        proteins = pathway.get_proteins();
        components = pathway.get_compounds();
        actions = pathway.get_actions();
        groups = pathway.get_groups();
        #free pathway probability
        self.pathway_prob = [];
        #find pathway last points: a node that is never a source ('first')
        #is an end point of the pathway
        pathway_last_points = [];
        first_of_pathways = pathway.get_rrelations();
        seconds_of_pathways = pathway.get_relations();
        for s in seconds_of_pathways.keys():
            if (s not in first_of_pathways.keys()) :
                pathway_last_points.append(s);
        # add variable: collect every node id appearing on either side
        all_vars = [];
        for s in seconds_of_pathways.keys():
            all_vars.append(s);
        for f in first_of_pathways.keys():
            if f not in all_vars:
                all_vars.append(f);
        old_last_points = {}
        #copy pathway relations (one level deep, so trimming does not
        #modify the pathway's own dictionaries)
        firsts = {}
        firsts_temp = pathway.get_rrelations().copy();
        for f in firsts_temp:
            l = {}
            ll = firsts_temp [f];
            for g in ll:
                l[g] = ll[g];
            firsts[f] = l;
        #copy pathway relations
        seconds = {}
        seconds_temp = pathway.get_relations().copy();
        for s in seconds_temp:
            l = {}
            ll = seconds_temp[s];
            for g in ll:
                l[g] = ll[g];
            seconds[s] = l;
        self.prism_trim.set_firsts_seconds(firsts, seconds);
        self.prism_trim.set_all_variables(all_vars);
        # repeatedly trim until no trim point remains
        while (True):
            trim_points = [];
            begins = {}
            ppoints = [];
            new_last_points = {};
            self.prism_trim.trim(old_last_points, begins, trim_points, new_last_points);
            if len(trim_points) == 0 :
                break;
            else:
                #after trim, the trim points are the variables that should be in the prism file
                #the probability of the last points should be calculated.
                self.calcute_trim_prob(old_last_points, new_last_points, trim_points, pathway, degs, all_genes);
                old_last_points.clear();
                for n in new_last_points:
                    old_last_points[n] = new_last_points[n];
                    # record probabilities reaching actual pathway endpoints
                    if(n in pathway_last_points):
                        self.pathway_prob.append(new_last_points[n]['prob4']);
                print '.',
        # total probability over all pathway end points
        sum_prob = 0.0;
        for pp in self.pathway_prob:
            sum_prob += float(pp);
        #if float(pp) > max :
        #    max = float(pp);
        return sum_prob;
def run_prism_file_in_linux(self, filename=None, profile=None):
path = '/home/ahmadreza/fatima/prism-4.3-src/bin/data';
#ppath = 'C:/Program Files/prism-4.1.beta2/bin';
#call([ppath+"\\prism "+path+'\\'+filename+' '+path+'\\prop.csl >out']);
#f = open(ppath+'/test1.sm','r');
#call(["myprism.bat",path+'/hsa03320.sm"',path+'/prop.csl"','>'+path+'/out"']);
#true call
call(["prism", path+"/"+filename, path+'/'+profile, '>/out' ]);
f = open('/out', 'r');
for line in f.readlines():
if(line.find('Result:')!=-1):
s = line.split(' ');
return s[1];
def run_prism_file_in_windows(self, filename=None, profile=None):
# read the config file
config = ConfigParser.ConfigParser()
config.read(confige_file)
FoPA_path = config.get('paths', 'FoPA_path')
prism_result = config.get('paths', 'prism_result')
path = FoPA_path+'/data/prism output';
#call(["myprism.bat", path+'/'+filename, path+'/'+profile, '>'+prism_result+'/out' ]);
call(["myprism.bat", './data/prism output/' + filename, './data/prism output/' + profile, '>'+prism_result+'/out']);
f = open(prism_result+'/out', 'r');
for line in f.readlines():
if(line.find('Result:')!=-1):
s = line.split(' ');
return s[1];
def build_prism_model(self, pathway=None, all_vars=[], last_points={}, degs=None, all_genes=None):
relations = pathway.get_relations();
proteins = pathway.get_proteins();
components = pathway.get_compounds();
actions = pathway.get_actions();
groups = pathway.get_groups();
filename = 'hsa'+pathway.id+'.sm';
f = open(self.prism_path+'/'+filename, 'w')
f.write('dtmc\n')
f.write('module '+"hsa"+pathway.id+'\n');
degnodes = degs.keys()
all_vars_names = []
protein_genes = pathway.get_protein_genes();
for vid in all_vars:
if vid in proteins:
vname = proteins[vid]['name'];
#get the genes of the vname
genes = []
if vname in protein_genes:
genes = protein_genes[vname];
else:
genes.append(vname);
if vname not in all_vars_names:
all_vars_names.append(vname);
isDEG = False;
isAll = False;
for gene in genes:
if gene in degnodes :
isDEG = True;
elif gene in all_genes.keys():
isAll = True;
if (isDEG==True) :
f.write(vname+': [-1..4] init -1; \n');
elif (isAll == True) :
f.write( vname+': [-1..4] init -1; \n');
else:
f.write( vname+': [-1..4] init -1; \n');
elif vid in components :
vname = components[vid]['name'];
if(vname not in all_vars_names):
all_vars_names.append(vname);
f.write( components[vid]['name']+': [0..4] init 2; \n');
elif vid in groups:
for comp in groups[vid]['components']:
if comp in proteins:
vname = proteins[comp]['name'];
if(vname not in all_vars_names):
all_vars_names.append(vname);
if vname in degs.keys() :
f.write(vname+': [-1..4] init -1; \n');
elif vname in all_genes.keys() :
f.write( vname+': [-1..4] init -1; \n');
else:
f.write( vname+': [-1..4] init -1; \n');
elif vid in self.need_action.values():
#vname = self.need_action[vid];
all_vars_names.append(vid);
f.write(vid+':[0..4] init 0;\n');
#for each variable add tree state
for vid in all_vars:
if vid in proteins:
vname = proteins[vid]['name'];
genes = []
if vname in protein_genes:
genes = protein_genes[vname];
else:
genes.append(vname);
isDEG = False;
isAll = False;
for gene in genes:
if gene in degnodes:
isDEG = True;
elif gene in all_genes.keys() :
isAll = True;
if (vid == '16'):
vid = vid;
if(vid in last_points.keys()):
prob3 = float(last_points[vid]['prob3']);
prob4 = float(last_points[vid]['prob4']) ;
rprob = 1-prob3-prob4;
f.write('[] ' + vname + '=-1 ->' + str(prob3) + ':(' + vname + "'=3)+" + str(prob4) + ":(" + vname + "'=4)+" +
str(rprob) + ":(" + vname + "'=0);\n")
else:
if (isDEG == True):
f.write('[] ' + vname + '=-1 -> 0.05:(' + vname + "'=1)+0.95:(" + vname + "'=2);\n")
elif isAll == True :
f.write('[] '+vname+'=-1 -> 0.05:('+vname+"'=0)+0.05:("+vname+"'=2)+0.9:("+vname+"'=1);\n")
else :
f.write(
'[] ' + vname + '=-1 -> 0.05:(' + vname + "'=1)+0.95:(" + vname + "'=0);\n")
#find the inhibition relations
inhibits = {};
first = True;
for r in relations:
c = relations[r];
inhibit = ""
for k in c:
el = c[k]
if(el['type_name'] == 'inhibition'):
id = el['first'];
if(id in proteins):
p = proteins[id];
elif(id in components):
p = components[id];
#TO DO: search in groups
else:
p = None;
if(p != None):
if(first):
inhibit = "(" + p['name'] + '<3 &' + p['name'] + '> -1';
first = False
else :
inhibit = inhibit + '&' + p['name']+'<3 &' + p['name'] + ' > -1';
if(first != True):
inhibit = inhibit + ')';
first = True
#r indicate the second part of a relation
inhibits[r] = inhibit;
winhibit = {}
firstsp = {}
secondsp = {}
#for each activation add a prism command
for r in relations:
c = relations[r];
for k in c:
el = c[k]
if(el['type']=='PPrel' or el['type']=='GErel'):
if(el['type_name'] == 'activation' or el['type_name']=='expression' or el['type_name'] == 'binding/association'
or el['type_name']=='indirect effect' or el['type_name'] == 'missing interaction' or el['type_name']=='compound' or
el['type_name'] == 'phosphorylation'):
idf = el['first'];
ids = el['second'];
if((idf in proteins) and (ids in proteins) and (idf in all_vars) and (ids in all_vars)):
pf = proteins[idf];
ps = proteins[ids];
firstsp[idf] = pf;
secondsp[ids] = ps;
#calulate the probability of the relation. the protein probability multiply the relation probability
prob = float(el['prob']) * float(pf['prob']);
probstr = '(('+pf['name']+'+'+ps['name']+'-3)/6)*'+str(prob);
st = '[] '+ '('+pf['name']+'=3 | '+pf['name']+'=4)'+' & '+ '('+ps['name']+'=1 | '+ps['name']+'=2)';
if(inhibits[r] != ''):
winhibit[r] = True;
st = st + ' & '+inhibits[r];
f.write(st + ' -> ' + probstr + ':(' + ps['name'] + '\'=((' + pf['name'] + '=3&' + ps['name'] + '=1)?3:4)) +' + '1-(' + probstr + ')' + ':(' + ps['name'] + '\'= 0' + ');\n');
elif(((idf in groups) or (ids in groups))and(idf in all_vars) and (ids in all_vars)):
stn1 = '';
stn2 = '' ;
stn3 = '';
stsn = '';
stfn = '';
probf = 0;
probs = 0;
if(idf in groups and len(groups[idf]['components'])>1):
firstsp[idf] = groups[idf];
first = True;
stf = '(';
stfn = 'max('
for comp in groups[idf]['components']:
if comp in proteins :
pgf = proteins[comp];
if (first==True) :
stf += pgf['name'] + '>2 ';
stfn += pgf['name'];
first = False;
else:
stf += '& ' + pgf['name'] + '>2';
stfn += ',' + pgf['name'];
probf = probf+float(pgf['prob']);
stf += ')';
stfn += ')=3';
probf = probf / 6*len(groups[idf]['components']);
elif(idf in proteins):
pf = proteins[idf];
firstsp[idf] = pf;
probf = float(pf['prob']);
stf = '('+pf['name'] + '>2)';
stfn = pf['name'] + '=3';
elif(len(groups[idf]['components'])==1):
pid = groups[idf]['components'][0];
pf = proteins[pid];
firstsp[idf] = pf;
probf = float(pf['prob']);
stf = '('+pf['name'] + '>2)';
stfn = pf['name'] + '=3';
sts = '';
if(ids in groups and len(groups[ids]['components'])>1):
secondsp[ids] = groups[ids];
first = True;
sts = '(';
stsn = 'max(';
for comp in groups[ids]['components']:
if comp in proteins:
if(first==True):
first = False;
else:
sts += '& ';
stsn += ','
pgs = proteins[comp];
sts += '('+ pgs['name'] + '=1 |'+pgs['name']+'=2)';
stsn += pgs['name'];
probs += pgs['prob'];
sts += ')';
stsn +=')=1';
first = True;
stn1 = ''
stn2 = ''
stn3 = ''
for comp in groups[ids]['components']:
if comp in proteins:
if(first == True):
first = False;
else:
stn1 += '&' ;
stn2 += '&';
stn3 += '&';
stn1 += '('+proteins[comp]['name']+'\'=(('+stfn+'&'+stsn+')?3:4)) ';
stn2 += '('+proteins[comp]['name']+'\'= 0'+')';
stn3 += '(' + proteins[comp]['name'] + '\'=0' + ')';
elif(ids in proteins):
ps = proteins[ids];
secondsp[ids] = ps;
probs = probs+float(ps['prob']);
sts = '('+ps['name'] +'=1 |'+ps['name']+'=2)';
stsn = ps['name'] +'=1';
stn1 = '('+ps['name'] + '\'=(('+stfn+'&'+stsn+')?3:4))';
stn2 = '('+ps['name'] + '\'=0)';
stn3 = '(' + ps['name'] + '\'=0' + ')';
elif(ids in groups):
if(len(groups[ids]['components'])==1):
pid = groups[ids]['components'][0];
ps = proteins[pid];
secondsp[ids] = ps;
sts = '('+ps['name'] +'=1 |'+ps['name']+'=2)';
stsn = ps['name'] +'=1';
stn1 = '('+ps['name'] + '\'=(('+stfn+'&'+stsn+')?3:4))';
stn3 = '(' + ps['name'] + '\'=0' +')';
prob = probf * float(el['prob']);
f.write('[]' + stf + '&' + sts + ' -> ' + str(prob) + ':' + stn1 + ' + 1-('+str(prob)+')'+':'+stn3+';\n');
elif(el['type_name'] == 'inhibition' and len(c)==1):
idf = el['first'];
ids = el['second'];
if((idf in proteins) and (ids in proteins) and (idf in all_vars) and (ids in all_vars)):
pf = proteins[idf];
ps = proteins[ids];
firstsp[idf] = pf;
secondsp[ids] = ps;
prob = float(el['prob']) * float(pf['prob']);
probstr = '((' + pf['name'] + '+' + ps['name'] + '-3)/6)*' + str(prob);
st = '[]'+ pf['name']+' >-1 &'+pf['name']+' < 3 & ('+ ps['name']+'=1|'+ps['name']+'=2)';
st2 = '[]'+ pf['name']+' > 2 & ('+ ps['name']+'=3|'+ps['name']+'=4)';
f.write(st + ' -> ' + probstr + ':(' + ps['name'] + '\'=' + ps[
'name'] + '+2) +' + '1-(' + probstr + ')' + ':(' + ps['name'] + '\'=0' + ');\n');
f.write(st2 + ' -> ' + probstr + ':(' + ps['name'] + '\'=' + ps[
'name'] + '-2) +' + '1-(' + probstr + ')' + ':(' + ps['name'] + '\'=0' + ');\n');
for n in firstsp:
if((n not in secondsp) and (n in all_vars) and (n not in last_points.keys())):
if(n not in groups):
if ( n in proteins) :
probstr = '((' + firstsp[n]['name'] + '+1)/6)*0.95*' + str(firstsp[n]['prob']);
else:
probstr = '((' + firstsp[n]['name'] + '+1)/6)*0.95';
f.write(
'[] ' + firstsp[n]['name'] + '=1|' + firstsp[n]['name'] + '=2 ->' + probstr + ':(' + firstsp[n][
'name'] + '\'=' + firstsp[n]['name'] + '+2)+' + '1-(' + probstr + ')' + ':(' + firstsp[n][
'name'] + '\'=0' + ');\n');
else:
cmp = groups[n]['components'];
fst = '';
sst = '';
sst2 = '';
first = True;
prob = 0;
for c in cmp :
if c in proteins:
if(first==True):
fst = '('+proteins[c]['name']+'=1|'+proteins[c]['name']+'=2)';
sst = '('+proteins[c]['name']+'\'='+proteins[c]['name']+'+2)';
#sst2 = '(' + proteins[c]['name'] + '\'=' + proteins[c]['name'] +')';
sst2 = '(' + proteins[c]['name'] + '\'=0' + ')';
first = False;
else:
fst += '& ('+proteins[c]['name']+'=1|'+proteins[c]['name']+'=2)';
sst += '& (' + proteins[c]['name']+'\'='+proteins[c]['name']+'+2)';
#sst2 += '& (' + proteins[c]['name'] + '\'=' + proteins[c]['name'] + ')';
sst2 += '& (' + proteins[c]['name'] + '\'=0' + ')';
prob = prob + proteins[c]['prob'];
prob = prob / 6*len(cmp);
probstr = str(prob*0.95);
f.write('[] '+fst+ '->'+probstr+':'+sst+'+'+'1-('+probstr+'):'+sst2+';\n');
first_of_actions = [];
for k in actions.keys():
if k in self.need_action.values():
elm = actions[k];
for a in elm:
ent1 = actions[k][a]['entry1'];
if(ent1 in all_vars and k in all_vars_names):
if(proteins.has_key(ent1)):
f.write('[] '+proteins[ent1]['name']+'=3|'+proteins[ent1]['name']+'=4 -> 0.9:('+k+'\'='+proteins[ent1]['name']+' )+0.1:true;\n');
if(ent1 not in firstsp)and(ent1 not in secondsp):
f.write('[]'+proteins[ent1]['name']+'=1|'+proteins[ent1]['name']+'=2 ->0.8:('+proteins[ent1]['name']+'\'='+proteins[ent1]['name']+'+2)+0.2:('+proteins[ent1]['name']+'\'=0);\n');
else:
cmps = pathway.get_compounds();
if(cmps.has_key(ent1)):
f.write('[] '+cmps[ent1]['name']+'=3|'+cmps[ent1]['name']+'=4 -> 0.9:('+k+'\'='+cmps[ent1]['name']+' )+0.1:true;\n');
f.write('endmodule\n');
f.close();
return filename;
#calculate the last points of trim points probability
def calcute_trim_prob(self, old_last_points={}, new_last_points={}, trim_points=[], pathway=None, degs=None, all_genes=None):
relations = pathway.get_relations();
proteins = pathway.get_proteins();
components = pathway.get_compounds();
actions = pathway.get_actions();
groups = pathway.get_groups();
filename = self.build_prism_model(pathway, trim_points, old_last_points, degs, all_genes);
#build properties
for pointid in new_last_points:
if(str(pointid) in proteins.keys()):
vname = proteins[str(pointid)]['name'];
pro = 'P=? [F<1000(' + vname + '= 3 )]'
profile = 'prop.csl';
prof = open(self.prism_path+'/'+profile, 'w');
prof.write(pro+'\n');
prof.close();
res = self.run_prism_file_in_windows(filename, 'prop.csl');
if (res != None):
new_last_points[pointid]['prob4'] = res;
else:
new_last_points[pointid]['prob4'] = '0.0';
pro = 'P=? [F<1000(' + vname + '= 4 )]'
profile = 'prop.csl';
prof = open(self.prism_path + '/' + profile, 'w');
prof.write(pro + '\n');
prof.close();
res = self.run_prism_file_in_windows(filename, 'prop.csl');
if (res != None):
new_last_points[pointid]['prob4'] = res;
else:
new_last_points[pointid]['prob4'] = '0.0';
|
"""
Имя проекта: practicum_1
Номер версии: 1.0
Имя файла: 24.py
Автор: 2020 © Ю.А. Мазкова, Челябинск
Лицензия использования: CC BY-NC 4.0 (https://creativecommons.org/licenses/by-nc/4.0/deed.ru)
Дата создания: 10/12/2020
Дата последней модификации: 10/12/2020
Связанные файлы/пакеты: numpy, random
Описание: Решение задач № 1-101 практикума № 1
Даны вещественные положительные числа a, b, c, d. Выясните, может ли прямоугольник со сторонами a,b уместиться внутри прямоугольника со сторонами c,d так, чтобы каждая сторона внутреннего прямоугольника была параллельна или перпендикулярна стороне внешнего прямоугольника.
#версия Python: 3.9.0
"""
a = int(input("Введите сторону а прямоугольника:"))
b = int(input("Введите сторону b прямоугольника:"))
c = int(input("Введите сторону c прямоугольника:"))
d = int(input("Введите сторону d прямоугольника:"))
if a <= 0 or b <= 0 or c <= 0 or d <= 0:
print("Сторона прямоугольника не может быть отрицательной")
if ((a < c and b < d) or (a < d and b < c)):
print("Прямоугольник a, b сможет уместиться внутри прямоугольника c, d")
else:
print("Прямоугольник a, b НЕ сможет уместиться внутри прямоугольника c, d")
|
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
from guild import util
class REnvError(Exception):
    """Raised when the local R environment is missing or unusable."""
    pass
class RScriptProcessError(Exception):
    """Raised when an Rscript subprocess exits with a non-zero status."""
    def __init__(self, error_output, returncode):
        # error_output: decoded stderr from the failed Rscript invocation.
        # returncode: the subprocess exit code.
        self.error_output = error_output
        self.returncode = returncode
def is_r_script(opspec):
    """Return True if *opspec* names an existing file ending in .R/.r."""
    has_r_extension = opspec[-2:].upper() == ".R"
    return has_r_extension and os.path.isfile(opspec)
def verify_r_env():
    """Verify R and the 'guildai' R package are installed.

    Raises:
        REnvError: if either the R interpreter or the package check fails.
    """
    _verify_r()
    _verify_r_package()
def _verify_r():
    """Raise REnvError if the Rscript executable is not on PATH."""
    rscript_path = util.which("Rscript")
    if rscript_path:
        return
    raise REnvError(
        "R is not installed on this system. Refer to "
        "https://www.r-project.org/ for details."
    )
def _verify_r_package(min_version="0.0.0.9001"):
    """Verify the 'guildai' R package is installed and recent enough.

    Raises:
        REnvError: when the package is missing or older than *min_version*.
    """
    installed_version = r_package_version()
    if not installed_version:
        raise REnvError(
            "R package 'guildai' is not installed\n"
            "Install it by running 'guild run r-script:init' and try again."
        )
    # BUG FIX: compare versions numerically, component by component. The
    # previous plain string comparison mis-orders versions such as
    # '0.0.0.10001' vs '0.0.0.9001'.
    if _version_key(installed_version) < _version_key(min_version):
        raise REnvError(
            # Also fixes the misplaced quote in the message ("...'0.1)'").
            f"R package 'guildai' is too old (got version '{installed_version}')\n"
            "Upgrade the package by running 'guild run r-script:init' and try again."
        )


def _version_key(version):
    # Numeric sort key for an R version string ('.' or '-' separated).
    return [int(part) for part in re.split(r"[.-]", version) if part.isdigit()]
def r_package_version():
    """Return the installed 'guildai' R package version, or '' when absent."""
    expr = (
        'cat(if(requireNamespace("guildai")) '
        'getNamespaceVersion("guildai") else "")'
    )
    return run_r(expr)
def run_r(
    *exprs,
    file=None,
    infile=None,
    vanilla=True,
    args=None,
    default_packages="base",
    **run_kwargs,
):
    """Run R code in a subprocess, return stderr+stdout output in a single string.

    This has different defaults from `Rscript`, designed for isolated,
    fast invocations.

    Args:
      `exprs`: strings of individual R expressions to be evaluated sequentially
      `file`: path to an R script
      `infile`: multiline string of R code, piped into Rscript frontend via stdin.
    """
    _check_run_args(exprs, file, infile)

    cmd = ["Rscript"]
    if default_packages:
        cmd.append(f"--default-packages={default_packages}")
    if vanilla:
        cmd.append("--vanilla")

    # Exactly one source of R code was supplied (checked above).
    if file:
        cmd.append(file)
    elif exprs:
        for expression in exprs:
            cmd += ["-e", expression]
    elif infile:
        cmd.append("-")
        run_kwargs["input"] = infile.encode()

    if args:
        cmd += list(args)

    run_kwargs.setdefault("capture_output", True)
    try:
        completed = subprocess.run(cmd, check=True, **run_kwargs)
    except subprocess.CalledProcessError as e:
        raise RScriptProcessError(e.stderr.decode(), e.returncode) from None
    return completed.stdout.decode()
def _check_run_args(exprs, file, infile):
    """Ensure exactly one of exprs/file/infile was supplied."""
    supplied = [bool(exprs), bool(file), bool(infile)]
    if supplied.count(True) != 1:
        raise TypeError(
            "exprs, file, and infile, are mutually exclusive - only supply one"
        )
def r_script_version():
    """Return the version string reported by `Rscript --version`.

    Raises:
        ValueError: when the output cannot be parsed.
    """
    out = subprocess.check_output(
        ["Rscript", "--version"],
        stderr=subprocess.STDOUT,
    ).decode()
    match = re.search(r"R scripting front-end version (.*)", out)
    if match:
        return match.group(1)
    raise ValueError(f"unknown version ({out})")
|
import sys
#import argpase
import numpy as np
def friendsneeded(shynessfrequency):
    """Return the minimum number of friends needed so everyone stands.

    shynessfrequency: string of digits; digit i is the number of audience
    members with shyness level i (they stand only once at least i people
    are already standing). Classic Code Jam "Standing Ovation".
    """
    counts = np.array([int(ch) for ch in shynessfrequency])
    cumulative = counts.cumsum()
    additional = 0
    # Walk shyness levels in order; whenever fewer people are standing than
    # level i requires, recruit just enough friends (shyness 0) to reach i.
    # (Idiomatic range(1, n) replaces the original range(len(x))[1:], and the
    # dead pre-loop assignment is gone.)
    for i in range(1, len(counts)):
        standing = cumulative[i - 1] + additional
        if i > standing:
            additional += i - standing
    return additional
def getdigits(number):
    """Return the decimal digits of a non-negative int, least significant first.

    Pure-integer implementation: the previous int(log10(number)) size
    computation crashed on 0 and is fragile near powers of ten due to
    floating-point rounding.
    """
    if number == 0:
        return [0]
    digits = []
    while number > 0:
        digits.append(number % 10)
        number //= 10
    return digits
def count(inputline):
    """Solve one Code Jam "Counting Sheep" case.

    Returns (as a string) the first multiple of the input number at which
    every decimal digit 0-9 has been seen across the multiples so far, or
    "INSOMNIA" for 0 (whose multiples never show any new digit).
    """
    orgnumber = int(inputline)
    if orgnumber == 0:
        return "INSOMNIA"
    seendigits = set()
    n = 0
    while len(seendigits) < 10:
        n += 1
        number = n * orgnumber
        # set.update replaces the original element-by-element add loop.
        seendigits.update(getdigits(number))
    return str(number)
if "__main__" == __name__:
print(sys.argv[1])
inputfile = sys.argv[1]
out = []
with open(inputfile, 'r') as f:
T = int(f.readline())
for _ in range(T):
out.append(count(f.readline()))
with open("out_"+inputfile, 'w') as f:
for i, o in enumerate(out):
f.write("Case #{}: {}\n".format(i+1, o))
print("Case #{}: {}\n".format(i+1, o))
print(out) |
from pyDatalog import pyDatalog
# Declare the logic variables and predicate/fact terms used below.
pyDatalog.create_terms('X,Y')
pyDatalog.create_terms('trash') # type of item
pyDatalog.create_terms('plasticContainer,paperContainer,organicContainer,glassContainer,mixedContainer') # types of containers
pyDatalog.create_terms('plastic,glass,paper,cardboard,organic') # material
pyDatalog.create_terms('color,elastic,fragile,white,containsInk') # properties
# Facts: observed confidence scores (0..1) for each unknown object's properties.
trash['unknownObject2']=1
color['unknownObject2']=1
fragile['unknownObject2']=0.8
trash['unknownObject1']=1
color['unknownObject1']=1
elastic['unknownObject1']=1
trash['unknownObject3']=1
white['unknownObject3']=0.8
containsInk['unknownObject3']=0.6
# Rules: classify an object's material from its observed properties...
plastic(X) <= (trash[X]==1) & (color[X]==1) & (elastic[X]>0.8)
glass(X) <= (trash[X]==1) & (color[X]==1) & (fragile[X]>0.5)
paper(X) <= (trash[X]==1) & (white[X]>0.5) & (containsInk[X]>0.3)
# ...and route each material to its matching container.
glassContainer(X) <= glass(X)
plasticContainer(X) <= plastic(X)
paperContainer(X) <= paper(X)
# Query: print the stored trash fact for unknownObject2.
print(trash['unknownObject2'])
|
from lost_and_found_app import models
from django.forms import ModelForm
class PostCreateForm(ModelForm):
    """Form for creating a lost-and-found Post (the 'found' flag is not yet editable)."""
    class Meta:
        model = models.Post
        fields = ("title", "description", "author_name", "author_phone")
class PostEditForm(ModelForm):
    """Form for editing an existing Post; also exposes the 'found' flag."""
    class Meta:
        model = models.Post
        fields = ("title", "description", "author_name", "author_phone", "found")
class ObjectForm(ModelForm):
    """Form exposing every field of the Object model."""
    class Meta:
        model = models.Object
        fields = "__all__"
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
from functools import partial
import attr
from commoncode.cliutils import PluggableCommandLineOption
from commoncode.cliutils import OTHER_SCAN_GROUP
from commoncode.cliutils import SCAN_OPTIONS_GROUP
from plugincode.scan import ScanPlugin
from plugincode.scan import scan_impl
@scan_impl
class UrlScanner(ScanPlugin):
    """
    Scan a Resource for URLs.
    """
    # Each scanned resource is annotated with a list of detected URLs.
    resource_attributes = dict(urls=attr.ib(default=attr.Factory(list)))

    sort_order = 10

    options = [
        PluggableCommandLineOption(('-u', '--url',),
            is_flag=True, default=False,
            help='Scan <input> for urls.',
            help_group=OTHER_SCAN_GROUP),

        PluggableCommandLineOption(('--max-url',),
            type=int, default=50,
            metavar='INT',
            required_options=['url'],
            show_default=True,
            help='Report only up to INT urls found in a file. Use 0 for no limit.',
            help_group=SCAN_OPTIONS_GROUP),
    ]

    def is_enabled(self, url, **kwargs):
        # The scan runs only when --url was passed on the command line.
        return url

    def get_scanner(self, max_url=50, **kwargs):
        # Bind the per-file URL cap into the scanning callable.
        from scancode.api import get_urls
        return partial(get_urls, threshold=max_url)
|
class Solution:
    def minMoves(self, target: int, maxDoubles: int) -> int:
        """Minimum ops to reach `target` from 1 using increments and at most
        `maxDoubles` doublings (LeetCode 2139), computed greedily backwards:
        undo a doubling whenever target is even and doublings remain,
        otherwise decrement.
        """
        res = 0
        while target != 1:
            if maxDoubles == 0:
                # Only decrements remain: exactly target-1 more moves.
                return res + target - 1
            if target % 2 == 1:
                target -= 1
            else:
                # Floor division keeps target an int (the original used `/`,
                # silently turning target into a float).
                target //= 2
                maxDoubles -= 1
            res += 1
        return res
if __name__ == '__main__':
    # Smoke test: expected output is 4 (1 -> 2 -> 4 -> 5 -> 10).
    print(Solution().minMoves(10, 4))
|
# ////////////////////////////////////////////////////////////////////////////
# ----------------------------------------------------------------------------
#
# Amnesia - Layer 1 binary analysis system.
# Copyright (c) 2013 Tristan Strathearn (r3oath@gmail.com)
# Website: www.r3oath.com
#
# ----------------------------------------------------------------------------
# ////////////////////////////////////////////////////////////////////////////
import sys
import re
import pefile
import peutils
import pydasm
import struct
# ----------------------------------------------------------------------------
print """
_____ __ __ __ __ _____ ______ __ _____
/\___/\ /_/\ /\_\ /_/\ /\_\ /\_____\/ ____/\ /\_\ /\___/\\
/ / _ \ \ ) ) \/ ( ( ) ) \ ( (( (_____/) ) __\/ \/_/ / / _ \ \\
\ \(_)/ //_/ \ / \_\/_/ \ \_\\\\ \__\ \ \ \ /\_\\\\ \(_)/ /
/ / _ \ \\\\ \ \\\\// / /\ \ \ / // /__/_ _\ \ \ / / // / _ \ \\
( (_( )_) ))_) )( (_( )_) \ (_(( (_____\)____) )( (_(( (_( )_) )
\/_/ \_\/ \_\/ \/_/ \_\/ \/_/ \/_____/\____\/ \/_/ \/_/ \_\/
Layer 1 binary analysis system.
Created by Tristan Strathearn (www.r3oath.com)
"""
# ----------------------------------------------------------------------------
def getInput(question, default_answer='', override=False):
    """Prompt the user and return their stripped reply.

    If default_answer is non-empty it is shown in brackets and returned on an
    empty reply; otherwise the prompt repeats until something is typed. With
    override=True the question text is shown verbatim and an empty reply
    returns default_answer.
    """
    # Idiomatic boolean (was `True if ... else False`).
    required = default_answer == ''
    if default_answer == '':
        formatted_string = '%s: ' % question
    else:
        formatted_string = '%s [%s]: ' % (question, default_answer)
    if override:
        formatted_string = '%s ' % question
        required = False
    while True:
        answer = raw_input(formatted_string).strip()
        if answer:
            return answer
        if not required:
            return default_answer
def print_(message):
    # Thin wrapper over `print` so output handling can be changed in one place.
    print '%s' % message
def reportError(message):
    # Print an error in the consistent "ERROR: ...!" format.
    print_('ERROR: %s!' % message)
def spacer():
    # Emit a blank line for visual separation.
    print_('')
def drawLine():
    # Draw an 80-column horizontal rule (no trailing newline).
    sys.stdout.write('>' * 80)
def getDataDump(file_):
    """Return the raw bytes of file_ (opened in binary mode)."""
    # `with` guarantees the handle is closed even if read() raises; the
    # original leaked the descriptor on a read error.
    with open(file_, 'rb') as f:
        return f.read()
# ----------------------------------------------------------------------------
# Hehe.
class ChocolateLog:
    """Collects log lines and lets the user page through them or save them."""
    # NOTE(review): this class-level list is immediately shadowed by the
    # instance attribute assigned in __init__ and is effectively unused.
    log = []
    def __init__(self):
        self.log = []
    def add(self, message):
        # Append one line to the in-memory log.
        self.log.append(message)
    def writeToFile(self):
        # Ask for a filename and dump the whole log, one entry per line.
        file_ = getInput('Please specify a file name')
        try:
            f = open(file_, 'w')
            for entry in self.log:
                f.write('%s\n' % entry)
            f.close()
        except:
            reportError('ChocolateLog failed to write to "%s"' % file_)
    def interactiveOutput(self):
        # Page through the log `step_size` lines at a time; the user can
        # scroll (Enter), (S)ave to a file, or (Q)uit.
        choice = getInput('Save to file instead of viewing?', 'N').upper()
        if choice.count('Y') > 0:
            self.writeToFile()
            return
        print_('Press Enter to scroll down, (S)ave or (Q)uit.')
        spacer()
        base = 0
        step_size = 20
        while True:
            index = 0
            # Print up to step_size entries starting at `base`.
            for entry in self.log[base:]:
                print_(entry)
                index += 1
                if index >= step_size:
                    break
            base += index
            index = 0
            if base >= len(self.log):
                break
            choice = getInput('...',
                override=True).upper()
            if choice.count('Q') > 0:
                return
            elif choice.count('S') > 0:
                self.writeToFile()
                return
            else:
                continue
# ----------------------------------------------------------------------------
SUBJECT = None  # pefile.PE instance for the currently loaded binary
ORIG_FILE = None  # path of the currently loaded binary
def loadSubject():
    # Prompt for a PE file and load it; on failure, report and retry
    # (recursively) until a loadable file is given.
    global SUBJECT
    global ORIG_FILE
    ORIG_FILE = getInput('Please enter the file to process')
    try:
        SUBJECT = pefile.PE(ORIG_FILE)
        print_('Loaded the binary "%s"' % ORIG_FILE)
    except:
        reportError('Cannot open the file "%s"' % ORIG_FILE)
        loadSubject()
loadSubject()
# ----------------------------------------------------------------------------
def strings_display_all():
    """Extract and display every printable-ASCII string in the binary."""
    try:
        min_ = int(getInput('Minimum string length', '5'))
        max_ = int(getInput('Maximum string length', '999'))
    except ValueError:
        reportError('Please specify only numeric values')
        return
    clog = ChocolateLog()
    clog.add('Strings:')
    pattern = '[\x20-\x7F]{%i,%i}' % (min_, max_)
    for found in re.findall(pattern, getDataDump(ORIG_FILE)):
        found = found.strip()
        if found:
            clog.add('\t%s' % found)
    clog.interactiveOutput()
def strings_search():
    """Display printable-ASCII strings in the binary that contain a keyword."""
    try:
        min_ = int(getInput('Minimum string length', '5'))
        max_ = int(getInput('Maximum string length', '999'))
    except ValueError:
        reportError('Please specify only numeric values')
        return
    keyword = getInput('Keyword to search for').lower()
    clog = ChocolateLog()
    clog.add('Strings containing the keyword "%s":' % keyword)
    pattern = '[\x20-\x7F]{%i,%i}' % (min_, max_)
    for found in re.findall(pattern, getDataDump(ORIG_FILE)):
        found = found.strip()
        if found.lower().count(keyword) > 0:
            clog.add('\t%s' % found)
    clog.interactiveOutput()
def misc_verify_checksum():
    # Report whether the PE header checksum matches the computed one.
    verified = SUBJECT.verify_checksum()
    if verified: print_('Checksum is valid.')
    else: print_('Checksum is invalid.')
def misc_generate_checksum():
    # Compute and display the PE checksum for the loaded binary.
    print_('Checksum: 0x%08X' % SUBJECT.generate_checksum())
def imports_dump_all():
    """List every import (normal and delayed) of the loaded binary."""
    def _dump(descriptors):
        # Shared formatter for normal and delayed import descriptors
        # (was duplicated verbatim for both directories).
        for desc in descriptors:
            clog.add('\t%s' % desc.dll)
            for import_ in desc.imports:
                if import_.import_by_ordinal is not True:
                    clog.add('\t\t%s' % import_.name)
                else:
                    clog.add('\t\tOrdinal: %i' % import_.ordinal)
    clog = ChocolateLog()
    clog.add('Imports:')
    _dump(getattr(SUBJECT, 'DIRECTORY_ENTRY_IMPORT', []))
    clog.add('Delayed Imports:')
    # Not every PE has a delay-import directory; the original raised
    # AttributeError in that case.
    _dump(getattr(SUBJECT, 'DIRECTORY_ENTRY_DELAY_IMPORT', []))
    clog.interactiveOutput()
def imports_search():
    """List imports (normal and delayed) whose name contains a keyword."""
    keyword = getInput('Keyword to search for').lower()
    clog = ChocolateLog()
    clog.add('Imports containing the keyword "%s":' % keyword)
    def _search(descriptors):
        # Shared matcher for both import directories (was duplicated).
        for desc in descriptors:
            for import_ in desc.imports:
                # Ordinal-only imports have no name to match against.
                if import_.import_by_ordinal is not True:
                    if import_.name.lower().count(keyword) > 0:
                        clog.add('\t%s (%s)' % (import_.name, desc.dll))
    _search(getattr(SUBJECT, 'DIRECTORY_ENTRY_IMPORT', []))
    # Missing delay-import directory no longer raises AttributeError.
    _search(getattr(SUBJECT, 'DIRECTORY_ENTRY_DELAY_IMPORT', []))
    clog.interactiveOutput()
def exports_dump_all():
    """List every exported symbol of the loaded binary."""
    def _fmt(symbol):
        # Render one export-table symbol as a display line, covering the
        # four name/forwarder combinations.
        if symbol.name is not None:
            if symbol.forwarder is None:
                return '\tOrdinal: %i / %s' % (symbol.ordinal, symbol.name)
            return '\t%s -> %s' % (symbol.name, symbol.forwarder)
        if symbol.forwarder is None:
            return '\tOrdinal: %i / %s' % (symbol.ordinal, '<Exported by ordinal>')
        return '\tOrdinal %i -> %s' % (symbol.ordinal, symbol.forwarder)
    try:
        clog = ChocolateLog()
        clog.add('Exports:')
        for symbol in SUBJECT.DIRECTORY_ENTRY_EXPORT.symbols:
            clog.add(_fmt(symbol))
        clog.interactiveOutput()
    # Missing DIRECTORY_ENTRY_EXPORT raises AttributeError; keep the catch
    # broad but explicit (the original used a bare `except:`).
    except Exception:
        print_('Could not be processed (binary may not export anything)')
def exports_search():
    """List exported symbols whose name contains a keyword."""
    try:
        keyword = getInput('Keyword to search for').lower()
        clog = ChocolateLog()
        clog.add('Exports containing the keyword "%s":' % keyword)
        for symbol in SUBJECT.DIRECTORY_ENTRY_EXPORT.symbols:
            # Nameless (ordinal-only) exports can never match a keyword;
            # after these guards symbol.name is always set, so the
            # original's duplicated "name is None" branch was unreachable
            # and has been removed.
            if symbol.name is None:
                continue
            if symbol.name.lower().count(keyword) == 0:
                continue
            if symbol.forwarder is None:
                clog.add('\tOrdinal: %i / %s' %
                    (symbol.ordinal, symbol.name))
            else:
                clog.add('\t%s -> %s' %
                    (symbol.name, symbol.forwarder))
        clog.interactiveOutput()
    # Broad-but-explicit catch (was a bare `except:`): a binary with no
    # export directory raises AttributeError above.
    except Exception:
        print_('Could not be processed (binary may not export anything)')
def exports_build_vs_pragma_forwards():
    """Generate Visual Studio #pragma linker lines forwarding every export."""
    s_by_name = '#pragma comment(linker, "/export:%s=%s.%s")\n'
    s_by_ord = '#pragma comment(linker, "/export:ord%i=%s.#%i,@%i,NONAME")\n'
    try:
        new_dll = getInput('DLL name to forward too').lower()
        # BUG FIX: str.strip('.dll') removes any of the characters '.', 'd',
        # 'l' from BOTH ends (e.g. 'llama.dll' -> 'ama'); remove only a
        # trailing '.dll' suffix instead.
        if new_dll.endswith('.dll'):
            new_dll = new_dll[:-len('.dll')]
        clog = ChocolateLog()
        for symbol in SUBJECT.DIRECTORY_ENTRY_EXPORT.symbols:
            if symbol.name is not None:
                clog.add(s_by_name % (symbol.name, new_dll, symbol.name))
            else:
                clog.add(s_by_ord %
                    (symbol.ordinal, new_dll, symbol.ordinal, symbol.ordinal))
        clog.interactiveOutput()
    # Broad-but-explicit catch (was a bare `except:`).
    except Exception:
        print_('Could not be processed (binary may not export anything)')
def __assembly_offset(offset):
    """Format a (non-negative) byte offset as an 8-digit uppercase hex string."""
    return format(offset, '08X')
def assembly_disassemble():
    # Disassemble `max_bytes` bytes starting at the entry point (OEP),
    # emitting a raw `db` byte directive wherever pydasm cannot decode.
    try:
        max_bytes = int(getInput('Number of bytes to disassemble'))
    except ValueError:
        reportError('Please specify only numeric values')
        return
    clog = ChocolateLog()
    OEP = SUBJECT.OPTIONAL_HEADER.AddressOfEntryPoint
    # Virtual address of the entry point (for operand formatting below).
    OEP_base = OEP + SUBJECT.OPTIONAL_HEADER.ImageBase
    data = SUBJECT.get_memory_mapped_image()[OEP:]
    offset = 0
    while offset < max_bytes:
        ins = pydasm.get_instruction(data[offset:], pydasm.MODE_32)
        if ins is None:
            # Undecodable byte: dump it as data and resync one byte on.
            asm = 'db %02x' % ord(data[offset])
            clog.add('%s\t%s' % (__assembly_offset(offset), asm))
            offset += 1
            continue
        asm = pydasm.get_instruction_string(ins, pydasm.FORMAT_INTEL,
            OEP_base + offset)
        clog.add('%s\t%s' % (__assembly_offset(offset), asm))
        offset += ins.length
    clog.interactiveOutput()
def metadata_subject_overview():
    # Print a one-screen summary of the loaded binary: type, packer
    # heuristics, entry point, checksums, sections and per-DLL import counts.
    if SUBJECT.is_exe() is True:
        print_('Binary "%s" is an EXE' % ORIG_FILE)
    if SUBJECT.is_dll() is True:
        print_('Binary "%s" is a DLL' % ORIG_FILE)
    flagged = False
    if peutils.is_probably_packed(SUBJECT) is True:
        print_('Binary is possibly packed!')
        flagged = True
    if peutils.is_suspicious(SUBJECT) is True:
        print_('Binary is suspicious!')
        flagged = True
    if flagged is False:
        print_('Binary appears to be normal')
    print_('Address of Entry Point: 0x%08x' %
        SUBJECT.OPTIONAL_HEADER.AddressOfEntryPoint)
    misc_generate_checksum()
    misc_verify_checksum()
    print_('Sections:')
    for section in SUBJECT.sections:
        print_('\tRVA: 0x%08x - Name: %s - %i bytes' %
            (section.VirtualAddress, section.Name.strip('\x00'),
            section.SizeOfRawData))
    print_('Imports from:')
    for entry in SUBJECT.DIRECTORY_ENTRY_IMPORT:
        count = 0
        for i in entry.imports:
            count += 1
        print_('\t%s -> %i functions' % (entry.dll, count))
def callback():
    # Placeholder for menu entries whose feature is not implemented yet.
    reportError('Callback not yet implemented')
# ----------------------------------------------------------------------------
# Menu layout consumed by Menu below: a list of sections, each mapping
# human-readable option labels to their handler functions.
menu_tree = [
    {'Section': 'Strings', 'Contents': [
        {'Option': 'Dump All', 'Callback': strings_display_all},
        {'Option': 'Search by keyword', 'Callback': strings_search},
    ]},
    {'Section': 'Imports', 'Contents': [
        {'Option': 'Dump All', 'Callback': imports_dump_all},
        {'Option': 'Search by keyword', 'Callback': imports_search},
    ]},
    {'Section': 'Exports', 'Contents': [
        {'Option': 'Dump All', 'Callback': exports_dump_all},
        {'Option': 'Search by keyword', 'Callback': exports_search},
        {'Option': 'Build VS #Pragma forwards',
            'Callback': exports_build_vs_pragma_forwards},
    ]},
    {'Section': 'Assembly', 'Contents': [
        {'Option': 'Disassemble at OEP', 'Callback': assembly_disassemble},
    ]},
    {'Section': 'Metadata', 'Contents': [
        {'Option': 'Binary Overview', 'Callback': metadata_subject_overview},
    ]},
    {'Section': 'Misc', 'Contents': [
        {'Option': 'Verify Checksum', 'Callback': misc_verify_checksum},
        {'Option': 'Generate Checksum', 'Callback': misc_generate_checksum},
        {'Option': 'Load new binary', 'Callback': loadSubject}
    ]},
]
# ----------------------------------------------------------------------------
class Menu:
    """Recursive, text-driven two-level menu over a `menu_tree` structure."""
    # Index of the active section in the tree, or None at the root menu.
    cur_section = None
    def __init__(self, menu_tree=None):
        self.tree = menu_tree
    def display(self):
        # Entry point: show either the root section list or the options of
        # the currently selected section.
        spacer()
        if self.cur_section is None:
            self.display_root()
        else:
            self.display_section()
    def display_root(self):
        # List all sections and descend into the chosen one.
        index = 0
        for e in self.tree:
            print_('....[%i] %s' % (index, e['Section']))
            index += 1
        user_choice = None
        try:
            user_choice = int(getInput('Menu selection'))
        except ValueError:
            user_choice = None
        # FIX: test for None BEFORE the numeric comparisons (`None >= index`
        # raises TypeError on Python 3 and was only accidentally harmless on
        # Python 2), and use `is None` rather than `== None`.
        if user_choice is None or user_choice >= index or user_choice < 0:
            reportError('Invalid menu selection')
            self.display()
        else:
            self.cur_section = user_choice
            self.display()
    def display_section(self):
        # List the current section's options plus a "Main menu" escape, and
        # dispatch the chosen callback.
        index = 0
        print_('....[+] %s' % self.tree[self.cur_section]['Section'])
        for e in self.tree[self.cur_section]['Contents']:
            print_('........[%i] %s' % (index, e['Option']))
            index += 1
        print_('........[%i] %s' % (index, 'Main menu'))
        user_choice = None
        try:
            user_choice = int(getInput('Menu selection'))
        except ValueError:
            user_choice = None
        # Same None-before-comparison fix as display_root.
        if user_choice is None or user_choice > index or user_choice < 0:
            reportError('Invalid menu selection')
            self.display()
        elif user_choice == index:
            self.cur_section = None
            self.display()
        else:
            # Bit of visual formatting happening here before the callback.
            spacer()
            drawLine()
            spacer()
            self.tree[self.cur_section]['Contents'][user_choice]['Callback']()
            spacer()
            drawLine()
            self.display()
# ----------------------------------------------------------------------------
AmnesiaMenu = Menu(menu_tree)
try:
    # Run the interactive menu until the user interrupts.
    AmnesiaMenu.display()
except KeyboardInterrupt:
    # Ctrl+C is the normal way to leave the menu loop.
    print_('\n\nGoodbye.')
except Exception as e:
    reportError('Unhandled exception caught')
    reportError('%s' % e)
|
#python3
'''
Suppose Andy and Doris want to choose a restaurant for dinner, and they both have a list of favorite restaurants represented by strings.
You need to help them find out their common interest with the least list index sum.
If there is a choice tie between answers, output all of them with no order requirement. You could assume there always exists an answer.
Example 1:
Input:
["Shogun", "Tapioca Express", "Burger King", "KFC"]
["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"]
Output: ["Shogun"]
Explanation: The only restaurant they both like is "Shogun".
Example 2:
Input:
["Shogun", "Tapioca Express", "Burger King", "KFC"]
["KFC", "Shogun", "Burger King"]
Output: ["Shogun"]
Explanation: The restaurant they both like and have the least index sum is "Shogun" with index sum 1 (0+1).
Note:
The length of both lists will be in the range of [1, 1000].
The length of strings in both lists will be in the range of [1, 30].
The index is starting from 0 to the list length minus 1.
No duplicates in both lists.
'''
def findRestaurant(list1, list2):
    """Return the common restaurant(s) with the least index sum (LC 599).

    Ties all share the minimum sum; order of the result is unspecified.
    Returns [] when the lists share nothing (matching the original).
    """
    common = set(list1) & set(list2)
    if not common:
        return []
    # Each .index() is O(n); look every restaurant up exactly once. The
    # original also recomputed min(...) inside the loop (accidental O(k^2)).
    index_sums = {name: list1.index(name) + list2.index(name) for name in common}
    best = min(index_sums.values())
    return [name for name in index_sums if index_sums[name] == best]
if __name__ == '__main__':
    # Both sample cases from the problem statement; expected output: ['Shogun'] twice.
    print(findRestaurant(["Shogun", "Tapioca Express", "Burger King", "KFC"],["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"]))
    print(findRestaurant(["Shogun", "Tapioca Express", "Burger King", "KFC"],["KFC", "Shogun", "Burger King"]))
|
def adapt_to_ex(model):
    # NOTE(review): this function looks like an unfinished copy/paste. The
    # lines below reference `self` (there is no such parameter) plus
    # `conv_sections`, `linear_sections`, `head`, `input_shape`, `torch`,
    # `DataAugmentation`, `CrossEntropyWithProbs`, `device`, `datetime`,
    # `RangeParameter`, `ChoiceParameter`, `FixedParameter` and
    # `ParameterType`, none of which are defined in this scope — calling it
    # raises NameError as written. Presumably the first half belongs in a
    # model class __init__ and the second half builds a hyper-parameter
    # search space; confirm the intended split before use. Note also that
    # config, lr, bn, bs, ga, b and i are built but never returned.
    self.conv_sections = conv_sections
    self.glob_av_pool = torch.nn.AdaptiveAvgPool2d(output_size=1)
    self.linear_sections = linear_sections
    self.head = head
    self.input_shape = input_shape
    self.n_classes = head.out_elements
    self.data_augment = DataAugmentation(n_classes=self.n_classes)
    self.loss_func_CE_soft = CrossEntropyWithProbs().to(device)
    self.loss_func_CE_hard = torch.nn.CrossEntropyLoss().to(device)
    self.loss_func_MSE = torch.nn.MSELoss().to(device)
    self.creation_time = datetime.now()
    # Architecture snapshot (falls back to 16 for absent sections).
    config = {}
    config['n_conv_l'] = len(model.conv_sections)
    config['n_conv_0'] = model.conv_sections[0].out_elements
    config['n_conv_1'] = model.conv_sections[1].out_elements if len(model.conv_sections) > 1 else 16
    config['n_conv_2'] = model.conv_sections[2].out_elements if len(model.conv_sections) > 2 else 16
    # Dense
    config['n_fc_l'] = len(model.linear_sections)
    config['n_fc_0'] = model.linear_sections[0].out_elements
    config['n_fc_1'] = model.linear_sections[1].out_elements if len(model.linear_sections) > 1 else 16
    config['n_fc_2'] = model.linear_sections[2].out_elements if len(model.linear_sections) > 2 else 16
    # Kernel Size
    # NOTE(review): kernel_size is read from a *linear* section — this
    # presumably should come from a conv section; verify.
    config['kernel_size'] = model.linear_sections[0].kernel_size
    # Learning Rate
    lr = RangeParameter('lr_init', ParameterType.FLOAT, 0.00001, 1.0, True)
    # Use Batch Normalization
    bn = ChoiceParameter('batch_norm', ParameterType.BOOL, values=[True, False])
    # Batch size
    bs = RangeParameter('batch_size', ParameterType.INT, 1, 512, True)
    # Global Avg Pooling
    ga = ChoiceParameter('global_avg_pooling', ParameterType.BOOL, values=[True, False])
    b = FixedParameter('budget', ParameterType.INT, 25)
    i = FixedParameter('id', ParameterType.STRING, 'dummy')
|
# Copyright 2015 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenHTF name imports for convenience.
Use 'from openhtf.names import *' at the top of a test script to map commonly
used names:
Decorators for test phase functions:
@measures Attach measurements to test phases.
@plug Use a hardware plug in a test phase.
@TestPhase Make a test phase out of any function.
Classes for instantiation:
Measurement A measurement to be taken within a test phase.
Unit codes for lookup:
UOM Reference for SI units and their codes.
"""
import openhtf
# Fix: `openhtf.exe` is referenced below but was never imported; attribute
# access on a package only works if the submodule has been imported somewhere.
import openhtf.exe.triggers
import openhtf.io.user_input
import openhtf.plugs
import openhtf.util.measurements
import openhtf.util.monitors
import openhtf.util.units

# pylint: disable=invalid-name

# Pseudomodules.
prompts = openhtf.io.user_input.get_prompt_manager()
triggers = openhtf.exe.triggers

# Functions used in writing test scripts.
measures = openhtf.util.measurements.measures
monitors = openhtf.util.monitors.monitors
plug = openhtf.plugs.requires

# Classes used in writing test scripts.
Measurement = openhtf.util.measurements.Measurement
OutputToJSON = openhtf.OutputToJSON
TestPhase = openhtf.TestPhase
UOM = openhtf.util.units.UOM
|
import collections
import logging
import threading
import time
# Minimum number of seconds between logged timing reports.
_REPORT_INTERVAL_S = 1.0
# Padding prepended once per nesting level when formatting report lines.
_INDENT = ' '
# Smallest unattributed "remainder" (in seconds) worth printing.
_MIN_REMAINDER = 0.01
class Profiled(object):
  """Context manager that records wall-clock timings of named code blocks.

  Wrap code in ``with Profiled('name'): ...``.  Nested uses build a per-thread
  tree of _Report objects; a formatted summary is logged whenever the
  outermost profiled block exits, or roughly every _REPORT_INTERVAL_S seconds
  for long-running blocks.
  """

  # Shared state; _lock must be held while using these values.
  _stacks_by_threadid = collections.defaultdict(lambda: list())
  _last_report_time = time.time()
  _reports_by_name = {}
  _lock = threading.Lock()

  def __init__(self, name):
    """Find or create the _Report for `name` at the current nesting depth.

    Args:
      name: Informational name identifying the profiled block.
    """
    # Fix: the original took _lock twice (lookup/creation in one critical
    # section, registration in a second), leaving a window where another
    # thread could observe a created-but-unregistered report.  Do it all
    # under a single acquisition.
    with self._lock:
      self._stack = self._stacks_by_threadid[threading.current_thread().ident]
      self._report = self._reports_by_name.get(name)
      if not self._report:
        self._report = _Report(name, len(self._stack))
        # Only a freshly created report is linked into its parent, so
        # repeated entries don't duplicate children.
        if self._stack:
          self._stack[-1].children.append(self._report)
      self._reports_by_name[name] = self._report

  def __enter__(self):
    self._report.start = time.time()
    self._stack.append(self._report)
    # Fix: return self so ``with Profiled(...) as p`` binds the guard
    # instead of None (callers not using ``as`` are unaffected).
    return self

  def __exit__(self, excClass, excObj, tb):
    self._report.durations.append(time.time() - self._report.start)
    self._report.start = None
    self._stack.pop()
    with self._lock:
      self._MaybePrintReport(self._stack)

  @classmethod
  def _MaybePrintReport(cls, stack):
    """Log a timing summary if one is due.  Caller must hold _lock.

    Always prints when exiting the root profiled code (empty stack), to avoid
    losing data.  Otherwise waits until the reporting interval has elapsed.
    """
    if stack and (
        time.time() - cls._last_report_time < _REPORT_INTERVAL_S):
      return
    # Log all available reports.
    cls._last_report_time = time.time()
    root_reports = [r for r in cls._reports_by_name.values() if r.level == 0]
    for report in root_reports:
      logging.info('\n'.join([''] + cls._GetReportLines(report, 0)))
    # After printing report details, remove anything that's not current. Thus
    # each reporting period covers only what happened since the last one.
    # (Without pruning, reports would be cumulative.)
    cls._reports_by_name = {}
    for root in root_reports:
      cls._PruneReports(None, root)

  @classmethod
  def _PruneReports(cls, parent, report):
    """Recursively drop finished reports, keeping those still in context.

    Returns:
      True if this report should be kept around, False if it can be dropped.
    """
    # Keep reports which are currently in context (started but not exited),
    # or whose parent has not yet completed an iteration.
    if not (report.start is not None or (parent and not parent.durations)):
      return False
    cls._reports_by_name[report.name] = report
    old_children = list(report.children)
    report.children = []
    for child in old_children:
      old_duration = sum(child.durations)
      if cls._PruneReports(report, child):
        report.children.append(child)
      # Remember pruned child time so the parent's "remainder" stays
      # correct across reporting periods.
      report.past_child_durations += old_duration
    report.durations = []
    return True

  @classmethod
  def _GetReportLines(cls, report, level):
    """Format one timing summary line for this report, plus lines for children.

    Returns:
      A list of lines like "<report name> 2 * 1.16s = 2.32s". Each line will
      have an appropriate amount of whitespace padding for indentation, based
      on the given level.
    """
    lines = []
    if report.durations:
      total = sum(report.durations)
      ave = total / len(report.durations)
      max_value = max(report.durations)
      # Only call out the max when it's an outlier (>= 10x the average).
      lines.append(
          '%s%s %d * %.2fs = %.2fs%s' %
          (level * _INDENT,
           report.name,
           len(report.durations),
           ave,
           total,
           '' if max_value < ave * 10 else ' (max %.2fs)' % max_value))
    elif report.start is not None:
      # Still running; report the elapsed time so far.
      total = time.time() - report.start
      lines.append(
          '%s%s %.2fs (partial)'
          % (level * _INDENT, report.name, total))
    else:
      total = 0
      lines.append('%s%s (cleared)' % (level * _INDENT, report.name))
    # Parent time not attributed to any child is shown as "remainder".
    total -= report.past_child_durations
    for child in report.children:
      lines += cls._GetReportLines(child, level + 1)
      total -= sum(child.durations)
    if report.children and total >= _MIN_REMAINDER:
      lines.append('%sremainder %.2fs' % ((level + 1) * _INDENT, total))
    return lines
class _Report(object):
  """Record of past execution times for one profiled block of code.

  Attributes:
    name: Informational / user-provided name for the profiled block.
    level: Nesting depth of the profiled code, used for indentation.
    start: Timestamp of the most recent entry, or None when the block is
      not currently running (not in context).
    durations: Completed run times in seconds, used for ave/max summaries.
    past_child_durations: Child time already accounted for in earlier,
      pruned reporting periods.
    children: _Reports fully nested within this one; parent time not
      subdivided among them is reported as "remainder".
  """

  def __init__(self, name, level):
    self.name = name
    self.level = level
    self.start = None
    self.durations = []
    self.past_child_durations = 0
    self.children = []
if __name__ == '__main__':
  # Demo of the Profiled context guard; runs forever by design.
  import random
  logging.basicConfig(
      format='%(levelname)s %(asctime)s %(filename)s:%(lineno)s: %(message)s',
      level=logging.INFO)
  # At its simplest, we can wrap a block of code in a Profiled context guard,
  # and see its timing information printed out when it's done.
  with Profiled('initial block, should take about 1 second'):
    time.sleep(1.0)
  # We can also wrap a long-running (or even an infinite loop), and get periodic
  # reports of how it's currently performing.
  with Profiled('longstanding root'):
    while True:
      # This is a child Profiled.
      with Profiled('main'):
        # NOTE(review): xrange is Python 2 only; under Python 3 this demo
        # raises NameError — use range instead.
        for _ in xrange(10):
          with Profiled('short'):
            r = random.random()
          # Usually, only the average execution time is reported. But if a block
          # of code has highly variable execution time, the max is reported too.
          with Profiled('will report outlier max'):
            if r > 0.95:
              time.sleep(1.0)
        with Profiled('long with subsection'):
          for _ in xrange(1000):
            for _ in xrange(100):
              with Profiled('extremely frequent'):
                pass
        # This statement is part of the 'main' block, but not in any of its
        # children. It gets called out in the 'remainder', so we notice that
        # the sum of the children is less than the parent.
        time.sleep(r)
|
# 1065 - count even, positive and negative values among 5 integers from stdin.
evens = 0
positives = 0
negatives = 0
for _ in range(5):
    value = int(input())
    if value % 2 == 0:
        evens += 1
    if value > 0:
        positives += 1
    elif value < 0:
        negatives += 1
# Odd count is whatever is left over from the five readings.
print("{} valor(es) par(es)".format(evens))
print("{} valor(es) impar(es)".format(5 - evens))
print("{} valor(es) positivo(s)".format(positives))
print("{} valor(es) negativo(s)".format(negatives))
|
import itertools
import operator
import matplotlib
# Fix: the original used '==' (a no-op comparison whose result was
# discarded) instead of '=' when enabling interactive mode via rcParams.
matplotlib.rcParams['interactive'] = True
matplotlib.interactive(True)
from matplotlib import pyplot as plt
from skimage import color
from skimage import io
import numpy as np
import re
import csv
import os
import aire

# Load a region-segmentation result image, measure its areas with the
# project's `aire` helper, then show the original and processed images
# side by side in two figures.
image1 = io.imread("images\\resultats\\region_seg\\original\\p2\\savedImage2133.bmp")
result = aire.getAreas(image1, True)
image2 = result[0]
plt.figure(1)
plt.imshow(image1)
plt.figure(2)
plt.imshow(image2)
plt.show()
print(result[1])
# Interactive mode means show() doesn't block; wait for the user before
# closing the figures.
input("waiting...")
plt.close()
'''
Created on Jun 3, 2016
@author: Daniel
'''
import unittest
from poker.Deck import Hand, Card
from poker.WinPatterns import HighCard, Pair, TwoPair, ThreeOfAKind, Straight, Flush, FullHouse, FourOfAKind, StraightFlush, RoyalFlush
class TestHighCard(unittest.TestCase):
    """Tests for the HighCard win pattern."""

    def test_low_high_card(self):
        bad_hand = Hand(["7D", "2H", "3D", "5C", "4S"])
        self.assertEqual("7", HighCard(bad_hand).values().rank)

    def test_ace_high(self):
        good_hand = Hand(["AD", "KD", "QD", "JD", "TD"])
        self.assertEqual("A", HighCard(good_hand).values().rank)

    def test_ace_trumps_king(self):
        ace = HighCard(Hand(["AD"]))
        king = HighCard(Hand(["KD"]))
        # Fix: the original called assertTrue(Card("AD"), ace.values()),
        # which always passes because the second argument is only the
        # failure message. Assert on the high card's rank instead,
        # matching how the other tests in this file compare cards.
        self.assertEqual("A", ace.values().rank)
        self.assertTrue(ace.trumps(king))

    def test_to_string(self):
        ace = HighCard(Hand(["AS"]))
        self.assertEqual("HighCard (AS)", str(ace))
class TestPair(unittest.TestCase):
    """Tests for the Pair win pattern."""

    def test_is_pair(self):
        hand = Hand(["5H", "5S"])
        self.assertTrue(Pair(hand).criterion())

    def test_is_not_pair(self):
        hand = Hand(["4S", "5S"])
        self.assertFalse(Pair(hand).criterion())

    def test_know_rank_of_pair(self):
        paired_hand = Hand(["7S", "2H", "3D", "7C", "KD"])
        self.assertEqual("7", Pair(paired_hand).values().rank)

    def test_to_string(self):
        self.assertEqual("Pair (5)", str(Pair(Hand(["5H", "5S"]))))
class TestTwoPair(unittest.TestCase):
    """Tests for the TwoPair win pattern."""

    def test_is_two_pair(self):
        hand = Hand(["5H", "5S", "8H", "8D"])
        self.assertTrue(TwoPair(hand).criterion())

    def test_lone_pair_is_not_two_pair(self):
        hand = Hand(["4S", "4S", "7H", "8D"])
        self.assertFalse(TwoPair(hand).criterion())

    def test_know_ranks_of_two_pair(self):
        full_hand = Hand(["7S", "3H", "3D", "7C", "KD"])
        self.assertEqual(["3", "7"], TwoPair(full_hand).values())

    def test_to_string(self):
        hand = Hand(["5H", "5S", "8H", "8D"])
        self.assertEqual("TwoPair (5,8)", str(TwoPair(hand)))
class TestThreeOfAKind(unittest.TestCase):
    """Tests for the ThreeOfAKind win pattern."""

    def test_is_three_of_a_kind(self):
        trips = Hand(["5H", "5S", "5D"])
        self.assertTrue(ThreeOfAKind(trips).criterion())

    def test_pair_is_not_three_of_a_kind(self):
        just_a_pair = Hand(["4S", "5S", "5D"])
        self.assertFalse(ThreeOfAKind(just_a_pair).criterion())

    def test_four_of_a_kind_is_not_three_of_a_kind(self):
        quads = Hand(["7S", "7H", "3D", "7C", "7D"])
        self.assertFalse(ThreeOfAKind(quads).criterion())

    def test_know_rank_of_three_of_a_kind(self):
        trips = Hand(["7S", "7H", "3D", "7C", "KD"])
        self.assertEqual("7", ThreeOfAKind(trips).values().rank)

    def test_to_string(self):
        self.assertEqual("ThreeOfAKind (5)", str(ThreeOfAKind(Hand(["5H", "5S", "5D"]))))
class TestStraight(unittest.TestCase):
    """Tests for the Straight win pattern."""

    def test_small_straight(self):
        hand = Hand(["2S", "3D", "4C", "5H", "6C"])
        self.assertTrue(Straight(hand).criterion())

    def test_is_not_straight(self):
        hand = Hand(["8S", "3D", "4C", "5H", "6C"])
        self.assertFalse(Straight(hand).criterion())

    def test_big_straight(self):
        hand = Hand(["AS", "KD", "QH", "JS", "TC"])
        self.assertTrue(Straight(hand).criterion())

    def test_to_string(self):
        hand = Hand(["2S", "3D", "4C", "5H", "6C"])
        self.assertEqual("Straight (2-6)", str(Straight(hand)))
class TestFlush(unittest.TestCase):
    """Tests for the Flush win pattern."""

    def test_is_a_flush(self):
        flush = Hand(["5D", "6D", "2D", "KD", "TD"])
        self.assertTrue(Flush(flush).criterion())

    def test_one_not_same_suit(self):
        not_flush = Hand(["5D", "6D", "2H", "KD", "TD"])
        self.assertFalse(Flush(not_flush).criterion())

    def test_to_string(self):
        flush = Flush(Hand(["5D", "6D", "2D", "KD", "TD"]))
        # Fix: assertEquals is a deprecated alias; use assertEqual,
        # consistent with the rest of this file.
        self.assertEqual("Flush (D)", str(flush))
class TestFullHouse(unittest.TestCase):
    """Tests for the FullHouse win pattern."""

    def test_is_full_house(self):
        full_house = Hand(["4S", "4D", "7H", "7C", "7D"])
        self.assertTrue(FullHouse(full_house).criterion())

    def test_two_pair_is_not_full_house(self):
        two_pair = Hand(["4S", "4D", "7H", "7C", "8D"])
        self.assertFalse(FullHouse(two_pair).criterion())

    def test_suits_do_not_make_a_full_house(self):
        suit_house = Hand(["2S", "4S", "7H", "6H", "8H"])
        self.assertFalse(FullHouse(suit_house).criterion())

    def test_to_string(self):
        full_house = FullHouse(Hand(["4S", "4D", "7H", "7C", "7D"]))
        # Fix: assertEquals is a deprecated alias; use assertEqual,
        # consistent with the rest of this file.
        self.assertEqual("FullHouse (7,4)", str(full_house))
class TestFourOfAKind(unittest.TestCase):
    """Tests for the FourOfAKind win pattern."""

    def test_is_four_of_a_kind(self):
        quads = Hand(["5H", "5S", "5D", "5C"])
        self.assertTrue(FourOfAKind(quads).criterion())

    def test_pair_is_not_four_of_a_kind(self):
        just_a_pair = Hand(["4S", "5S", "5D"])
        self.assertFalse(FourOfAKind(just_a_pair).criterion())

    def test_three_of_a_kind_is_not_four_of_a_kind(self):
        trips = Hand(["7S", "7H", "3D", "2C", "7D"])
        self.assertFalse(FourOfAKind(trips).criterion())

    def test_know_rank_of_four_of_a_kind(self):
        quads = Hand(["7S", "7H", "3D", "7C", "7D"])
        self.assertEqual("7", FourOfAKind(quads).values().rank)

    def test_to_string(self):
        self.assertEqual("FourOfAKind (5)", str(FourOfAKind(Hand(["5H", "5S", "5D", "5C"]))))
class TestStraightFlush(unittest.TestCase):
    """Tests for the StraightFlush win pattern."""

    def test_is_straight_flush(self):
        straight_flush = Hand(["2S", "3S", "4S", "5S", "6S"])
        self.assertTrue(StraightFlush(straight_flush).criterion())

    def test_straight_is_not_always_straight_flush(self):
        mixed_suits = Hand(["2S", "3D", "4S", "5S", "6S"])
        self.assertFalse(StraightFlush(mixed_suits).criterion())

    def test_flush_is_not_always_straight_flush(self):
        gapped_flush = Hand(["2S", "3S", "4S", "5S", "7S"])
        self.assertFalse(StraightFlush(gapped_flush).criterion())

    def test_to_string(self):
        hand = Hand(["2S", "3S", "4S", "5S", "6S"])
        self.assertEqual("StraightFlush (2-6S)", str(StraightFlush(hand)))
class TestRoyalFlush(unittest.TestCase):
    """Tests for the RoyalFlush win pattern."""

    def test_royal_flush(self):
        hand = Hand(["AD", "KD", "QD", "JD", "TD"])
        self.assertTrue(RoyalFlush(hand).criterion())

    def test_straight_flush_but_not_royal(self):
        hand = Hand(["KD", "QD", "JD", "TD", "9D"])
        self.assertFalse(RoyalFlush(hand).criterion())

    def test_straight_but_not_royal_flush(self):
        hand = Hand(["AS", "KD", "QD", "JD", "TD"])
        self.assertFalse(RoyalFlush(hand).criterion())

    def test_to_string(self):
        hand = RoyalFlush(Hand(["AD", "KD", "QD", "JD", "TD"]))
        # Fix: assertEquals is a deprecated alias; use assertEqual,
        # consistent with the rest of this file.
        self.assertEqual("RoyalFlush (D)", str(hand))
|
# Generated by Django 2.2.6 on 2019-10-19 10:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.6; adds the `group` foreign key and the
    # `is_assigned` flag to the `task` model. Generated migrations should
    # not be edited by hand.
    dependencies = [
        ('twix', '0002_auto_20191019_1457'),
    ]
    operations = [
        migrations.AddField(
            model_name='task',
            name='group',
            # SET_NULL (with null=True) keeps tasks alive when their
            # group is deleted, just clearing the reference.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tasks', to='twix.Group'),
        ),
        migrations.AddField(
            model_name='task',
            name='is_assigned',
            field=models.BooleanField(default=False),
        ),
    ]
|
class InsufficientIngredientsException(Exception):
    """Raised when ingredients are not sufficient."""
    pass
class IngredientsOverflowException(Exception):
    """Raised when ingredients overflow."""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.