index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
3,300 | 11072601e31ceba13f8adf6c070f84ca5add35e9 | # 5/1/2020
# Import median function from numpy
import numpy as np
from numpy import median
# Plot the median number of absences instead of the mean
# Point plot of absences by relationship status, one line per school; the
# `estimator = median` override plots the median instead of the default mean.
sns.catplot(x="romantic", y="absences",
            data=student_data,
            kind="point",
            hue="school",
            ci=None,  # no error bars; NOTE(review): seaborn >= 0.12 spells this errorbar=None -- confirm version
            estimator = median)
# Show plot
plt.show() |
3,301 | 6ca7b896cc20220f790c06d4ba08fef7bda8400f | # test CurlypivSetup
"""
Notes about program
"""
# 1.0 import modules
import numpy as np
from skimage import io
import glob
from os.path import join
import matplotlib.pyplot as plt
from curlypiv.utils.calibrateCamera import measureIlluminationDistributionXY, calculate_depth_of_correlation, calculate_darkfield, plot_field_depth
# 2.0 define class
class CurlypivTestSetup(object):
    """Top-level container for everything used in one experimental setup.

    Aggregates the four major pieces of the experiment:
      * chip                   -- microfluidic chip (materials, channel, BPE,
                                  reservoirs, electrode configuration)
      * optics                 -- illumination, microscope, objective, CCD,
                                  fluorescent particles, calibration grid
      * fluid_handling_system  -- reservoirs and tubing
    plus a human-readable test name.
    """

    def __init__(self, name, chip, optics, fluid_handling_system):
        # Each argument is expected to be an instance of the corresponding
        # class defined in this module; they are simply stored for lookup.
        self.name = name
        self.chip = chip
        self.optics = optics
        self.fluid_handling_system = fluid_handling_system
class chip(object):
    def __init__(self, channel=None, bpe=None, reservoir=None, electrodes=None, fluid_handling_system=None,
                 material_in_optical_path=None, thickness_in_optical_path=None):
        """
        Everything important about the chip.

        :param channel: channel instance (also carries the wall-material info)
        :param bpe: bipolar electrode (bpe instance)
        :param reservoir: on-chip reservoir (reservoir instance)
        :param electrodes: electrode_configuration instance
        :param fluid_handling_system: fluid_handling_system instance
        :param material_in_optical_path: material(s) the imaging path crosses
        :param thickness_in_optical_path: matching thickness(es)
        """
        #self.material = material # deprecated so the channel class can hold this information
        self.channel = channel
        self.bpe = bpe
        # BUG FIX: `reservoir` was accepted but silently dropped; store it.
        self.reservoir = reservoir
        self.electrodes = electrodes
        self.fluid_handling_system = fluid_handling_system
        self.material_in_optical_path = material_in_optical_path
        self.thickness_in_optical_path = thickness_in_optical_path
class channel(object):
    def __init__(self, length=None, width=None, height=None,
                 material_bottom_wall_surface=None, material_top_wall_surface=None, material_fluid=None):
        """
        Geometry and materials of a single microchannel.

        Wall-surface materials should only hold electrokinetically relevant
        data; the fluid material may be a mixture of liquid materials and
        fluorescent particles.
        """
        # geometry
        self.length = length
        self.width = width
        self.height = height
        # materials (walls + contained fluid)
        self.material_bottom_wall_surface = material_bottom_wall_surface
        self.material_top_wall_surface = material_top_wall_surface
        self.material_fluid = material_fluid
class bpe(object):
    def __init__(self, length=None, width=None, height=None, material=None, adhesion_material=None,
                 dielectric_coating=None):
        """
        Everything important about the bipolar electrode (BPE).

        :param length: BPE length; when given, a 100-point axial coordinate
            array spanning [-length/2, length/2] is precomputed.
        :param width: BPE width
        :param height: BPE height; must match material.thickness when both set
        :param material: material_solid instance for the electrode body
        :param adhesion_material: adhesion layer used for thin metal film BPE
        :param dielectric_coating: optional dielectric layer on top of the BPE
        :raises ValueError: if material.thickness is set and differs from height
        """
        self.length = length
        # BUG FIX: np.linspace crashed with the default length=None.
        self.linspace_x = np.linspace(-length/2, length/2, num=100) if length is not None else None
        self.width = width
        self.height = height
        self.material = material
        # BUG FIX: attribute access crashed when material was None (the default).
        if material is not None and getattr(material, 'thickness', None):
            if material.thickness != self.height:
                raise ValueError("BPE height must equal BPE material thickness")
        # adhesion layer used for thin metal film BPE
        self.adhesion_material = adhesion_material
        # dielectric coating on top of BPE; fall back to a vanishingly thin
        # "no dielectric" placeholder so downstream code can rely on it existing
        if dielectric_coating:
            self.dielectric_coating = dielectric_coating
        else:
            self.dielectric_coating = material_solid(name='no_dielectric', permittivity=1, thickness=1e-12, Ka=6, Kb=2, reaction_site_density=5)
class optics(object):
    def __init__(self, microscope, fluorescent_particles=None, calibration_grid=None, pixel_to_micron_scaling=None):
        """
        Optical train: microscope plus particles and calibration grid.

        The microns-per-pixel scaling is fixed by the objective magnification
        (50X -> 0.60, 20X -> 1.55); any other magnification is rejected.
        Passing `pixel_to_micron_scaling` manually is deprecated and only
        triggers an informational message.
        """
        self.microscope = microscope
        self.fluorescent_particles = fluorescent_particles
        self.calibration_grid = calibration_grid
        scaling_by_magnification = {50: 0.60, 20: 1.55}  # (microns/pixels)
        magnification = self.microscope.objective.magnification
        try:
            self.pixel_to_micron_scaling = scaling_by_magnification[magnification]
        except KeyError:
            raise ValueError("Unable to determine microns/pixels scaling because objective magnification not 50X or 20X")
        if pixel_to_micron_scaling is not None:
            print("Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.".format(self.pixel_to_micron_scaling, self.microscope.objective.magnification))
        # NOTE: an earlier version derived/validated the scaling factor from
        # microscope.objective.pixel_to_micron; that path is deprecated since
        # the correct factors above were determined once and for all.
class illumination(object):
    def __init__(self, basePath=None, source=None, excitation=None, emission=None, dichroic=None, illumination_distribution=None,
                 calculate_illumination_distribution=False,
                 illumPath=None, illumSavePath=None, illumSaveName=None, showIllumPlot=False, save_txt=False, save_plot=False, save_image=False):
        """
        Illumination source plus the flat-field (illumination) distribution.

        The flat field is resolved in priority order:
          1. an explicitly supplied `illumination_distribution` array;
          2. an image loaded from `illumPath` (multi-frame stacks are
             frame-averaged and rounded to uint16);
          3. a fresh measurement via measureIlluminationDistributionXY when
             `calculate_illumination_distribution` is True;
          4. otherwise None.

        :param source: light source description
        :param excitation: excitation wavelength
        :param emission: emission wavelength
        :param dichroic: dichroic mirror element
        """
        self.basePath = basePath  # should come from CurlypivTestCollection
        self.source = source
        self.excitation_wavelength = excitation
        self.emission_wavelength = emission
        self.dichroic = dichroic
        if illumination_distribution is not None:
            flat = illumination_distribution
        elif illumPath is not None:
            flat = io.imread(illumPath, plugin='tifffile')
            if len(np.shape(flat)) > 2:
                # collapse a stack to its frame-average, stored as uint16
                flat = np.asarray(np.rint(np.mean(flat, axis=0)), dtype='uint16')
        elif calculate_illumination_distribution:
            flat = measureIlluminationDistributionXY(basePath=self.basePath, illumPath=illumPath,
                                                     show_image=showIllumPlot, save_image=save_image, save_img_type='.tif',
                                                     save_txt=save_txt, show_plot=showIllumPlot, save_plot=save_plot,
                                                     savePath=illumSavePath, savename=illumSaveName)
        else:
            flat = illumination_distribution
        self.illumination_distribution = flat
        self.flatfield = flat
        if self.flatfield is not None:
            self.flatfield_mean = np.mean(self.flatfield)
            self.flatfield_std = np.std(self.flatfield)
class darkfield(object):
    def __init__(self, basePath, darkframePath=None, flip_image_across_axis=None, show_image=False, save_image=False, save_img_type='.tif',
                 savePath=None, savename=None, save_plot=False):
        """
        Dark-field (camera dark frame) image and its summary statistics.

        Loading, optional axis flipping, display and saving are all delegated
        to calculate_darkfield, which returns the frame plus its mean/std.
        """
        self.basePath = basePath
        dark_img, dark_mean, dark_std = calculate_darkfield(
            self.basePath, darkframePath=darkframePath, flip_image_axes=flip_image_across_axis,
            show_image=show_image, save_image=save_image, save_img_type=save_img_type,
            savePath=savePath, savename=savename, save_plot=save_plot)
        self.img = dark_img
        self.mean = dark_mean
        self.std = dark_std
class microscope(object):
    def __init__(self, type, objective, illumination, ccd):
        """
        The microscope hardware stack.

        :param type: microscope model identifier (e.g. Olympus iX73)
        :param objective: objective instance
        :param illumination: illumination instance
        :param ccd: ccd (camera) instance
        """
        self.type = type
        self.objective = objective
        self.illumination = illumination
        self.ccd = ccd
class ccd(object):
    def __init__(self, exposure_time, img_acq_rate, EM_gain, name='iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,
                 vertical_pixel_shift_speed=0.5e-6, horizontal_pixel_shift_speed=0.1e-6, horizontal_pixel_shift_rate_bits=14,
                 frame_transfer=True, crop_mode=False, acquisition_mode='kinetic', triggering='internal', readout_mode='image',
                 pixels=512, pixel_size=16e-6):
        """
        EMCCD camera description and acquisition settings.

        A scalar `pixels` value is promoted to a square (pixels, pixels) grid;
        the physical image area is the pixel grid times the pixel pitch.
        """
        # identity / primary acquisition parameters
        self.name = name
        self.img_acq_type = img_acq_type
        self.exposure_time = exposure_time
        self.img_acq_rate = img_acq_rate
        self.em_gain = EM_gain
        self.darkfield = darkfield
        self.binning = binning
        # supporting camera acquisition settings
        self.vpss = vertical_pixel_shift_speed
        self.hpss = horizontal_pixel_shift_speed
        self.hpss_bits = horizontal_pixel_shift_rate_bits
        self.frame_transfer = frame_transfer
        self.crop_mode = crop_mode
        self.acquisition_mode = acquisition_mode
        self.triggering = triggering
        self.readout_mode = readout_mode
        # sensor geometry
        self.pixels = (pixels, pixels) if isinstance(pixels, int) else pixels
        self.pixel_size = pixel_size
        self.image_area = (self.pixels[0] * pixel_size, self.pixels[1] * pixel_size)
class objective(object):
    def __init__(self, fluoro_particle, name=None, numerical_aperture=None, magnification=None, basePath=None, channel_height=None, illumination=None, wavelength=None, microgrid=None, auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None, field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):
        """
        Microscope objective plus the derived imaging depths.

        Objectives in the Pennathur Lab Dark Room uScope:
            20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]
                magnification: 20
                numerical_aperture: 0.45
                field_number: 26.5
                working distance: 7.4 - 8.3 mm
                transmittance: 90% @ 425 - 670 nm
                correction collar: 0 - 1.2 mm
                microns per pixel: 1.55
            50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]
                magnification: 50
                numerical aperture: 0.7
                field number: 26.5
                working distance: 2.2 - 3 mm
                transmittance: 90% @ 425 - 650 nm
                correction collar: 0 - 1.2 mm
                microns per pixel: 0.6
        Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428

        :param fluoro_particle: fluorescent_particles instance; its diameter
            feeds the depth-of-correlation estimate
        :param name: known objective key; fills NA/magnification/etc. from the
            table above when it matches
        :param illumination: illumination instance supplying the emission
            wavelength (takes precedence over `wavelength`)
        :param wavelength: emission wavelength used when no illumination given
        :param n0: immersion-medium refractive index (1 = air)
        :raises ValueError: when neither illumination nor wavelength is given
        """
        # if name is entered, then pull all the terms directly
        self.name = name
        if name == 'LCPLFLN20xLCD':
            self.magnification = 20
            self.numerical_aperture = 0.45
            self.field_number = 26.5
            self.transmittance = 0.9
            self.pixel_to_micron = 1.55
        elif name == 'LCPLFLN50xLCD':
            self.magnification = 50
            self.numerical_aperture = 0.7
            self.field_number = 26.5
            self.transmittance = 0.9
            self.pixel_to_micron = 0.6
        else:
            self.numerical_aperture = numerical_aperture
            self.magnification = magnification
            self.field_number = field_number
            # BUG FIX: this branch never set pixel_to_micron, so the
            # auto_calc_pix_to_micron_scaling check below raised
            # AttributeError for unnamed objectives.
            self.pixel_to_micron = pixel_to_micron
        # general terms
        self._illumination = illumination
        if self._illumination is not None:
            self._wavelength = self._illumination.emission_wavelength
        elif wavelength is not None:
            self._wavelength = wavelength
        else:
            raise ValueError("A wavelength is required via the <illumination> class or <wavelength> input parameter")
        self._pd = fluoro_particle.diameter
        self._n0 = n0
        self.calculate_depth_of_field()
        self.calculate_depth_of_correlation()
        # NOTE(review): this checks the *argument*, so named objectives (whose
        # field_number comes from the table) skip the FOV calculation unless
        # field_number is also passed explicitly -- confirm intent.
        if field_number:
            self.calculate_field_of_view()
        if show_depth_plot or save_depth_plot:
            plot_field_depth(depth_of_corr=self.depth_of_correlation, depth_of_field=self.depth_of_field, show_depth_plot=show_depth_plot, save_depth_plot=save_depth_plot,
                             basePath=basePath, savename=None, channel_height=channel_height, objective=self.magnification)
        # grids and scaling factors
        if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:
            self.microgrid = microgrid
            self.calculate_pixel_to_micron_scaling()

    def calculate_field_of_view(self):
        # field of view = field number / magnification
        self.field_of_view = self.field_number / self.magnification

    def calculate_depth_of_field(self, e=16e-6, n=1):
        """
        Depth of field of the objective.

        e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)
        n: refractive index of the medium between objective and sample
        """
        self.depth_of_field = self._wavelength*n/self.numerical_aperture**2+e*n/(self.magnification*self.numerical_aperture)

    def calculate_depth_of_correlation(self, eps=0.01):
        """Micro-PIV depth of correlation (delegates to the module helper)."""
        # step 0: define
        n = self._n0
        dp = self._pd
        NA = self.numerical_aperture
        M = self.magnification
        lmbda = self._wavelength
        # step 1: calculate the depth of correlation for the optical setup
        depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA, dp=dp, n=n, lmbda=lmbda, eps=eps)
        self.depth_of_correlation = depth_of_correlation

    def calculate_pixel_to_micron_scaling(self):
        """Placeholder: derive the um/pixel factor from a microgrid image."""
        if self.microgrid is None:
            raise ValueError("Need objective.microgrid property in order to calculate scaling factor")
        # script to calculate scaling factor from grid
        # would go here

    @property
    def NA(self):
        return self.numerical_aperture

    @property
    def M(self):
        return self.magnification
class microgrid(object):
    def __init__(self, gridPath=None, center_to_center_spacing=None, feature_width=None, grid_type='grid', show_grid=False):
        """
        Calibration micro-grid images for pixel-to-micron scaling.

        When `gridPath` is given, every grid*.tif in that directory is loaded
        (stacks are frame-averaged), and the per-file images are averaged into
        a single `img_grid`. Without a `gridPath` the instance stays empty.

        :raises ValueError: if `gridPath` contains no grid*.tif files
        """
        if gridPath is None:
            return
        self.gridPath = gridPath
        self.spacing = center_to_center_spacing
        self.width = feature_width
        self.grid_type = grid_type
        # find files in directory
        grid_files = glob.glob(join(self.gridPath, 'grid*.tif'))
        if len(grid_files) < 1:
            raise ValueError("No grid*.tif files found in {}".format(self.gridPath))
        accumulated = np.zeros(shape=(512, 512))
        for grid_file in grid_files:
            frame = io.imread(grid_file, plugin='tifffile')
            if len(np.shape(frame)) > 2:
                frame = np.mean(frame, axis=0)
            accumulated += frame
        self.img_grid = accumulated / len(grid_files)
        if show_grid is True:
            fig, ax = plt.subplots()
            ax.imshow(self.img_grid, cmap='gray')
            ax.set_xlabel('pixels')
            ax.set_ylabel('pixels')
            plt.title('grid: 10 um Lines; 50 um Spacing')
            plt.show()
class fluorescent_particles(object):
    def __init__(self, name=None, materials=None,diameter=None,fluorescence_spectra=None, concentration=None,
                 electrophoretic_mobility=None, zeta=None):
        """
        Fluorescent tracer-particle properties.

        When a diameter is supplied, the Stokes-Einstein diffusivity is
        computed using T = 298 K and a viscosity of 0.001 (water-like).

        :param materials: particle material(s)
        :param diameter: particle diameter
        :param fluorescence_spectra: spectral characteristics
        :param concentration: particle concentration
        :param electrophoretic_mobility: measured electrophoretic mobility
        :param zeta: zeta potential
        """
        self.name = name
        self.materials = materials
        self.concentration = concentration
        self.electrophoretic_mobility = electrophoretic_mobility
        self.zeta = zeta
        self.diameter = diameter
        if diameter:
            # Stokes-Einstein: D = k_B * T / (6 * pi * mu * r), r = diameter/2
            boltzmann = 1.3806e-23
            temperature = 298
            viscosity = 0.001
            self.diffusivity = boltzmann * temperature / (6 * np.pi * viscosity * diameter / 2)
        self.fluorescence_spectra = fluorescence_spectra
class reservoir(object):
    def __init__(self, diameter, height, height_of_reservoir=None, material=None):
        """
        Cylindrical fluid reservoir.

        :param diameter: reservoir inner diameter
        :param height: height of the fluid column in the reservoir
        :param height_of_reservoir: elevation of the reservoir; with a material
            density this yields the hydrostatic pressure rho*g*h
        :param material: material_liquid instance of the contained fluid
        """
        g = 9.81  # gravitational acceleration (m/s**2)
        self.material = material
        self.diameter = diameter
        self.height = height
        # BUG FIX: volume previously omitted the height factor and therefore
        # computed the cross-sectional area (pi*d^2/4), not a volume.
        self.volume = np.pi * self.diameter**2 / 4 * self.height if height is not None else None
        self.height_of_reservoir = height_of_reservoir
        if material and height_of_reservoir:
            # hydrostatic head at the channel entrance: rho * g * h
            self.hydrostatic_pressure = material.density * g * self.height_of_reservoir
class fluid_handling_system(object):
    def __init__(self, fluid_reservoir=None, all_tubing=None, onchip_reservoir=None):
        """
        The plumbing between the external supply and the chip.

        :param fluid_reservoir: external fluid reservoir
        :param all_tubing: collection of tubing segments
        :param onchip_reservoir: reservoir on the chip itself
        """
        self.fluid_reservoir = fluid_reservoir
        self.all_tubing = all_tubing
        self.onchip_reservoir = onchip_reservoir
class tubing(object):
    def __init__(self, inner_diameter=None, length=None, material=None):
        """One segment of tubing: bore, length and material."""
        self.inner_diameter = inner_diameter
        self.length = length
        self.material = material
class optical_element(object):
    def __init__(self, passing_wavelengths=None, reflectivity=None):
        """
        Optical characteristics of any element in the light path
        (e.g. excitation filter, emission filter, dichroic mirror).

        :param passing_wavelengths: wavelength band(s) the element transmits
        :param reflectivity: fraction of light reflected
        """
        self.passing_wavelengths = passing_wavelengths
        self.reflectivity = reflectivity
class measurable_quantity(object):
    def __init__(self, reference_value=None, measured_value=None):
        """A quantity with a theoretical reference and a measured value
        (e.g. conductivity or pH of a test solution)."""
        self.reference_value = reference_value
        self.measured_value = measured_value
class measurement(object):
    def __init__(self, value=None, date=None):
        """
        A single measurement record.

        :param value: the measured value
        :param date: when the measurement was taken
        """
        self.value = value
        self.date = date
class electrode_configuration(object):
    def __init__(self, material=None, length=None, entrance_length=None):
        """
        Driving-electrode configuration details.

        :param material: electrode material
        :param length: electrode separation distance
        :param entrance_length: distance to the channel entrance
        """
        self.material = material
        self.length = length
        self.entrance_length = entrance_length
class material_solid(object):
    def __init__(self, name=None, zeta=None, concentration=None, index_of_refraction=None, transparency=None, fluorescence_spectra=None,
                 permittivity=None, conductivity=None, thickness=None, youngs_modulus=None, poissons_ratio=None,
                 density=None, dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=None, width=None, length=None):
        """
        Properties of a solid material: geometry, mechanics, optics and
        electrochemistry. The `reflectivity`, `permittivity` and
        `reaction_site_density` attributes are created only when the matching
        argument is truthy.
        """
        # identity
        self.name = name
        # geometry
        self.length = length
        self.width = width
        self.thickness = thickness
        # mechanical
        self.density = density
        self.concentration = concentration  # for a solid: % by volume
        self.youngs_modulus = youngs_modulus
        self.poissons_ratio = poissons_ratio
        # optical
        self.index_of_refraction = index_of_refraction
        self.fluorescence_spectra = fluorescence_spectra
        self.transparency = transparency
        if self.transparency:
            # NOTE(review): 1/transparency exceeds 1 whenever transparency < 1;
            # 1 - transparency would be the physically expected relation.
            # Preserved as-is -- confirm upstream before changing.
            self.reflectivity = 1 / self.transparency
        # electrochemical
        self.conductivity = conductivity
        if permittivity:
            self.permittivity = permittivity
        self.zeta = zeta
        self.dielectric_strength = dielectric_strength
        if reaction_site_density:
            # surface density of reaction sites: accepts #/nm^2, stores #/m^2 (see Squires)
            self.reaction_site_density = reaction_site_density * 1e18
        self.Ka = Ka  # reaction equilibrium constant - upper bound
        self.Kb = Kb  # reaction equilibrium constant - lower bound
class material_liquid(object):
    def __init__(self, name=None, species=None, concentration=None, conductivity=None, pH=None, density=None, viscosity=None,
                 permittivity=None, temperature=None, valence=1.0):
        """
        Properties of a liquid (electrolyte) phase.

        When a pH is given, the hydrogen-ion concentration c_H is derived from
        it in mmol/L (== mol/m^3). `permittivity` and `pH`/`c_H` attributes
        only exist when the matching argument is truthy.
        """
        # identity
        self.name = name
        # electro/chemical
        self.species = species
        self.concentration = concentration  # (mmol) = (mmol/L) = (mol/m3)
        self.conductivity = conductivity
        if permittivity:
            self.permittivity = permittivity
        if pH:
            self.pH = pH
            self.c_H = 10**-pH * 1e3  # H+ concentration, (mmol) = (mmol/L) = (mol/m3)
        self.valence = valence
        # mechanical
        self.density = density
        self.viscosity = viscosity
        self.temperature = temperature
        self.diffusivity = 2e-9  # (m^2/s) diffusivity of KCl in DI water [Soni]
3,302 | 3ea123aceb72e4731afe98cf4c5beced2d424035 | from django.conf.urls import url
from tipz import views
# URL routes for the tipz app: index, auth (login/logout/register), list and
# detail views for users/projects/pledges, and CRUD routes for projects and
# pledges.
# NOTE(review): django.conf.urls.url() was deprecated in Django 2.0 and
# removed in 4.0 -- re_path()/path() are the modern equivalents; confirm the
# project's Django version before upgrading.
urlpatterns = [
    # /tipz/
    url(r'^$', views.IndexView.as_view(), name='index'),
    # /tipz/login
    url(r'^login/$', views.LoginFormView.as_view(), name='login'),
    # /tipz/logout
    url(r'^logout/$', views.LogoutFormView.as_view(), name='logout'),
    # /tipz/register
    url(r'^register/$', views.RegisterFormView.as_view(), name='register'),
    # /tipz/users/
    url(r'^users/$', views.UsersView.as_view(), name='users'),
    # /tipz/projects/
    url(r'^projects/$', views.ProjectsView.as_view(), name='projects'),
    # /tipz/pledges/
    url(r'^pledges/$', views.PledgesView.as_view(), name='pledges'),
    # /tipz/users/1/
    url(r'^users/(?P<pk>[0-9]+)/$', views.UsersDetailView.as_view(), name='usersDetail'),
    # /tipz/projects/1/
    url(r'^projects/(?P<pk>[0-9]+)/$', views.ProjectsDetailView.as_view(), name='projectsDetail'),
    # /tipz/pledges/1/
    url(r'^pledges/(?P<pk>[0-9]+)/$', views.PledgesDetailView.as_view(), name='pledgesDetail'),
    # /tipz/projects/add/
    url(r'^projects/add/$', views.ProjectCreate.as_view(), name='projects-add'),
    # /tipz/pledges/add/
    url(r'^pledges/add/$', views.PledgeCreate.as_view(), name='pledges-add'),
    # /tipz/projects/1/edit/
    url(r'^projects/(?P<pk>[0-9]+)/edit/$', views.ProjectUpdate.as_view(), name='projects-update'),
    # /tipz/pledges/1/edit/
    url(r'^pledges/(?P<pk>[0-9]+)/edit/$', views.PledgeUpdate.as_view(), name='pledges-update'),
    # /tipz/projects/1/delete/
    url(r'^projects/(?P<pk>[0-9]+)/delete/$', views.ProjectDelete.as_view(), name='projects-delete'),
url(r'^pledges/(?P<pk>[0-9]+)/delete/$', views.PledgeDelete.as_view(), name='pledges-delete')] |
3,303 | 0d32fe36f71ffb3df56738664c5dbd0b8ae585e3 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 19 17:15:58 2021
@author: Professional
"""
# Earlier practice snippets, kept commented out (Uzbek prompts):
#son = int(input("Biror son kiriting: ") )
#print(son, "ning kvadrati", son*son, "ga teng")
#print (son, "ning kubi", son*son*son, "ga teng")
#yosh = int(input("Yoshingiz nechida: "))
#print("Siz", 2021 - yosh, "yilda tug'ilgansz")
# Read two integers and print their sum, difference, quotient and product
# (prompts and labels are Uzbek). NOTE: b == 0 raises ZeroDivisionError below.
a = int(input("Birinchi sonni kiriting: "))
b = int(input("Ikkinchi sonni kiriting: "))
print("yig'indisi ", a + b)
print("ayirmasi ", a - b)
print("bo'linmasi ", a/b)
print("ko'paytmasi ", a*b) |
3,304 | 442c6c4894fc01d0f8142f3dcedfd51ba57aedd1 | from enum import Enum
from typing import List, Optional
from pydantic import BaseModel
class Sizes(str, Enum):
    """Allowed bullion product sizes (string-valued for use in queries)."""
    one_gram = "1g"
    two_and_half_gram = "2.5g"
    one_ounce = "1oz"
    five_ounce = "5oz"
    ten_ounce = "10oz"
class PriceSort(str, Enum):
    """Price comparison direction: greater/less than or equal."""
    gte = "gte"
    lte = "lte"
class Metals(str, Enum):
    """Supported precious-metal types."""
    gold = "gold"
    silver = "silver"
class PriceFilter(BaseModel):
    """A single price constraint: compare against `price` using `type` (gte/lte)."""
    type: PriceSort
    price: float
class ProductSearch(BaseModel):
    """Search query for bullion products; every filter is optional."""
    # Explicit `= None` defaults: pydantic v1 implicitly defaulted bare
    # Optional fields to None, but pydantic v2 treats them as required.
    # Being explicit is compatible with both.
    price: Optional[PriceFilter] = None
    metals: Optional[List[Metals]] = None
    size: Optional[Sizes] = None
|
3,305 | ec625bf57388281b3cbd464459fc3ad1c60b7db9 | '''
多线程更新UI数据(在两个线程中传递数据)
'''
from PyQt5.QtCore import QThread , pyqtSignal, QDateTime
from PyQt5.QtWidgets import QApplication, QDialog, QLineEdit
import time
import sys
class BackendThread(QThread):
    """Worker thread that emits the current timestamp string once a second."""
    # signal carrying the formatted timestamp to the UI thread
    update_date = pyqtSignal(str)

    def run(self):
        # Qt's queued signal delivery makes this cross-thread update safe:
        # the connected slot executes in the receiver's (UI) thread.
        while True:
            data = QDateTime.currentDateTime()
            currentTime = data.toString("yyyy-MM-dd hh:mm:ss")
            self.update_date.emit(str(currentTime))
            time.sleep(1)
class ThreadUpdateUI(QDialog):
    """Dialog whose line-edit is refreshed by a background thread."""

    def __init__(self):
        QDialog.__init__(self)
        self.setWindowTitle('多线程更新UI数据')
        self.resize(400,100)
        self.input = QLineEdit(self)
        self.input.resize(400,100)
        self.initUI()

    def initUI(self):
        # start the backend thread and route its signal to the display slot
        self.backend = BackendThread()
        self.backend.update_date.connect(self.handleDisplay)
        self.backend.start()

    def handleDisplay(self,data):
        # slot: runs on the UI thread with the emitted timestamp string
        self.input.setText(data)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    example = ThreadUpdateUI()
    # show the dialog; the backend thread was started in the constructor
    example.show()
sys.exit(app.exec_()) |
3,306 | 3657d02271a27c150f4c67d67a2a25886b00c593 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 11:06:35 2020
@author: fitec
"""
# version 1
# Smoke-test prints for the covid-19 project repository (messages in French).
print(" Début du projet covid-19 !! ")
print(" test repository distant")
|
3,307 | acf69cd714f04aeceb4be39b8a7b2bc5d77cd69f | from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DATE
from sqlalchemy.orm import relationship
from database import Base
class User(Base):
    """Application user; `username` is the primary key."""
    __tablename__ = "users"

    username = Column(String, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    name = Column(String, index=True)
    last_name = Column(String, index=True)
    celular = Column(String, index=True)  # phone number (Spanish "celular")
    # NOTE(review): column name suggests a raw password -- confirm it is
    # hashed before being stored.
    user_password = Column(String)
    # one-to-many: documents owned by this user
    documents = relationship("Documents", back_populates="owner")
class Documents(Base):
    """A document owned by a user, with an expiry date and notification value."""
    __tablename__ = "documents"

    id = Column(Integer, primary_key=True, index=True)
    name_doc = Column(String, index=True)
    exp = Column(DATE, index=True)  # expiry date
    notif = Column(Integer)  # presumably days before `exp` to notify -- TODO confirm
    descrip = Column(String, index=True)
    owner_username = Column(String, ForeignKey("users.username"))
owner = relationship("User", back_populates="documents") |
3,308 | 63830a3c09a2d0a267b030a336062d5e95b9a71a | from django.urls import path
from . import views
app_name = 'restuarant'
# Routes for the restaurant app: customer-facing menu/checkout plus
# restaurant-owner auth, profile, and menu/order management.
# NOTE(review): the 'restuarant'/'restuarent' spellings match the existing
# view/app names -- leave as-is unless renamed project-wide.
urlpatterns = [
    path('orderplaced/',views.orderplaced),
    path('restaurant/',views.restuarent,name='restuarant'),
    path('login/restaurant/',views.restLogin,name='rlogin'),
    path('register/restaurant/',views.restRegister,name='rregister'),
    path('profile/restaurant/',views.restaurantProfile,name='rprofile'),
    path('restaurant/create/',views.createRestaurant,name='rcreate'),
    path('restaurant/update/<int:id>/',views.updateRestaurant,name='rupdate'),
    path('restaurant/orderlist/',views.orderlist,name='orderlist'),
    path('restaurant/menu/',views.menuManipulation,name='mmenu'),
    path('logout/',views.Logout,name='logout'),
    path('restaurant/<int:pk>/',views.restuarantMenu,name='menu'),
    path('checkout/',views.checkout,name='checkout'),
    path('profile/change_password/', views.change_password, name='change_password')
] |
3,309 | 1cc8695aa694359314b6d478fe6abed29fdc6c91 |
def DFS(x):
    # In-order traversal of the implicit complete binary tree rooted at 1
    # (left child 2x, right child 2x+1), printing nodes 1..7.
    # (The original comment labeled this "preorder"; the print sits between
    # the two recursive calls, which is in-order.)
    if x > 7:
        return
    DFS(2 * x)
    print(x)
    DFS(2 * x + 1)
if __name__ == "__main__":
DFS(1) |
3,310 | 67eb9985fc0ae9a00ce84a2460b69b00df1c9096 | # Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client
# SECURITY: live-looking account SID and auth token are hard-coded below (and
# committed to version control) -- rotate these credentials and load them from
# the environment instead, e.g. os.environ["TWILIO_AUTH_TOKEN"].
account_sid = 'AC76d9b17b2c23170b7019924f709f366b'
auth_token = '8fba7a54c6e3dc3754043b3865fa9d82'
client = Client(account_sid, auth_token)
# sample user documents compared below (miles run vs. weekly goal)
user_sample = [
    {
        "_id": "5e804c501c9d440000986adc",
        "name": "Lizzie Siegle",
        "milesRan": 20,
        "milesGoal": 30
    },
    {
        "_id": "5e804c501c9d440000986adc",
        "name": "Jeff Lawson",
        "milesRan": 5,
        "milesGoal": 20
    }
]
# Text a hard-coded number when the first user's miles meet or beat the
# second user's goal; otherwise just log that nothing was sent.
if (user_sample[0].get("milesRan") >= user_sample[1].get("milesGoal")):
    message = client.messages \
        .create(
            body='Oh, no! ' + user_sample[0].get("name") + ' surpassed your running goal this week. Get moving to keep the lead!',
            from_='+13107364584',
            to='+19162673363'
        )
    print(message.sid)
else:
    print("Nothing sent!");
|
3,311 | abb2cfd2113e8de6c7bba42c357f0ec140b224a9 | from scrapy import cmdline
cmdline.execute("scrapy crawl ariz".split()) |
3,312 | e5979aeb7cff0e2a75966924382bae87aebcfcb2 | from random import random
def random_numbers():
    """Infinite generator of uniform [0, 1) samples, logging each step."""
    print('start generator')
    while True:
        val = random()
        print(f'will yield {val}')
        yield val
def run_random_numbers():
    """Demo: drive the random_numbers generator via next() and send()."""
    print(f'{random_numbers=}')
    rnd_gen = random_numbers()
    print(f'{rnd_gen=}')
    print(f'{next(rnd_gen)=}')
    print(f'{next(rnd_gen)=}')
    # but we can have two way communication
    # send(None) is equivalent to next(); send(42) delivers 42 to the paused
    # yield expression (random_numbers ignores it and yields a fresh value)
    print(f'{rnd_gen.send(None)=}')
    print(f'{rnd_gen.send(42)=}')
    # rnd_gen.throw(Exception)
    # rnd_gen.close()
    # next(rnd_gen)
def inout_gen():
    """Echo generator: yields the last non-None value sent in (None until then)."""
    print('init')
    latest = None
    while True:
        received = yield latest
        if received is not None:
            latest = received
def run_input_gen():
    """Demo: prime inout_gen, then show how send() sets the yielded value."""
    inout_g = inout_gen()
    next(inout_g)  # prime the generator to its first yield
    print(f'{next(inout_g)}')
    print(f'{inout_g.send(22)}')
    print(f'{next(inout_g)}')
def exercise_gen(ret_val, times):
    """Return `ret_value` `times` times.
    If generator will receive some value from outside, update `ret_value`.

    Implemented to satisfy exercise1 and exercise2 below:
      * yields `ret_val` exactly `times` times, then raises StopIteration;
      * a value delivered via .send() replaces `ret_val` for later yields;
      * exceptions delivered via .throw() are swallowed (the generator keeps
        going, consuming one of its `times` slots per resumption).
    """
    for _ in range(times):
        try:
            sent = yield ret_val
        except Exception:
            # ignore injected exceptions (required by exercise2)
            continue
        if sent is not None:
            ret_val = sent
def exercise1():
    """Make it pass"""
    # expected contract of exercise_gen: yields its value `times` times,
    # send() replaces the value, and exhaustion raises StopIteration
    g1 = exercise_gen(42, 3)
    assert next(g1) == 42
    assert g1.send('new val') == 'new val'
    assert next(g1) == 'new val'
    try:
        next(g1)
    except StopIteration:
        # ok
        pass
    else:
        raise Exception('Generator should be invalid')
def exercise2():
    """Update `exercise_gen`, so it will ignore all exceptions"""
    # throw() must be swallowed by the generator, which keeps yielding
    g1 = exercise_gen("I'll ignore errors", 300)
    assert next(g1) == "I'll ignore errors"
    assert g1.send('new val') == 'new val'
    assert g1.throw(Exception) == 'new val'
    assert next(g1) == 'new val'
if __name__ == '__main__':
    # run the demos, then the two exercises (asserts raise if the contract fails)
    run_random_numbers()
    run_input_gen()
    exercise1()
    exercise2()
|
3,313 | 63822d60ef9dcc1e123a3d20874e9f492b439c6d | #! /usr/bin/env python3
# -*- coding:utf-8 -*-
"""
企查查-行政许可[工商局]
"""
import json
import time
import random
import requests
from lxml import etree
from support.use_mysql import QccMysql as db
from support.others import DealKey as dk
from support.others import TimeInfo as tm
from support.headers import GeneralHeaders as gh
class AdmLicense():
    """Scrapes the count of administrative-license records for companies on qcc.com.

    NOTE(review): SQL in this class is assembled with f-strings/inline values;
    acceptable while com_id comes from our own DB, but parameterized queries
    would be safer against injection.
    """
    def get_com_id(self):  # pick one random company that still needs its license status checked
        sel = """
        SELECT `com_id`,`com_name`
        FROM `com_info`
        WHERE `origin`
        IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL
        ORDER BY RAND() LIMIT 1;
        """
        # test SQL #
        # sel = """
        # SELECT `com_id`, `com_name`
        # FROM `com_info`
        # WHERE com_id = '299eee201318f0283f086b4847d69fc7';
        # """
        # test SQL #
        result = db().selsts(sel)
        if result == ():
            # nothing left to scrape: signal with a (None, None) pair
            result = [None, None]
        else:
            result = result[0]
        return result

    def upd_status(self, com_id,status_column,count_column, count):  # update status/count columns on com_info
        # status codes: -1 = parse failure, 0 = no records, 9 = has records
        if count == -1:
            status = -1
        elif count == 0:
            status = 0
        else:
            status = 9
        upd = f"""
        UPDATE
            `com_info`
        SET
            `{status_column}` = "{status}",`{count_column}` = "{count}"
        WHERE
            `com_id` = "{com_id}" ;
        """
        db().updsts(upd)

    def adm_license_judge(self):  # keep sampling companies until one with license records is found; return its info
        # NOTE(review): module-level globals pass com_id/com_name between
        # methods -- plain return values would be cleaner.
        global com_id, com_name
        al = AdmLicense()
        count_adm_license = 0
        count = 0
        while count_adm_license == 0 or count_adm_license == -1:
            result = al.get_com_id()
            com_id = result[0]
            com_name = result[1]
            if com_id == None:
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                # randomized delay to stay under the site's rate limiting
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                # the site signals blocking states via inline scripts/messages
                if '<script>window.location.href' in res:
                    # rate-limited: manual captcha verification needed
                    print('访问频繁,需验证!{adm_license_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    # session cookie expired
                    print('Cookie失效,需更换!{adm_license_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    # account throttled: switch accounts
                    print('账号访问超频,请更换账号!{adm_license_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_adm_license = tree.xpath('//div[@class="company-nav-items"]/span[contains(text(),"行政许可")]/span/text()|//div[@class="company-nav-items"]/a[@data-pos="licenslist"]/span/text()')[0]
                        count_adm_license = int(count_adm_license)
                    except:
                        count_adm_license = -1
                    localtime = tm().get_localtime()  # current time
                    print(localtime)
                    if count_adm_license == 0 or count_adm_license == -1:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息条数:无')
                    else:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息条数:{count_adm_license}')
                    status_column = 'status_credit_adm_license'  # table column name
                    count_column = 'count_credit_adm_license'  # table column name
                    al.upd_status(com_id,status_column,count_column,count_adm_license)
        return com_id, com_name, count_adm_license
class AdmLicenseBc(AdmLicense):
    def bc_judge(self):  # like adm_license_judge, but counts the [工商局] (industry & commerce bureau) subset
        global com_id,com_name
        alb = AdmLicenseBc()
        count_bc = 0
        count = 0
        while count_bc == 0:
            # first find a company that has license records at all
            result = alb.adm_license_judge()
            com_id = result[0]
            com_name = result[1]
            key = dk().search_key(com_name)
            if com_id == None:
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                # randomized delay to stay under the site's rate limiting
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    # rate-limited: manual captcha verification needed
                    print('访问频繁,需验证!{bc_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    # session cookie expired
                    print('Cookie失效,需更换!{bc_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    # account throttled: switch accounts
                    print('账号访问超频,请更换账号!{bc_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_bc = tree.xpath('//div[@class="tcaption"]/h3[contains(text(),"[工商局]")]/following-sibling::span[1]/text()')[0]
                        count_bc = int(count_bc)
                    except:
                        count_bc = 0
                    localtime = tm().get_localtime()  # current time
                    print(localtime)
                    if count_bc == 0:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
                    else:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_bc}')
                    status_column = 'status_credit_adm_license_bc'  # table column name
                    count_column = 'count_credit_adm_license_bc'  # table column name
                    alb.upd_status(com_id, status_column, count_column, count_bc)
        return com_id, com_name, count_bc
    def get_page_count(self):  # derive the number of result pages (10 records per page)
        alb = AdmLicenseBc()
        result = alb.bc_judge()
        com_id = result[0]
        com_name = result[1]
        count_record = result[2]
        # ceiling division: ceil(count_record / 10) without math.ceil
        if count_record % 10 == 0:
            count_page = count_record // 10
        else:
            count_page = count_record // 10 + 1
        value = [com_id, com_name, count_page, count_record]
        return value
def get_page_info(self): # 解析页面内容
alb = AdmLicenseBc()
value = alb.get_page_count()
com_id = value[0]
com_name = value[1]
count_page = value[2]
count_record = value[3]
key = dk().search_key(com_name)
count = 0
for page in range(1, count_page + 1):
index_url = 'https://www.qcc.com'
page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'
hds = gh().header()
hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
time.sleep(random.randint(1, 2))
res = requests.get(page_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{get_page_info[2]}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{get_page_info[2]}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{get_page_info[2]}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
content_li = tree.xpath('//table[@class="ntable ntable-odd"]/tr[position()>2]')
for nbr, content in enumerate(content_li, 1):
count += 1
try:
license_num = content.xpath('td[1]/text()')[0]
license_doc_num = content.xpath('td[2]/text()')[0]
license_doc_name = content.xpath('td[3]/text()')[0]
valid_period_from = content.xpath('td[4]/text()')[0]
valid_period_to = content.xpath('td[5]/text()')[0]
license_office = content.xpath('td[6]/text()')[0]
license_content = content.xpath('td[7]/text()')[0]
except:
license_num = None
license_doc_num = None
license_doc_name = None
valid_period_from = None
valid_period_to = None
license_office = None
license_content = None
print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9, count, page, count_page))
localtime = tm().get_localtime() # 当前时间
create_time = localtime
print(f'当前时间:{create_time}')
print(f'公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n'
f'有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}')
if license_num == None:
ins = """
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
(NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL);
"""
else:
ins = f"""
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
("{com_id}","{license_num}","{license_doc_num}","{license_doc_name}","{valid_period_from}",
"{valid_period_to}","{license_office}","{license_content}","{create_time}");
"""
db().inssts(ins)
upd = f"""
UPDATE
`com_info`
SET
`status_credit_adm_license_bc` = 1
WHERE
`com_id` = "{com_id}" ;
"""
db().updsts(upd)
localtime = tm().get_localtime() # 当前时间
print('\n{1}\n{0}数据采集完成!{0}\n{1}'.format('+' * 7, '+' * 25))
print(f'当前时间:{localtime}\n')
time.sleep(3)
class AdmLicenseCc(AdmLicense):  # administrative licences from [信用中国] (Credit China)
    """Scraper for the [信用中国] administrative licence tab of qcc.com."""

    def cc_judge(self):
        """Iterate companies until one with [信用中国] licence records is found.

        Returns (com_id, com_name, count_cc).
        """
        global com_id,com_name
        alb = AdmLicenseCc()
        count_cc = 0
        count = 0
        while count_cc == 0:
            result = alb.adm_license_judge()
            com_id = result[0]
            com_name = result[1]
            key = dk().search_key(com_name)
            if com_id == None:
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                # Random delay to avoid anti-crawler throttling.
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    print('访问频繁,需验证!{cc_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    print('Cookie失效,需更换!{cc_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    print('账号访问超频,请更换账号!{cc_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_cc = tree.xpath('//div[@class="tcaption"]/h3[contains(text(),"[信用中国]")]/following-sibling::span[1]/text()')[0]
                        count_cc = int(count_cc)
                    except:
                        count_cc = 0
                    localtime = tm().get_localtime()  # current time
                    print(localtime)
                    if count_cc == 0:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
                    else:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_cc}')
                    status_column = 'status_credit_adm_license_cc'  # table column name
                    count_column = 'count_credit_adm_license_cc'  # table column name
                    # NOTE(review): `cd` is not defined in this method — every
                    # sibling class uses its own instance (`alb`) here; confirm
                    # a module-level `cd` exists, otherwise this is a NameError
                    # and should read `alb.upd_status(...)`.
                    cd.upd_status(com_id, status_column, count_column, count_cc)
        return com_id, com_name, count_cc

    def get_page_info(self):  # parse the licence page content
        """Scrape the [信用中国] licence rows plus their detail pop-ups for
        one company (currently prints them; no DB insert yet)."""
        global project_name,license_status,license_content,expire_time,approval_category,area
        alb = AdmLicenseCc()
        value = alb.cc_judge()
        com_id = value[0]
        com_name = value[1]
        count_cc = value[2]
        key = dk().search_key(com_name)
        count = 0
        index_url = 'https://www.qcc.com'
        # NOTE(review): `page` is never assigned in this method (there is no
        # paging loop, unlike AdmLicenseBc.get_page_info) — this f-string
        # raises NameError unless a global `page` exists; confirm.
        page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'
        hds = gh().header()
        hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
        time.sleep(random.randint(3, 5))
        res = requests.get(page_url, headers=hds).text
        if '<script>window.location.href' in res:
            print('访问频繁,需验证!{cc_judge}')
            input('暂停')
        elif '<script>location.href="/user_login"</script>' in res:
            print('Cookie失效,需更换!{cc_judge}')
            input('程序暂停运行!')
        elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
            print('账号访问超频,请更换账号!{cc_judge}')
            input('程序暂停运行!')
        else:
            tree = etree.HTML(res)
            content_li = tree.xpath('//div[@class="tcaption"]/span[contains(text(),"[信用中国]")]/parent::div/following-sibling::table[@class="ntable ntable-odd"]/tr[position()>2]')
            for nbr, content in enumerate(content_li, 1):
                count += 1
                try:
                    license_num = content.xpath('td[1]/text()')[0]
                    dec_book_num = content.xpath('td[2]/text()')[0]
                    license_office = content.xpath('td[3]/text()')[0]
                    dec_date = content.xpath('td[4]/text()')[0]
                    time.sleep(random.randint(1, 2))
                    # Extract the detail id from the row's onclick handler,
                    # then fetch the licence detail via the JSON endpoint.
                    dt_id = content.xpath('td[5]/a[@class="xzxukeView"]/@onclick')[0].split('xzxukeView("')[1].split('")')[0]
                    dt_url = 'https://www.qcc.com/company_xzxukeView'
                    para = {'id':f'{dt_id}'}
                    res_info = requests.post(dt_url, headers=hds,data=para).text
                    status = json.loads(res_info)['status']
                    if status == 200:
                        data = json.loads(res_info)['data']
                        project_name = data['name']
                        license_status = data['status']
                        license_content = data['content']
                        expire_time = data['expire_time']
                        approval_category = data['type']
                        area = data['province']
                    else:
                        print(f'响应失败!\n状态码:{status}')
                        input('程序暂停运行!')
                except:
                    # Malformed row or failed detail fetch: blank the record.
                    # NOTE(review): `area` is not reset here, so it may carry
                    # over the previous row's value — confirm intended.
                    license_num = None
                    dec_book_num = None
                    license_office = None
                    dec_date = None
                    dt_id = None
                    project_name = None
                    license_status = None
                    license_content = None
                    expire_time = None
                    approval_category = None
                # NOTE(review): `count_page` (and `page`) are not defined in
                # this method — copied from AdmLicenseBc.get_page_info; confirm.
                print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9, count, page, count_page))
                localtime = tm().get_localtime()  # current time
                create_time = localtime
                print(f'当前时间:{create_time}')
                print(f'公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n'
                      f'决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n'
                      f'审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}')
                input('Pause')
if __name__ == '__main__':
    # Entry point: scrape the [信用中国] administrative-licence tab.
    cc = AdmLicenseCc()
    cc.get_page_info()
3,314 | ae71cbd17ec04125354d5aac1cf800f2dffa3e04 | # new libraries
import ConfigParser
import logging
from time import time
from os import path
# imports from nike.py below
import smass
import helperFunctions
import clusterSMass_orig
import numpy as np
from joblib import Parallel, delayed
def getConfig(section, item, boolean=False,
userConfigFile="BMA_StellarMass_Config.ini"):
configFile = ConfigParser.ConfigParser()
configFile.read(userConfigFile)
# if config item not found, raise log warning
if (not configFile.has_option(section, item)):
msg = '{item} from [{setion}] NOT found in config file: {userConfigFile}!'.format(
item=item, section=section,
userConfigFile=userConfigFile)
if (section != 'Log'):
logging.warning(msg)
else:
print msg
return ""
# else save item value (debug)
msg = '{item}: {value}'.format(
item=item, value=configFile.get(section, item))
if (section != 'Log'):
logging.debug(msg)
else:
print msg
if (not boolean):
return configFile.get(section, item)
else:
return configFile.getboolean(section, item)
def isOperationSet(operation, section="Operations"):
    """Return the boolean config flag saying whether *operation* is enabled."""
    return getConfig(section=section, item=operation, boolean=True)
def createLog():
    """Configure root logging from the [Log] section of the config file."""
    logLevel = getConfig("Log", "level")
    logFileName = getConfig("Log", "logFile")
    myFormat = '[%(asctime)s] [%(levelname)s]\t%(module)s - %(message)s'
    # Anything other than an explicit DEBUG level falls back to INFO.
    chosenLevel = logging.DEBUG if logLevel == 'DEBUG' else logging.INFO
    logging.basicConfig(
        filename=logFileName,
        level=chosenLevel,
        format=myFormat)
def extractTarGz(tarFileName, path):
    """Extract the gzipped tarball *tarFileName* into directory *path*.

    BUG FIX: the original extracted into the undefined global ``inputPath``
    instead of the ``path`` parameter (NameError unless a global happened to
    exist), and leaked the tar handle on error.
    """
    import tarfile
    tar = tarfile.open(tarFileName, "r:gz")
    try:
        tar.extractall(path=path)
    finally:
        # Always release the file handle, even if extraction fails.
        tar.close()
def getInputPath():
    """Return the input directory, extracting the configured .tar.gz if needed.

    Returns "" when neither an inputPath nor a usable tar file is configured.
    """
    inputPath = getConfig("Paths", "inputPath")
    if inputPath:
        return inputPath
    # No inputPath configured: fall back to the configured tarball.
    tarName = getConfig("Files", "tarFile")
    tarUnusable = (not tarName or not path.isfile(tarName)
                   or not tarName.endswith("tar.gz"))
    if tarUnusable:
        return ""
    # Uncompress the archive into a fixed local folder.
    inputPath = "./simha_miles_Nov2016/"
    extractTarGz(tarFileName=tarName, path=inputPath)
    return inputPath
def getStellarMassOutPrefix():
    """Return the mandatory stellar-mass output prefix, aborting when unset."""
    prefix = getConfig("Files", "stellarMassOutPrefix")
    if prefix:
        return prefix
    logging.critical("Can't continue without stellarMassOutPrefix defined! Exiting.")
    exit()
def combineFits():
    """Concatenate the per-job stellar-mass FITS outputs into one catalog."""
    # Local import: combineCat is a project module.
    from combineCat import combineBMAStellarMassOutput
    stellarMassOutPrefix = getStellarMassOutPrefix()
    combineBMAStellarMassOutput(stellarMassOutPrefix)
def computeStellarMass(batch, memPerJob):
    """Compute stellar masses for one batch of afterburner members.

    *batch* is the starting row index, *memPerJob* the rows per job; the
    job number (and output file suffix) is derived from them.  This is the
    longest-running stage of the pipeline.
    """
    # For running the stellar masses (takes the longest)
    batchIndex = batch + memPerJob
    job = int(batchIndex / memPerJob)
    logging.debug('Starting computeStellarMass() with batch = {b}; job = {j}.'.format(
        b = batch, j = job))
    stellarMassOutFile = getConfig("Files","stellarMassOutPrefix") + "{:0>5d}.fits".format(job)
    inPath = getInputPath()
    membersInFile = getConfig("Files","membersInputFile")
    if (not inPath or not membersInFile):
        logging.critical("Can't continue without either inputPath or membersInputFile defined! Exiting.")
        exit()
    # Read rows [batch, batchIndex) and run the BMA stellar-mass fit.
    inputDataDict = helperFunctions.read_afterburner(membersInFile, batch, batchIndex)
    smass.calc(inputDataDict, outfile=stellarMassOutFile, indir=inPath, lib="miles")
    logging.debug('Returning from computeStellarMass() with batch = {b}; job = {j}.'.format(
        b = batch, j = job))
def computeClusterStellarMass():
    """Aggregate the combined member stellar masses into per-cluster masses."""
    stellarMassFile = getConfig("Files","stellarMassOutPrefix") + 'full.fits'
    clusterOutFile = getConfig("Files","clusterStellarMassOutFile")
    logging.info('Computing cluster stellar mass.')
    clusterSMass_orig.haloStellarMass(filename = stellarMassFile, outfile = clusterOutFile)
def parallelComputeStellarMass(batchStart=0,
        batchMax=25936, nJobs=100, nCores=20):
    """Fan computeStellarMass() out over *nJobs* batches on *nCores* workers,
    then combine the per-job FITS files into one catalog."""
    # nJobs is normally = 100
    batchesList = np.linspace(batchStart, batchMax, nJobs, endpoint=False, dtype=int)
    logging.info('Calling parallelism inside parallelComputeStellarMass().')
    # Each job processes (batchMax - batchStart) / nJobs rows.
    # NOTE(review): this file is Python 2 — under Python 3 this division
    # yields a float; computeStellarMass() int()s the derived job number,
    # but confirm the batch size is intended to be integral.
    Parallel(n_jobs=nCores)(delayed(computeStellarMass)
        (batch, (batchMax - batchStart) / nJobs)
        for batch in batchesList)
    # generate concatenated fits file
    logging.info('Combining fits.')
    combineFits()
def main():
    """Driver: configure logging, then run the stages enabled in the
    [Operations] config section, timing each one."""
    # start logging
    createLog()
    logging.info('Starting BMA Stellar Masses program.')
    # get initial time
    total_t0 = time()
    # check and parallel compute stellar mass,
    # if it is the case
    if (isOperationSet(operation="stellarMass")):
        logging.info('Starting parallel stellar masses operation.')
        section = "Parallel"
        stellarMass_t0 = time()
        # get parallel information
        batchStart = int(getConfig(section, "batchStart"))
        batchMax   = int(getConfig(section, "batchMax"))
        nJobs      = int(getConfig(section, "nJobs"))
        nCores     = int(getConfig(section, "nCores"))
        # call function to parallel compute
        parallelComputeStellarMass(batchStart=batchStart,
            batchMax=batchMax, nJobs=nJobs, nCores=nCores)
        # save time to compute stellar mass
        stellarMassTime = time() - stellarMass_t0
        stellarMassMsg = "Stellar Mass (parallel) time: {}s".format(stellarMassTime)
        logging.info(stellarMassMsg)
    # check and compute cluster stellar mass,
    # if it is the case
    if (isOperationSet(operation="clusterStellarMass")):
        logging.info('Starting cluster stellar mass operation.')
        clusterStellarMassTime_t0 = time()
        computeClusterStellarMass()
        # save time to compute cluster stellar mass
        clusterStellarMassTime = time() - clusterStellarMassTime_t0
        clusterStellarMassMsg = "Cluster Stellar Mass time: {}s".format(clusterStellarMassTime)
        logging.info(clusterStellarMassMsg)
    # save total computing time
    totalTime = time() - total_t0
    totalTimeMsg = "Total time: {}s".format(totalTime)
    logging.info(totalTimeMsg)
    logging.info('All done.')

if __name__ == "__main__":
    main()
|
3,315 | 3d0fe0c11e62a03b4701efb19e1c15272ccc985e | """
Batch viewset
Viewset to batch serializer
"""
# Django Rest Framework
from rest_framework import viewsets
# Inventory models
from apps.inventory.models import Batch
# Inventory serializers
from apps.inventory.serializers import BatchSerializer
class BatchViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Batch records.

    Deleting a batch never removes the row: the destroy action flips the
    ``is_active`` flag instead, so it doubles as a restore toggle.
    """

    queryset = Batch.objects.all()
    serializer_class = BatchSerializer

    def perform_destroy(self, instance):
        """Logical delete: toggle ``is_active`` rather than deleting the row."""
        instance.is_active = not instance.is_active
        instance.save()
|
3,316 | 5485fe4f612ededc11e3a96dfd546e97a56cbe2a | # Generated by Django 2.2.5 on 2019-10-09 12:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Initial schema for the core app: Product tree (django-mptt),
    Ticket, Task, Comment and Attachment.  Auto-generated by Django."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0002_customer_employee_lead_manager'),
    ]

    operations = [
        # Product: MPTT tree node (lft/rght/tree_id/level are bookkeeping
        # fields managed by django-mptt, not set by application code).
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('lft', models.PositiveIntegerField(editable=False)),
                ('rght', models.PositiveIntegerField(editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(editable=False)),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='core.Product')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Ticket: issue attached to a Product.
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('created', models.DateTimeField()),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='core.Product')),
            ],
        ),
        # Task: unit of work assigned to an Employee.
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('estimated', models.DateTimeField()),
                ('reported', models.DateTimeField()),
                ('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='users.Employee')),
            ],
        ),
        # Comment: user remark on a Ticket.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='core.Ticket')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Attachment: file uploaded against a Ticket.
        migrations.CreateModel(
            name='Attachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('file', models.FileField(upload_to='')),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='core.Ticket')),
            ],
        ),
    ]
|
3,317 | fa045ccd4e54332f6c05bf64e3318e05b8123a10 | # Generated by Django 2.2.13 on 2021-08-11 15:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Field tweaks on Notification: auto-updating date, priority choices,
    and sent_mail default.  Auto-generated by Django."""

    dependencies = [
        ("notifications", "0011_auto_20171229_1747"),
    ]

    operations = [
        migrations.AlterField(
            model_name="notification",
            name="date",
            field=models.DateTimeField(auto_now=True, verbose_name="Dato"),
        ),
        migrations.AlterField(
            model_name="notification",
            name="priority",
            field=models.PositiveIntegerField(
                choices=[(0, "Low"), (1, "Medium"), (2, "High")],
                default=1,
                verbose_name="priority",
            ),
        ),
        migrations.AlterField(
            model_name="notification",
            name="sent_mail",
            field=models.BooleanField(default=False, verbose_name="sent mail"),
        ),
    ]
|
3,318 | 7a2b33d1763e66335c6a72a35082e20725cab03d | # -*- coding:utf-8 -*-
#
from django.core.paginator import Paginator
def pagination(request, queryset, display_amount=15, after_range_num=5, bevor_range_num=4):
    """Paginate *queryset* and build a trimmed page-number navigation window.

    Returns (objects, page_range): the requested page of results and the
    page numbers to show around the current page.
    """
    # Split the queryset into pages of display_amount items.
    paginator = Paginator(queryset, display_amount)
    try:
        # Read the "page" GET parameter from the request.
        page = int(request.GET['page'])
    except:
        # Missing or non-numeric: default to the first page.
        page = 1
    try:
        # Try to fetch the requested page.
        objects = paginator.page(page)
    # If the page number is out of range...
    except paginator.EmptyPage:
        # NOTE(review): EmptyPage is normally imported from
        # django.core.paginator, not read off the Paginator instance —
        # confirm this attribute exists; if it does not, evaluating it
        # here raises AttributeError instead of catching EmptyPage.
        # ...fall back to the last page.
        objects = paginator.page(paginator.num_pages)
    # Any other paging error...
    except:
        # ...falls back to the first page.
        objects = paginator.page(1)
    # Trim the navigation to a window around the current page.
    if page >= after_range_num:
        page_range = paginator.page_range[page-after_range_num:page+bevor_range_num]
    else:
        page_range = paginator.page_range[0:page+bevor_range_num]
    return objects, page_range
|
3,319 | 07aafcb3db9c57ad09a29a827d72744ef0d22247 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from whoosh.fields import TEXT, ID, Schema
# Whoosh full-text index schema for Brightway2 dataset search: stored
# (and where useful, sortable) text fields plus a unique "code" id.
bw2_schema = Schema(
    name=TEXT(stored=True, sortable=True),
    comment=TEXT(stored=True),
    product=TEXT(stored=True, sortable=True),
    categories=TEXT(stored=True),
    location=TEXT(stored=True, sortable=True),
    database=TEXT(stored=True),
    code=ID(unique=True, stored=True),
)
|
3,320 | 33c241747062ab0d374082d2a8179335503fa212 | """ Problem statement:
https://leetcode.com/problems/contains-duplicate-ii/description/
Given an array of integers and an integer k, find out whether
there are two distinct indices i and j in the array such that nums[i] = nums[j]
and the absolute difference between i and j is at most k.
"""
class Solution:
    def containsNearbyDuplicate(self, nums, k):
        """Return True if some value repeats within a window of k indices.

        Time complexity: O(n). Space complexity: O(n), n is len(nums).
        """
        last_seen = {}  # value -> index of its most recent occurrence
        for idx, val in enumerate(nums):
            prev = last_seen.get(val)
            if prev is not None and idx - prev <= k:
                return True
            # Remember the latest position of this value.
            last_seen[val] = idx
        return False
if __name__ == "__main__":
    # Quick manual check: 1 appears at indices 0 and 4 (distance 4 <= k).
    sol = Solution()
    nums = [1, 2, 3, 4, 1, 6, 8]
    k = 4
    print(sol.containsNearbyDuplicate(nums, k))
|
3,321 | 05454cc6c9961aa5e0de6979bb546342f5bd7b79 | # The following code causes an infinite loop. Can you figure out what’s missing and how to fix it?
# def print_range(start, end):
# # Loop through the numbers from start to end
# n = start
# while n <= end:
# print(n)
# print_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line)
# Solution
# Variable n's value is not being incremented. We need to increment the value.
# Here is the example
def print_range(start, end):
    """Print every number from start through end (inclusive), one per line."""
    current = start
    while current <= end:
        print(current)
        # Advance, otherwise the loop never terminates.
        current += 1

print_range(1, 5)  # Should print 1 2 3 4 5 (each number on its own line)
3,322 | 8ee26d181f06a2caf2b2b5a71a6113c245a89c03 | #!/usr/bin/python
# -*- coding : utf-8 -*-
"""
@author: Diogenes Augusto Fernandes Herminio <diofeher@gmail.com>
"""
# Director
class Director(object):
    """Drives a Builder through the ordered steps of assembling a Building."""

    def __init__(self):
        # The concrete builder is injected by the client after construction.
        self.builder = None

    def construct_building(self):
        """Run the build steps on the attached builder."""
        self.builder.new_building()
        self.builder.build_floor()
        self.builder.build_size()

    def get_building(self):
        """Return the product assembled by the attached builder."""
        return self.builder.building
# Abstract Builder
class Builder(object):
    """Abstract builder: holds the Building instance under construction."""

    def __init__(self):
        self.building = None

    def new_building(self):
        """Start work on a fresh, empty Building."""
        self.building = Building()
# Concrete Builder
class BuilderHouse(Builder):
    """Concrete builder producing a big, one-floor house."""

    def build_floor(self):
        self.building.floor = 'One'

    def build_size(self):
        self.building.size = 'Big'
class BuilderFlat(Builder):
    """Concrete builder producing a small, multi-floor flat."""

    def build_floor(self):
        self.building.floor = 'More than One'

    def build_size(self):
        self.building.size = 'Small'
# Product
class Building(object):
    """Product: a building described by its floor count and its size."""

    def __init__(self):
        self.floor = None
        self.size = None

    def __repr__(self):
        return 'Floor: {} | Size: {}'.format(self.floor, self.size)
#Client
if __name__=="__main__":
    # Client: have the director assemble a house and show the result.
    director = Director()
    director.builder = BuilderHouse()
    director.construct_building()
    building = director.get_building()
    print building
3,323 | d1402469232b5e3c3b09339849f6899e009fd74b | # -*- coding: utf-8 -*-
# Base endpoint settings for the availability service.
scheme = 'http'
hostname = 'localhost'
port = 9000

# Routes polled for availability checks.
routes = [
    '/available/2',
    '/available/4'
]
|
3,324 | b8c749052af0061373808addea3ad419c35e1a29 | v1=int(input("Introdu virsta primei persoane"))
v2=int(input("Introdu virsta persoanei a doua"))
v3=int(input("Introdu virsta persoanei a treia"))
if ((v1>18)and(v1<60)):
print(v1)
elif((v2>18)and(v2<60)):
print(v2)
elif((v3>18)and(v3<60)):
print(v3) |
3,325 | e12c411814efd7cc7417174b51f0f756589ca40b | was=input()
print(was)
|
3,326 | 4ba0f7e947830018695c8c9e68a96426f49b4b5b | from ddt import ddt, data, unpack
import sys
sys.path.append("..")
from pages.homepage import HomePage
from base.basetestcase import BaseTestCase
from helpers.filedatahelper import get_data
@ddt
class QuickSearchTest(BaseTestCase):
    """Data-driven UI test: each spreadsheet row drives one quick search."""

    # Rows are (search_value, expected_result, notes) tuples.
    testingdata = get_data('testdata/QuickSearchTestData.xlsx')

    @data(*testingdata)
    @unpack
    def test_QuickSearch(self, search_value, expected_result, notes):
        # Search from the home page and assert the expected hit is listed.
        homepage = HomePage(self.driver)
        search_results = homepage.search.searchFor(search_value)
        self.assertTrue(expected_result in search_results.get_results())
if __name__ == '__main__':
    # BUG FIX: unittest was used here without ever being imported at module
    # level, raising NameError when the file was run directly.
    import unittest
    unittest.main(verbosity=2)
3,327 | 896329a8b14d79f849e4a8c31c697f3981395790 | # 문제 풀이 진행중..(나중에 재도전)
import collections
class Solution(object):
    def removeStones(self, stones):
        """
        :type stones: List[List[int]]
        :rtype: int

        Work-in-progress attempt (author planned to retry later): for each
        stone count how many others share its row or column, treat stones
        with no neighbour as isolated "islands", and return
        len(stones) - (islands + 1).

        BUG FIX: the original compared integers with ``is`` (identity),
        which only happens to work for CPython's interned small ints;
        coordinates or counts outside [-5, 256] would compare wrongly.
        All comparisons now use ``==``.
        """
        # One neighbour-counter per stone.
        stones_share_list = [0] * len(stones)
        # Count, for every stone, the stones connected to it (same row/col).
        for i in range(len(stones)):
            check_stone = stones[i]
            connect_count = 0
            for j in range(len(stones)):
                if i == j:
                    continue
                if check_stone[0] == stones[j][0] or check_stone[1] == stones[j][1]:
                    connect_count += 1
            stones_share_list[i] = connect_count
        # No stone touches any other: nothing can be removed.
        connect_sum = sum(stones_share_list)
        if connect_sum == 0:
            return 0
        # Stones with zero connections are isolated islands.
        island = 0
        print(stones_share_list)
        for connect in stones_share_list:
            if connect == 0:
                island += 1
        print(island)
        return len(stones) - (island + 1)
# Manual driver: run the in-progress solution on a sample board.
s = Solution()
# Earlier trial inputs kept for reference:
# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2],[2,3]]
# temp_value = [[0,0],[0,1],[1,0],[1,2],[2,1],[2,2]]
# temp_value = [[0,0],[0,2],[1,1],[2,0],[2,2]]
temp_value = [[3,2],[3,1],[4,4],[1,1],[0,2],[4,0]]
print(s.removeStones(temp_value))
3,328 | 7db31940aea27c10057e2ce1e02410994bd2039b | from ROOT import *
import math
import os,sys,time,glob,fnmatch
import argparse
import ROOT
import sys
sys.path.append("utils")
from moments import *
from dirhandle import *
from plothandle import *
from AnalysisGeneratorMT import *
def doAnalysis( blabla):
    # Thin module-level wrapper so multiprocessing.Pool can pickle the call
    # to the analysis object's DoThreatdAnalysis method.
    return blabla.DoThreatdAnalysis()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--test' , action='store_true', default = False)
parser.add_argument('-v', '--verbose' , action='store_true', default = False)
parser.add_argument("--TreeName" , type=str, default = "truth", help="Tree to read? ")
parser.add_argument("-D", '--inputdir' , type=str, default = "/lustre/hpc/hep/alonso/Top/AnalysisTop-2.4.27/Download")
parser.add_argument("-O", '--outputdir' , type=str, default = "plotGeneratorLevel")
parser.add_argument("-b", '--bjets' , type=int, default = 0)
parser.add_argument('--treads' , type=int, default = 20)
args = parser.parse_args()
verbose = 0
if args.verbose:
verbose = 1
#Just ttbar samples:
pattern = [
"user.aalonso.410037*"
"user.aalonso.410038*",
"user.aalonso.410039*",
"user.aalonso.410040*",
"user.aalonso.410041*",
"user.aalonso.410250*",
"user.aalonso.410251*",
"user.aalonso.410252*",
"user.aalonso.410000*",
"user.aalonso.410001*",
"user.aalonso.410002*",
"user.aalonso.410003*",
"user.aalonso.410004*",
"user.aalonso.410159*",
"user.aalonso.410501*",
"user.aalonso.410009*",
"user.aalonso.410021*",
]
#pattern = ["user.aalonso.[3-4]*"]
treads=args.treads
Dir = args.inputdir
nbjets = args.bjets
TreeName = args.TreeName
outfolder = args.outputdir
outname = "outAll_Reco_MT"
outname += "_MC"
outname = outname + "_" +outfolder+ ".root"
print "########################################################################################################"
print "##"
print "## This is the setup we are going to use:"
print "## Input directory:\t\t\t", Dir
print "## Pattern for root files:\t\t", pattern
print "## Results will be saved in:\t\t", outfolder
print "## And: \t\t\t\t", outname
print "## Number of bjets :\t\t\t", nbjets
print "## Number of treads to run:\t\t", treads
print "##"
print "########################################################################################################"
CreateFolder (outfolder)
fortchain,keys = ScanFolder(Dir, pattern, TreeName)
workers = {}
for i in keys:
analysis = analysisGeneratorMT( fortchain[i], nbjets, i,verbose)
workers [i] = analysis
Histos = {}
MomentsAll = {}
if (treads > 1):
from multiprocessing import Process, Pool
pool = Pool(processes=treads) # start 4 worker processes
jobs = {}
for i in keys:
res = pool.apply_async( doAnalysis, ( workers [i],))
jobs[i] = res
for i in jobs:
histo , mom = jobs[i].get(timeout=100000)
if verbose:
print "Job: ",i
print histo
print mom
Histos[i] = histo
MomentsAll [i] = mom
else:
for i in keys:
histo,mom = workers [i].DoThreatdAnalysis()
Histos[i] = histo
MomentsAll [i] = mom
SaveRootFile( outname, Histos, MomentsAll)
|
3,329 | 60617ff6eda880e5467b3b79d3df13a7147f5990 | import math
def sieve(n):
    """Return a 0/1 primality table for 0..n (flags[i] == 1 iff i is prime)."""
    flags = [1] * (n + 1)
    flags[0] = 0
    flags[1] = 0
    # Cross off multiples of each prime up to sqrt(n).
    for p in range(2, int(math.sqrt(n) + 1)):
        if flags[p] == 1:
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = 0
    return flags
def odd_prime(a):
    """Return True when the decimal digits of *a* contain none of 3, 5, 7."""
    while a != 0:
        digit = a % 10
        if digit in (3, 5, 7):
            return False
        a = a // 10
    return True
def main():
    """Read t queries; for each x, print the sum of the first x primes
    whose decimal digits avoid 3, 5 and 7."""
    t = int(input())
    # PERF FIX: the sieve bound is a fixed 75000, yet the original rebuilt
    # the sieve and the filtered prime list once per query — O(t * n).
    # Build them once up front; the printed output is unchanged.
    n = 75000
    arr = sieve(n)
    # Primes in ascending order whose digits contain none of 3, 5, 7.
    final = [i for i in range(len(arr)) if arr[i] == 1 and odd_prime(i)]
    for _ in range(t):
        x = int(input())
        # Indexing (not slicing) preserves the original IndexError when a
        # query asks for more such primes than exist below the bound.
        print(sum(final[i] for i in range(x)))

if __name__ == '__main__':
    main()
3,330 | c925bed2f4d8120e156caebbe8e6bf9d6a51ee37 | import csv
import glob
import random
import sys
from math import ceil, floor
from os.path import basename, exists, dirname, isfile
import numpy as np
import keras
from keras import Model, Input, regularizers
from keras.layers import TimeDistributed, LSTMCell, Reshape, Dense, Lambda, Dropout, Concatenate
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
from keras.optimizers import Adam, SGD
from sklearn.metrics import confusion_matrix, accuracy_score # , classification_report
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
from Dataset.Dataset_Utils.augmenter import NoAug
from Dataset.Dataset_Utils.datagen import DataGenerator as DataGen
from Dataset.Dataset_Utils.dataset_tools import print_cm
from Models.model_sharma import SharmaNet
from audio_classifier import AudioClassifier, from_arff_to_feture
from frames_classifier import FramesClassifier
from test_models import *
classes = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]
def my_model():
    """Late-fusion classifier head: concatenate per-timestep frame (1024-d)
    and audio (1582-d) features over 16 steps, apply a shared dense+softmax
    over the 7 emotion classes, then average predictions across time.

    NOTE(review): `tf` is not imported explicitly here — presumably it comes
    from the `from test_models import *` star import; confirm.
    """
    r1, r2 = regularizers.l2(1e-5), regularizers.l2(1e-5)
    frame_input = Input(shape=(16, 1024))
    audio_input = Input(shape=(16, 1582))
    x = Concatenate(name='fusion1')([frame_input, audio_input])
    x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1, name='ff_logit_lstm'))(x)
    x = TimeDistributed(Dropout(0.5))(x)
    x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=r2, name='ff_logit'))(x)
    # Mean over the time axis -> one 7-way prediction per clip.
    x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)
    return Model([audio_input, frame_input], x)
class VideoClassifier:
    def __init__(self, train_mode="late_fusion", video_model_path=None, time_step=16,
                 base_path="/user/vlongobardi/AFEW/aligned/", feature_name="emobase2010_100", stride=1):
        """Load a trained fusion model when *video_model_path* is given;
        otherwise build the early-fusion CSVs from *base_path* and train."""
        self.time_step = time_step
        self.train_mode = train_mode
        self.feature_name = feature_name
        self.classes = classes
        self.lb = LabelBinarizer()
        self.lb.fit_transform(np.array(classes))
        self.feature_num = 1582
        # Frames to discard at clip start: half the audio window length (ms,
        # encoded in the feature name) divided by the 40 ms frame period.
        self.offset = ceil(int(self.feature_name.split("_")[1]) / 2 / 40)
        self.stride = stride
        if video_model_path is not None:
            try:
                self.model = my_model()
                self.model.load_weights(video_model_path)
                print("VideoClassifier loaded successfully", video_model_path)
            except:
                # Weight loading failed; model is left in an undefined state.
                print("Exception")
        else:
            # Training path: collect per-clip CSVs for the Train/Val splits.
            t_files = glob.glob(base_path + "Train" + "/*/*csv")
            v_files = glob.glob(base_path + "Val" + "/*/*csv")
            self.csv_fusion = self.generate_feature(t_files, v_files)
            self.do_training()
    def do_training(self):
        """Grid-search driver: train my_model across optimizers / learning
        rates / iterations, using the early-fusion generators."""
        skips = 0  # number of leading grid combinations to skip (resume aid)
        iters = 1
        bs = 16
        ep = 150
        opts = ["SGD"]#, "Adam"]
        lrs = [0.01]
        models = [my_model]
        models_name = [x.__name__ for x in models]
        for index, model in enumerate(models):
            for opt in opts:
                for lr in lrs:
                    for iteration in range(iters):
                        if skips > 0:
                            skips -= 1
                            continue
                        train_infos = {
                            "iteration": iteration, "model_name": models_name[index],
                            "batch_size": bs, "epoch": ep, "lr": lr, "opt": opt
                        }
                        print(
                            "\n\n################################################################################\n"
                            "############################## ITERATION " + str(iteration + 1) + " of " + str(iters) +
                            " ###########################\n######################################################" +
                            " ########################\nepochs:", ep, "batch_size:", bs, "\nmodel:", models_name[index],
                            "in", models_name, "\nopt:", opt, "in", opts, "\nlr:", lr, "in", lrs)
                        # Attach the data generators and launch one training run.
                        train_infos["generator1"] = self.early_gen_train
                        train_infos["generator2"] = self.early_gen_new_val
                        t_files, v_files = self.csv_fusion["train"], self.csv_fusion["val"]
                        m = model()
                        self.train(t_files, v_files, train_infos, m)
    def generate_feature(self, t_files, v_files):
        """Return {"train": ..., "val": ...} early-fusion path maps,
        generating the CSV caches on first use and loading them afterwards."""
        if not exists('features_path_early_fusion_train_' + self.feature_name + '.csv'):
            print("\n##### GENERATING CSV FOR EARLY FUSION... #####")
            csv_early_fusion = {
                "train": self._generate_data_for_early_fusion(t_files, "train"),
                "val": self._generate_data_for_early_fusion(v_files, "val")
            }
            print("\n##### CSV GENERATED! #####")
        else:
            # Caches exist: just load them.
            csv_early_fusion = {}
            for name in ["train", "val"]:
                csv_early_fusion[name] = self.load_early_csv(name)
        return csv_early_fusion
    def load_early_csv(self, dataset):
        """Read a cached early-fusion CSV for *dataset* ("train"/"val") into
        {clip_id: [[ground_truth, frame_path, audio_path], ...]}."""
        csv_early_fusion = {}
        print("Opening csv: features_path_early_fusion_" + dataset + "_" + self.feature_name + '.csv')
        with open('features_path_early_fusion_' + dataset + "_" + self.feature_name + '.csv', 'r') as f:
            f.readline()  # skip the header row
            csv_reader = csv.reader(f)
            for clip_id, ground_truth, frame_label, audio_label in csv_reader:
                if clip_id not in csv_early_fusion:
                    csv_early_fusion[clip_id] = []
                csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label])
        return csv_early_fusion
def _generate_data_for_early_fusion(self, files, name):
    """Pair per-frame features with per-window audio features for each clip.

    For every clip CSV path in `files`, globs the matching frame-feature
    (.dat) and audio-feature (.arff) files, aligns them index-by-index
    (or maps every frame to the single full-clip audio file when the
    feature set is a "full" variant), writes the resulting table to
    'features_path_early_fusion_<name>_<feature_name>.csv' and returns it
    as {clip_id: [[ground_truth, frame_path, audio_path], ...]}.

    Example path layout (see the three samples below):
    """
    # '/user/vlongobardi/AFEW/aligned/Train/Angry/012738600.csv'
    # '/user/vlongobardi/early_feature/framefeature/Train/Angry/012738600_0.dat'
    # '/user/vlongobardi/early_feature/emobase2010_600/Train/Angry/012738600_0.arff'
    if "full" in self.feature_name:
        frame_to_discard = 0
    else:
        # Window length is encoded in the feature name (e.g. "..._300" -> 300 ms);
        # half a window at 40 ms/frame is dropped from the head of each clip.
        window_size = int(self.feature_name.split("_")[1])
        frame_to_discard = ceil(window_size / 2 / 40)
    my_csv = {}
    for file in tqdm(files):
        clip_id_temp = file.split(".")[0]
        base_path = clip_id_temp.replace("AFEW/aligned", "early_feature/framefeature") + "*"
        frames_features_path = glob.glob(base_path)
        audio_features_path = glob.glob(
            base_path.replace("early_feature/framefeature", "early_feature/" + self.feature_name))
        # Sort numerically by the trailing "_<index>" so frames/audio align.
        frames_features_path.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
        if "full" not in self.feature_name:
            audio_features_path.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
        # Emotion label is the parent directory name; clip id is the file stem.
        ground_truth = basename(dirname(clip_id_temp))
        clip_id = basename(clip_id_temp)
        # discard video frames based on window size
        frames_features_path = frames_features_path[frame_to_discard:]
        # Clips with fewer than 16 usable samples cannot fill a time window.
        if len(frames_features_path) < 16:
            continue
        # print("FRAME TOO FEW SAMPLES:", len(frames_features_path), clip_id)
        if len(audio_features_path) < 16 and "full" not in self.feature_name:
            continue
        # print("AUDIO TOO FEW SAMPLES:", len(audio_features_path), clip_id)
        for index, frame in enumerate(frames_features_path):
            if clip_id not in my_csv.keys():
                my_csv[clip_id] = []
            if "full" not in self.feature_name:
                my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]])
            else:
                # "full" features: one audio file covers the whole clip.
                my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]])
    # Persist the table; the header is skipped on re-load by load_early_csv.
    with open('features_path_early_fusion_' + name + "_" + self.feature_name + '.csv', 'w') as f:
        f.write("clip_id, ground_truth, frame_label, audio_label\n")
        for key in my_csv:
            for line in my_csv[key]:
                f.write(key + "," + line[0] + "," + line[1] + "," + line[2] + "\n")
    return my_csv
def early_gen_train(self, list_files, batch_size):
    """Infinite training generator yielding ([audio_batch, frame_batch], labels).

    audio_batch has shape (batch_size, time_step, feature_num) and
    frame_batch (batch_size, time_step, 1024); labels are one-hot over the
    7 emotion classes.  A random window of time_step consecutive rows is
    sampled from each clip in self.csv_fusion["train"].
    NOTE(review): `list_files` is never used — the generator always reads
    self.csv_fusion["train"]; confirm against callers.
    """
    c = 0  # cursor into the shuffled clip-id list
    clip_ids = list(self.csv_fusion["train"].keys())
    random.shuffle(clip_ids)
    while True:
        labels = []
        features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),
                    np.zeros((batch_size, self.time_step, 1024)).astype('float')]
        for i in range(c, c + batch_size):
            clip_id = clip_ids[i]
            video_info = self.csv_fusion["train"][clip_id]
            ground_truth = video_info[0][0]
            # first_frame_num = int(video_info[0][1].split("_")[-1].split(".")[0])
            start = random.randint(0, len(video_info) - self.time_step)
            for index, elem in enumerate(video_info[start:self.time_step + start]):
                _, frame_path, audio_path = elem
                if not isfile(frame_path):
                    # NOTE(review): bumping `start` does not re-slice the window,
                    # so this batch position keeps its zero padding at `index`,
                    # and the bare `raise` re-raises nothing (RuntimeError).
                    # Looks like a bug to revisit.
                    start += 1
                    if start >= len(video_info):
                        raise
                    continue
                frame_feature = np.load(frame_path)
                features[0][i - c][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )
                features[1][i - c][index] = frame_feature.reshape(1024, )
            labels.append(ground_truth)
        c += batch_size
        # Reshuffle and restart once the next batch would overrun the list.
        if c + batch_size > len(clip_ids):
            c = 0
            random.shuffle(clip_ids)
        labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))
        yield features, labels
def early_gen_new_val(self, list_files, batch_size, mode="val", stride=1):
    """Validation generator over sliding windows of every clip.

    (Original Italian docstring: "stride 50% su tutti i file" — i.e. 50%
    overlap across all files.)  Windows of length time_step advance by
    time_step // stride; complete batches of `batch_size` windows are
    yielded as ([audio_batch, frame_batch], one_hot_labels).  In "eval"
    mode a single pass over the clips is made; otherwise it loops forever.
    NOTE(review): a trailing partial batch at the end of an "eval" pass is
    silently dropped (only full batches are yielded).
    """
    c = 0  # number of windows accumulated in the current batch
    labels = features = []
    clip_ids = list(list_files.keys())
    while True:
        for clip_id in tqdm(clip_ids):
            video_info = list_files[clip_id]
            ground_truth = video_info[0][0]
            for start in range(0, len(video_info) - self.time_step, self.time_step // stride):
                if c == 0:
                    # Begin a fresh batch.
                    labels = []
                    features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),
                                np.zeros((batch_size, self.time_step, 1024)).astype('float')]
                for index, elem in enumerate(video_info[start:self.time_step + start]):
                    _, frame_path, audio_path = elem
                    frame_feature = np.load(frame_path)
                    features[0][c][index] = np.array(from_arff_to_feture(audio_path)).reshape(
                        self.feature_num, )
                    features[1][c][index] = frame_feature.reshape(1024, )
                labels.append(ground_truth)
                c += 1
                if c == batch_size:
                    c = 0
                    labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))
                    yield features, labels
        if mode == "eval":
            break
def early_gen_test_clip(self, list_files, clip_id, stride=1):
    """Generator over sliding windows of a single clip.

    (Original Italian docstring: "stride su singolo file, quindi va
    richiamato per ogni file" — strides over one file, so it must be called
    once per file.)  `list_files` is that clip's row list
    [[ground_truth, frame_path, audio_path], ...]; yields batches of one
    window each until the window start passes len(list_files) - time_step.
    NOTE(review): `labels` accumulates one ground truth per window inside
    the loop, yet is reshaped to (1, 7) — this only works on the first
    yield unless lb.transform collapses duplicates; verify before reuse.
    """
    ground_truth = list_files[0][0]
    start = 0
    end = len(list_files) - self.time_step
    while True:
        labels = []
        features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'),
                    np.zeros((1, self.time_step, 1024)).astype('float')]
        for index, elem in enumerate(list_files[start:start + self.time_step]):
            _, frame_path, audio_path = elem
            frame_feature = np.load(frame_path)
            features[0][0][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )
            features[1][0][index] = frame_feature.reshape(1024, )
            labels.append(ground_truth)
        start += self.time_step // stride
        if start >= end:
            break
        labels = self.lb.transform(np.array(labels)).reshape((1, 7))
        yield features, labels
def get_validation_dim(self):
    """Number of validation batches for the current stride / feature set.

    The counts were measured offline per feature configuration.  A
    50%-overlap pass (stride 2) produces more windows than a
    non-overlapping one (stride 1); stride == time_step maps to 0, and an
    unknown combination yields None (implicitly), matching the original
    fall-through behaviour.
    """
    measured_steps = {
        2: (("full", 141), ("600", 0), ("300", 114), ("100", 128)),
        1: (("full", 76), ("600", 0), ("300", 63), ("100", 69)),
    }
    if self.stride in measured_steps:
        for token, steps in measured_steps[self.stride]:
            if token in self.feature_name:
                return steps
    elif self.stride == self.time_step:
        return 0
def train(self, train_files, val_files, train_data, model):
    """Compile and fit `model` with the generators described in `train_data`.

    train_data keys used here: "opt" ("Adam" or anything else -> SGD),
    "lr", "batch_size", "epoch", "model_name", "iteration",
    "generator1" (training generator factory) and "generator2"
    (validation generator factory).  Training history is printed at the end;
    checkpoints and TensorBoard logs are written as side effects.
    """
    if train_data["opt"] == "Adam":
        optimizer = Adam(lr=train_data["lr"])
    else:
        optimizer = SGD(lr=train_data["lr"])
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    train_gen = train_data["generator1"](train_files, train_data["batch_size"])
    no_of_training_images = len(train_files)
    # Validation step count is a precomputed constant per stride/feature set.
    no_of_val_images = self.get_validation_dim()
    print("no_of_val_images:", no_of_val_images)
    val_gen = train_data["generator2"](val_files, train_data["batch_size"])
    # stride = 1, no overlapping
    # stride = 2, overlapping: 50%
    # stride = time_step, stride: 1
    # Encode the hyper-parameters into the checkpoint file name.
    model_name = "_lr" + str(train_data["lr"]) + "_Opt" + train_data["opt"] + "_Model" + str(
        train_data["model_name"]) + "_Feature" + self.feature_name + "_" + str(
        train_data["iteration"]) + "_" + self.train_mode  # + "_modelType" + str(self.model_type)
    model_name += "stride" + str(self.stride)
    model_name += ".h5"

    def custom_scheduler(epoch):
        # Step-wise learning-rate schedule: 0.1 -> 0.01 -> 0.001 -> 0.0001.
        if epoch < 50:
            print(0.1)
            return 0.1
        if epoch < 100:
            print(0.01)
            return 0.01
        if epoch < 125:
            print(0.001)
            return 0.001
        else:
            print(0.0001)
            return 0.0001
        #print(0.1 / 10 ** (floor(epoch / 40) + 1))
        #return 0.1 / 10 ** (floor(epoch / 40) + 1)

    class CheckValCMCallback(keras.callbacks.Callback):
        # Optional per-epoch validation evaluator (currently disabled below).
        def __init__(self, m, dim, validation_files, epoch):
            super().__init__()
            self.vc = m          # owning classifier instance
            self.dim = dim       # number of validation steps
            self.val_files = validation_files
            self.epoch = epoch   # total epoch count, to detect the last epoch
            self.accs = []

        def on_epoch_end(self, epoch, logs=None):
            csv_fusion = self.vc.load_early_csv("val")
            # gen = self.vc.early_gen_new_val(csv_fusion, 16, "eval")
            # predictions = []
            # ground_truths = []
            # for x in gen:
            #     ground_truths.append(self.vc.lb.inverse_transform(x[1])[0])
            #     pred = self.model.predict(x[0])
            #     pred = self.vc.lb.inverse_transform(pred)
            #     predictions.append(pred[0])
            # self.vc.print_stats(ground_truths, predictions, "Video" + str(epoch))
            gen = self.vc.early_gen_new_val(csv_fusion, 16, "eval")
            acc = self.model.evaluate_generator(gen, self.dim, workers=0)
            self.accs.append(acc)
            print("Evaluate:", acc)
            if self.epoch == epoch + 1:
                print("Validation_Accuracy =", self.accs)

    cb = [ModelCheckpoint(
        filepath=str(
            "weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}" + model_name),
        monitor="val_accuracy", save_weights_only=True),
        TensorBoard(log_dir="NewFusionLogs_sched/" + self.train_mode + "/" + self.feature_name, write_graph=True,
                    write_images=True)]
    cb += [LearningRateScheduler(custom_scheduler)]
    #cb += [CheckValCMCallback(self, no_of_val_images, val_files, train_data["epoch"])]
    history = model.fit_generator(train_gen,
                                  validation_data=val_gen,
                                  epochs=train_data["epoch"],
                                  steps_per_epoch=(no_of_training_images * 2 // train_data["batch_size"]),
                                  validation_steps=(no_of_val_images),
                                  workers=0, verbose=1, callbacks=cb)
    print("\n\nTrain_Accuracy =", history.history['accuracy'])
    print("\nVal_Accuracy =", history.history['val_accuracy'])
    print("\n\nTrain_Loss =", history.history['loss'])
    print("\nVal_Loss =", history.history['val_loss'])
def print_stats(self, ground_truths, predictions, name):
    """Print a row-normalised confusion matrix and accuracy for one modality.

    `name` is a display tag ("Video", "Audio", "Frame", ...); classes come
    from self.classes.
    """
    cm = confusion_matrix(ground_truths, predictions, self.classes)
    print("###" + name + " Results###\n")
    # Normalise each row by its total so every cell is a per-class rate.
    row_totals = cm.sum(axis=1)[:, np.newaxis]
    normalised = np.around(cm.astype('float') / row_totals, decimals=3)
    print_cm(normalised, self.classes)
    print("\n\n")
    print("Accuracy score: ", accuracy_score(ground_truths, predictions), "\n\n")
    print("#################################################################end###\n\n\n")
def print_confusion_matrix(self, stride=1):
    """Evaluate the current model on the validation split and print stats.

    early_fusion mode: streams validation windows through the model and
    prints the video-level confusion matrix.  Otherwise (late fusion):
    reads cached per-clip frame/audio predictions from CSV, feeds their
    one-hot concatenation (shape (1, 14)) to the fusion model, and prints
    Video, Audio and Frame statistics.
    NOTE(review): the original docstring said "IMPLEMENT FOR EARLY FUSION
    MISSING" although an early-fusion branch exists — verify it is complete.
    """
    csv_fusion = {}
    predictions = []
    ground_truths = []
    if self.train_mode == "early_fusion":
        csv_fusion = self.load_early_csv("val")
        print("CSV loaded", len(csv_fusion))
        gen = self.early_gen_new_val(csv_fusion, 1, "eval", stride)
        for x in gen:
            ground_truths.append(self.lb.inverse_transform(x[1])[0])
            pred = self.model.predict(x[0])
            pred = self.lb.inverse_transform(pred)
            predictions.append(pred[0])
            # print("\ngt, pred", self.lb.inverse_transform(x[1]), pred)
        self.print_stats(ground_truths, predictions, "Video")
    else:
        # Filename spelling ("lables") is intentional — it matches the writer.
        with open('lables_late_fusion' + self.feature_name + '.csv', 'r') as f:
            f.readline()
            csv_reader = csv.reader(f)
            for row in csv_reader:
                csv_fusion[row[0]] = [row[1], row[2], row[3]]
        a_p = []
        f_p = []
        files = glob.glob("/user/vlongobardi/late_feature/" + self.feature_name + "/*/*csv")
        for file in files:
            clip_id = basename(file).split(".")[0]
            ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]
            # Concatenate the two one-hot (7-way) predictions into one sample.
            sample = np.append(self.lb.transform(np.array([audio_pred])), self.lb.transform(np.array([frame_pred])))
            pred = self.model.predict(sample.reshape((1, 14)))
            pred = self.lb.inverse_transform(pred)[0]
            predictions.append(pred)
            a_p.append(audio_pred)
            f_p.append(frame_pred)
            ground_truths.append(ground_truth)
        self.print_stats(ground_truths, predictions, "Video")
        self.print_stats(ground_truths, a_p, "Audio")
        self.print_stats(ground_truths, f_p, "Frame")
if __name__ == "__main__":
    # CLI entry point: sys.argv[1] selects the fusion strategy.
    if sys.argv[1] == "late":
        print("LATE")
        # One pretrained audio model per emobase2010 window length.
        model_path = [
            "audio_models/audioModel_0.2285_epoch135_lr0.1_OptSGD_Modela_model7_Featureemobase2010_100_3.h5",
            "audio_models/audioModel_0.2650_epoch01_lr0.01_OptSGD_Modela_model7_Featureemobase2010_300_2.h5",
            "audio_models/audioModel_0.2865_epoch13_lr0.001_OptSGD_Modela_model7_Featureemobase2010_600_0.h5",
            "audio_models/audioModel_0.3668_epoch67_lr0.001_OptSGD_Modela_model7_Featureemobase2010_full_2.h5"
        ]
        for mp in model_path:
            vc = VideoClassifier(train_mode="late_fusion", audio_model_path=mp)
    elif sys.argv[1] == "early":
        # mt = int(sys.argv[2])
        # sys.argv[2] is a short code mapped to an openSMILE feature set below.
        print("EARLY")  # , Model_type:", mt)
        arff_paths = {"e1": "emobase2010_100", "i1": "IS09_emotion_100",
                      "e3": "emobase2010_300", "i3": "IS09_emotion_300",
                      "e6": "emobase2010_600", "i6": "IS09_emotion_600",
                      "ef": "emobase2010_full", "if": "IS09_emotion_full"}
        vc = VideoClassifier(train_mode="early_fusion", feature_name=arff_paths[sys.argv[2]])  # , model_type=mt)
|
3,331 | 10990282c8aa0b9b26a69e451132ff37257acbc6 | from django.views.generic import ListView
class ExperimentList(ListView):
    """Django ListView for experiments.

    Placeholder: no model/queryset/template is configured yet, so Django's
    ListView defaults apply until attributes are added.
    """
    pass
|
3,332 | 09698649510348f92ea3b83f89ffa1c844929b8f | import numpy
def CALCB1(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
    """Simulate one 1st-generation atomic de-excitation cascade.

    Gas KGAS, molecular component LGAS, initial energy deposit ELECEN and
    an initial vacancy in shell ISHELL; electron/photon energies and
    emission directions are accumulated into the CALCAS1B global arrays
    for event slot NVAC.

    NOTE(review): mechanical Fortran->Python translation.  Fortran GOTO
    targets became nested functions (GOTO100/GOTO4/GOTO2); the COMMON-block
    globals and the helpers SHAKE, ANGGEN, DRCOS, VACANCY, UPDATE, DRAND48
    and `sys` must be supplied by the surrounding module.  Fortran output
    arguments (e.g. THET from ANGGEN, JVAC from SHAKE) are never assigned
    back here, and ELEV is indexed both as ELEV[i, j] (numpy tuple style)
    and ELEV[i][j] — confirm against the Fortran original before use.
    """
    # IMPLICIT #real*8(A-H,O-Z)
    # IMPLICIT #integer*8(I-N)
    #CHARACTER*6
    # SCR=""#(17)
    # SCR1=""#(17)
    #COMMON/GENCAS/
    global ELEV#[17,79]
    global NSDEG#(17)
    global AA#[17]
    global BB#[17]
    global SCR,SCR1
    #COMMON/MIXC/
    global PRSH#(6,3,17,17)
    global ESH#(6,3,17)
    global AUG#(6,3,17,17,17)
    global RAD#[6,3,17,17]
    global PRSHBT#(6,3,17)
    global IZ#[6,3]
    global INIOCC#(6,3,17)
    global ISHLMX#(6,3)
    global AMZ#[6,3]
    #COMMON/UPD/
    global NOCC#(6,3,17)
    global AUGR#(6,3,17,17,17)
    global RADR#(6,3,17,17)
    #COMMON/CALCASB/
    global IONSUM0#(10)
    global IFLSUM0#(10)
    global ESTORE0#(10,28)
    global EPHOTON0#(10,28)
    global DRXE0#(10,28)
    global DRYE0#(10,28)
    global DRZE0#(10,28)
    global DRX0#(10,28)
    global DRY0#(10,28)
    global DRZ0#(10,28)
    #COMMON/CALCAS1B/
    global IONSUM#(10)
    global IFLSUM#(10)
    global ESTORE#(10,28)
    global EPHOTON#(10,28)
    global DRXE#(10,28)
    global DRYE#(10,28)
    global DRZE#(10,28)
    global DRX#(10,28)
    global DRY#(10,28)
    global DRZ#[10,28]
    #DIMENSION
    # 1-based Fortran indexing is emulated by over-allocating one slot.
    TEMP=numpy.zeros((17+1))
    TEMP1=numpy.zeros((289+1))
    #
    # CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
    #
    ISTART=IONSUM[NVAC]
    ISTARTF=IFLSUM[NVAC]
    ISHELLST=ISHELL
    API=numpy.arccos(-1.00)
    TWOPI=2.00*API
    def GOTO100():
        # Fortran label 100: (re)start the cascade from the initial vacancy.
        ELEFT=ELECEN
        ISHELL=ISHELLST
        INIT=1
        # SET STARTING ARRAY NOCC EQUAL TO INIOCC
        for I in range(1,17):
            NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
        IONSUM[NVAC]=ISTART+1
        IFLSUM[NVAC]=ISTARTF
        # STORE PHOTOELECTRON ENERGY AND ANGLE
        ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]
        ELECN=ESTORE[NVAC][IONSUM[NVAC]]
        ELEFT=ELEFT-ELECN
        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
        # USE PHOTOELECTRON ANGULAR DISTRIBUTION
        APE=AA[ISHELL]
        BPE=BB[ISHELL]
        ANGGEN(APE,BPE,THET)
        if(THET < 0.0):
            THET=THET+API
        R3=DRAND48(RDUM)
        PHI=TWOPI*R3
        DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
        DRXE[NVAC][IONSUM[NVAC]]=DRXX
        DRYE[NVAC][IONSUM[NVAC]]=DRYY
        DRZE[NVAC][IONSUM[NVAC]]=DRZZ
        # LOOP AROUND CASCADE (Fortran label 4)
        def GOTO4():
            # CHECK FOR ELECTRON SHAKEOFF
            IDUM=1
            if(INIT > 1):
                ELECN=ESTORE[NVAC][IONSUM[NVAC]]
                INSUM=IONSUM[NVAC]
                SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
            # CALCULATE ENERGY OF ELECTRON
            if(JVAC == 0):
                pass
            else:
                # ELECTRON + SHAKEOFF
                ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
                ESTORE[NVAC][IONSUM[NVAC]]=ELECN
                IONSUM[NVAC]=IONSUM[NVAC]+1
                # MAXIMUM ION CHARGE STATE =28
                if(IONSUM[NVAC]> 28):
                    print(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])
                    sys.exit()
                # endif
                ESTORE[NVAC][IONSUM[NVAC]]=ESHK
                ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
                if(ELEFT < 0.0):
                    GOTO100()
                # RANDOM EMISSION DIRECTION
                R3=DRAND48(RDUM)
                THET=numpy.arccos(1.0-2.0*R3)
                R4=DRAND48(RDUM)
                PHI=TWOPI*R4
                DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
            def GOTO2():
                # Fortran label 2: choose the next transition for ISHELL.
                UPDATE(KGAS,LGAS,ISHELL)
                INIT=2
                # CHOOSE FLUORESCENCE OR AUGER TRANSITION
                TSUM=0.0
                for I in range(1,17):
                    TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
                    for J in range(1,17):
                        TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
                # NO MORE TRANSITIONS POSSIBLE
                if(TSUM == 0.0):
                    return
                # NORMALISE TO 1.0
                for I in range(1,17):
                    RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
                    for J in range(1,17):
                        AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
                # CREATE CUMULATIVE SUM ARRAY
                TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
                for I in range(2,17):
                    TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
                TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
                for I in range(2,17):
                    TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
                for J in range(1,16):
                    for I in range(1,17):
                        TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
                # FIND FLUORESCENCE OR AUGER TRANSITION
                R1=DRAND48(RDUM)
                for I in range(1,17):
                    if(R1 < TEMP[I]) :
                        # STORE PHOTON ENERGY AND ANGLE : UPDATE NOCC
                        IFLSUM[NVAC]=IFLSUM[NVAC]+1
                        EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]
                        ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
                        if(ELEFT < 0.0):
                            GOTO100()
                        # RANDOM EMISSION DIRECTION
                        R3=DRAND48(RDUM)
                        THET=numpy.arccos(1.0-2.0*R3)
                        R4=DRAND48(RDUM)
                        PHI=TWOPI*R4
                        DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                        DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                        DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
                        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                        NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                        # FIND LOWEST VACANCY
                        VACANCY(KGAS,LGAS,ISHELL,ILAST)
                        if(ILAST == 1):
                            # NO MORE TRANSITIONS POSSIBLE
                            return
                        # endif
                        GOTO2()
                # endif
            GOTO2() ## calling the internal function first time
            counter116=1
            while(counter116):
                counter116=0
                R2=R1-TEMP[17]
                for J in range(1,17):
                    if(counter116):
                        break
                    for I in range(1,17):
                        if(R2 < TEMP1[I+((J-1)*17)]) :
                            # AUGER OR COSTER KRONIG
                            # STORE EJECTED ELECTRON AND UPDATE NOCC
                            ETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I][IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5
                            if(ETEMP < 0.0):
                                # DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
                                counter117=1
                                while(counter117):
                                    counter117=0
                                    R1=DRAND48(RDUM)
                                    if(R1 < TEMP[17]):
                                        counter117=1
                                        counter116=1
                                break
                            # endif
                            IONSUM[NVAC]=IONSUM[NVAC]+1
                            if(IONSUM[NVAC]> 28) :
                                print(' 1ST GEN LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])
                                sys.exit()
                            # endif
                            ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
                            ELEFT=ELEFT-abs(ETEMP)
                            if(ELEFT < 0.0):
                                GOTO100()
                            # RANDOM EMISSION DIRECTION
                            R3=DRAND48(RDUM)
                            THET=numpy.arccos(1.0-2.0*R3)
                            R4=DRAND48(RDUM)
                            PHI=TWOPI*R4
                            DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                            DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                            DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
                            NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                            NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                            NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
                            # FIND LOWEST VACANCY
                            VACANCY(KGAS,LGAS,ISHELL,ILAST)
                            if(ILAST == 1):
                                # NO MORE TRANSITIONS POSSIBLE
                                return
                            # endif
                            GOTO4()
                # endif
        GOTO4()
        print(' ERROR IN CASCADE B1')
        sys.exit()
    GOTO100()
    # end
def CALCB2(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
    """Simulate one 2nd-generation atomic de-excitation cascade.

    Same algorithm as CALCB1 but reads the parent-generation direction
    cosines from the CALCAS1B globals (here bound as the *0 names) and
    accumulates results into the CALCAS2B globals for event slot NVAC.

    NOTE(review): mechanical Fortran->Python translation with the same
    caveats as CALCB1 — GOTO targets became nested functions, the COMMON
    globals and helpers (SHAKE, ANGGEN, DRCOS, VACANCY, UPDATE, DRAND48,
    sys) come from the surrounding module, and helper output arguments are
    never assigned back.  Unlike CALCB1, TEMP/TEMP1 are plain lists here.
    """
    # IMPLICIT #real*8(A-H,O-Z)
    # IMPLICIT #integer*8(I-N)
    #CHARACTER*6
    # SCR=""#(17)
    # SCR1=""#(17)
    #COMMON/GENCAS/
    global ELEV#[17,79]
    global NSDEG#(17)
    global AA#[17]
    global BB#[17]
    global SCR,SCR1
    #COMMON/MIXC/
    global PRSH#(6,3,17,17)
    global ESH#(6,3,17)
    global AUG#(6,3,17,17,17)
    global RAD#[6,3,17,17]
    global PRSHBT#(6,3,17)
    global IZ#[6,3]
    global INIOCC#(6,3,17)
    global ISHLMX#(6,3)
    global AMZ#[6,3]
    #COMMON/UPD/
    global NOCC#(6,3,17)
    global AUGR#(6,3,17,17,17)
    global RADR#(6,3,17,17)
    #COMMON/CALCAS1B/
    global IONSUM0#(10)
    global IFLSUM0#(10)
    global ESTORE0#(10,28)
    global EPHOTON0#(10,28)
    global DRXE0#(10,28)
    global DRYE0#(10,28)
    global DRZE0#(10,28)
    global DRX0#(10,28)
    global DRY0#(10,28)
    global DRZ0#(10,28)
    #COMMON/CALCAS2B/
    global IONSUM#(10)
    global IFLSUM#(10)
    global ESTORE#(10,28)
    global EPHOTON#(10,28)
    global DRXE#(10,28)
    global DRYE#(10,28)
    global DRZE#(10,28)
    global DRX#(10,28)
    global DRY#(10,28)
    global DRZ#[10,28]
    #DIMENSION
    # 1-based Fortran indexing: index 0 is unused.
    TEMP=[0 for x in range(17)]
    TEMP1=[0 for x in range(289)]
    #
    # CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
    #
    ISTART=IONSUM[NVAC]
    ISTARTF=IFLSUM[NVAC]
    ISHELLST=ISHELL
    API=numpy.arccos(-1.00)
    TWOPI=2.00*API
    def GOTO100():
        # Fortran label 100: (re)start the cascade from the initial vacancy.
        ELEFT=ELECEN
        ISHELL=ISHELLST
        INIT=1
        # SET STARTING ARRAY NOCC EQUAL TO INIOCC
        for I in range(1,17):
            NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
        IONSUM[NVAC]=ISTART+1
        IFLSUM[NVAC]=ISTARTF
        # STORE INITIAL PHOTOELECTRON ENERGY AND ANGLE
        ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]
        ELECN=ESTORE[NVAC][IONSUM[NVAC]]
        ELEFT=ELEFT-ELECN
        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
        # USE PHOTOELECTRON ANGULAR DISTRIBUTION
        APE=AA[ISHELL]
        BPE=BB[ISHELL]
        ANGGEN(APE,BPE,THET)
        if(THET < 0.0):
            THET=THET+API
        R3=DRAND48(RDUM)
        PHI=TWOPI*R3
        DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
        DRXE[NVAC][IONSUM[NVAC]]=DRXX
        DRYE[NVAC][IONSUM[NVAC]]=DRYY
        DRZE[NVAC][IONSUM[NVAC]]=DRZZ
        # LOOP AROUND CASCADE (Fortran label 4)
        def GOTO4():
            # CHECK FOR ELECTRON SHAKEOFF
            IDUM=1
            if(INIT > 1):
                ELECN=ESTORE[NVAC][IONSUM[NVAC]]
                INSUM=IONSUM[NVAC]
                SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
            # CALCULATE ENERGY OF ELECTRON
            if(JVAC == 0):
                pass
            else:
                # ELECTRON + SHAKEOFF
                ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
                ESTORE[NVAC][IONSUM[NVAC]]=ELECN
                IONSUM[NVAC]=IONSUM[NVAC]+1
                # MAXIMUM ION CHARGE STATE =28
                if(IONSUM[NVAC]> 28) :
                    print(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])
                    sys.exit()
                # endif
                ESTORE[NVAC][IONSUM[NVAC]]=ESHK
                ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
                if(ELEFT < 0.0):
                    GOTO100()
                # RANDOM EMISSION DIRECTION
                R3=DRAND48(RDUM)
                THET=numpy.arccos(1.0-2.0*R3)
                R4=DRAND48(RDUM)
                PHI=TWOPI*R4
                DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
            def GOTO2():
                # Fortran label 2: choose the next transition for ISHELL.
                UPDATE(KGAS,LGAS,ISHELL)
                INIT=2
                # CHOOSE FLUORESCENCE OR AUGER TRANSITION
                TSUM=0.0
                for I in range(1,17):
                    TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
                    for J in range(1,17):
                        TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
                # NO MORE TRANSITIONS POSSIBLE
                if(TSUM == 0.0):
                    return
                # NORMALISE TO 1.0
                for I in range(1,17):
                    RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
                    for J in range(1,17):
                        AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
                # CREATE CUMULATIVE SUM ARRAY
                TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
                for I in range(2,17):
                    TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
                TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
                for I in range(2,17):
                    TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
                for J in range(1,16):
                    for I in range(1,17):
                        TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
                # FIND FLUORESCENCE OR AUGER TRANSITION
                R1=DRAND48(RDUM)
                for I in range(1,17):
                    if(R1 < TEMP[I]) :
                        # STORE PHOTON ENERGY AND UPDATE NOCC
                        IFLSUM[NVAC]=IFLSUM[NVAC]+1
                        EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]
                        ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
                        if(ELEFT < 0.0):
                            GOTO100()
                        # RANDOM EMISSION DIRECTION
                        R3=DRAND48(RDUM)
                        THET=numpy.arccos(1.0-2.0*R3)
                        R4=DRAND48(RDUM)
                        PHI=TWOPI*R4
                        DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                        DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                        DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
                        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                        NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                        # FIND LOWEST VACANCY
                        VACANCY(KGAS,LGAS,ISHELL,ILAST)
                        if(ILAST == 1):
                            # NO MORE TRANSITIONS POSSIBLE
                            return
                        # endif
                        GOTO2()
                # endif
            #16 CONTINUE
            GOTO2()
            counter116=1
            while(counter116):
                counter116=0
                R2=R1-TEMP[17]
                for J in range(1,17):
                    if(counter116):
                        break
                    for I in range(1,17):
                        if(R2 < TEMP1[I+((J-1)*17)]) :
                            # AUGER OR COSTER KRONIG
                            # STORE EJECTED ELECTRON AND UPDATE NOCC
                            ETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I][IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5
                            if(ETEMP < 0.0):
                                # DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
                                counter117=1
                                while(counter117):
                                    counter117=0
                                    R1=DRAND48(RDUM)
                                    if(R1 < TEMP[17]):
                                        counter117=1
                                        counter116=1
                                break
                            # endif
                            IONSUM[NVAC]=IONSUM[NVAC]+1
                            if(IONSUM[NVAC]> 28) :
                                print(' 2ND GEN IONS LIMITED TO 28 IN THIS VERSION IONSUM=',IONSUM[NVAC])
                                sys.exit()
                            # endif
                            ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
                            ELEFT=ELEFT-abs(ETEMP)
                            if(ELEFT < 0.0):
                                GOTO100()
                            # RANDOM EMISSION DIRECTION
                            R3=DRAND48(RDUM)
                            THET=numpy.arccos(1.0-2.0*R3)
                            R4=DRAND48(RDUM)
                            PHI=TWOPI*R4
                            DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                            DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                            DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
                            NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                            NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                            NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
                            # FIND LOWEST VACANCY
                            VACANCY(KGAS,LGAS,ISHELL,ILAST)
                            if(ILAST == 1):
                                # NO MORE TRANSITIONS POSSIBLE
                                return
                            # endif
                            GOTO4()
                # endif
        GOTO4()
        print(' ERROR IN CASCADE B2')
        sys.exit()
    GOTO100()
    # end
def CALCB3(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
    """Simulate one 3rd-generation atomic de-excitation cascade.

    Same algorithm as CALCB1/CALCB2 but reads parent direction cosines
    from the CALCAS2B globals (bound as the *0 names) and accumulates into
    the CALCAS3B globals for event slot NVAC.

    NOTE(review): mechanical Fortran->Python translation with the same
    caveats as CALCB1 — GOTO targets became nested functions, COMMON
    globals and helper routines must be supplied by the module, and helper
    output arguments are never assigned back.  Note the ETEMP line below
    mixes ELEV[I, j] and ELEV[I][j] indexing; verify against the original.
    """
    # IMPLICIT #real*8(A-H,O-Z)
    # IMPLICIT #integer*8(I-N)
    #CHARACTER*6
    # SCR=""#(17)
    # SCR1=""#(17)
    #COMMON/GENCAS/
    global ELEV#[17,79]
    global NSDEG#(17)
    global AA#[17]
    global BB#[17]
    global SCR,SCR1
    #COMMON/MIXC/
    global PRSH#(6,3,17,17)
    global ESH#(6,3,17)
    global AUG#(6,3,17,17,17)
    global RAD#[6,3,17,17]
    global PRSHBT#(6,3,17)
    global IZ#[6,3]
    global INIOCC#(6,3,17)
    global ISHLMX#(6,3)
    global AMZ#[6,3]
    #COMMON/UPD/
    global NOCC#(6,3,17)
    global AUGR#(6,3,17,17,17)
    global RADR#(6,3,17,17)
    #COMMON/CALCAS2B/
    global IONSUM0#(10)
    global IFLSUM0#(10)
    global ESTORE0#(10,28)
    global EPHOTON0#(10,28)
    global DRXE0#(10,28)
    global DRYE0#(10,28)
    global DRZE0#(10,28)
    global DRX0#(10,28)
    global DRY0#(10,28)
    global DRZ0#(10,28)
    #COMMON/CALCAS3B/
    global IONSUM#(10)
    global IFLSUM#(10)
    global ESTORE#(10,28)
    global EPHOTON#(10,28)
    global DRXE#(10,28)
    global DRYE#(10,28)
    global DRZE#(10,28)
    global DRX#(10,28)
    global DRY#(10,28)
    global DRZ#[10,28]
    #DIMENSION
    # 1-based Fortran indexing: index 0 is unused.
    TEMP=[0 for x in range(17)]
    TEMP1=[0 for x in range(289)]
    #
    # CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
    #
    ISTART=IONSUM[NVAC]
    ISTARTF=IFLSUM[NVAC]
    ISHELLST=ISHELL
    API=numpy.arccos(-1.00)
    TWOPI=2.00*API
    def GOTO100():
        # Fortran label 100: (re)start the cascade from the initial vacancy.
        ELEFT=ELECEN
        ISHELL=ISHELLST
        INIT=1
        # SET STARTING ARRAY NOCC EQUAL TO INIOCC
        for I in range(1,17):
            NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
        IONSUM[NVAC]=ISTART+1
        IFLSUM[NVAC]=ISTARTF
        # STORE PHOTOELECTRON ENERGY AND ANGLE
        ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]
        ELECN=ESTORE[NVAC][IONSUM[NVAC]]
        ELEFT=ELEFT-ELECN
        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
        # USE PHOTOELECTRON ANGULAR DISTRIBUTION
        APE=AA[ISHELL]
        BPE=BB[ISHELL]
        ANGGEN(APE,BPE,THET)
        if(THET < 0.0):
            THET=THET+API
        R3=DRAND48(RDUM)
        PHI=TWOPI*R3
        DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
        DRXE[NVAC][IONSUM[NVAC]]=DRXX
        DRYE[NVAC][IONSUM[NVAC]]=DRYY
        DRZE[NVAC][IONSUM[NVAC]]=DRZZ
        # LOOP AROUND CASCADE (Fortran label 4)
        def GOTO4():
            # CHECK FOR ELECTRON SHAKEOFF
            IDUM=1
            if(INIT > 1):
                ELECN=ESTORE[NVAC][IONSUM[NVAC]]
                INSUM=IONSUM[NVAC]
                SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
            # CALCULATE ENERGY OF ELECTRON
            if(JVAC == 0):
                pass
            else:
                # ELECTRON + SHAKEOFF
                ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
                ESTORE[NVAC][IONSUM[NVAC]]=ELECN
                IONSUM[NVAC]=IONSUM[NVAC]+1
                # MAXIMUM ION CHARGE STATE =28
                if(IONSUM[NVAC]> 28) :
                    print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
                    sys.exit()
                # endif
                ESTORE[NVAC][IONSUM[NVAC]]=ESHK
                ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
                if(ELEFT < 0.0):
                    GOTO100()
                # RANDOM EMISSION ANGLE
                R3=DRAND48(RDUM)
                THET=numpy.arccos(1.0-2.0*R3)
                R4=DRAND48(RDUM)
                PHI=TWOPI*R4
                DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
            def GOTO2():
                # Fortran label 2: choose the next transition for ISHELL.
                UPDATE(KGAS,LGAS,ISHELL)
                INIT=2
                # CHOOSE FLUORESCENCE OR AUGER TRANSITION
                TSUM=0.0
                for I in range(1,17):
                    TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
                    for J in range(1,17):
                        TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
                # NO MORE TRANSITIONS POSSIBLE
                if(TSUM == 0.0):
                    return
                # NORMALISE TO 1.0
                for I in range(1,17):
                    RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
                    for J in range(1,17):
                        AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
                # CREATE CUMULATIVE SUM ARRAY
                TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
                for I in range(2,17):
                    TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
                TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
                for I in range(2,17):
                    TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
                for J in range(1,16):
                    for I in range(1,17):
                        TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
                # FIND FLUORESCENCE OR AUGER TRANSITION
                R1=DRAND48(RDUM)
                for I in range(1,17):
                    if(R1 < TEMP[I]) :
                        # STORE PHOTON ENERGY AND UPDATE NOCC
                        IFLSUM[NVAC]=IFLSUM[NVAC]+1
                        EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]
                        ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
                        if(ELEFT < 0.0):
                            GOTO100()
                        # RANDOM EMISSION DIRECTION
                        R3=DRAND48(RDUM)
                        THET=numpy.arccos(1.0-2.0*R3)
                        R4=DRAND48(RDUM)
                        PHI=TWOPI*R4
                        DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                        DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                        DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
                        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                        NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                        # FIND LOWEST VACANCY
                        VACANCY(KGAS,LGAS,ISHELL,ILAST)
                        if(ILAST == 1):
                            # NO MORE TRANSITIONS POSSIBLE
                            return
                        # endif
                        GOTO2()
                # endif
            GOTO2()
            counter116=1
            while(counter116):
                counter116=0
                R2=R1-TEMP[17]
                for J in range(1,17):
                    if(counter116):
                        break
                    for I in range(1,17):
                        if(R2 < TEMP1[I+((J-1)*17)]) :
                            # AUGER OR COSTER KRONIG
                            # STORE EJECTED ELECTRON AND UPDATE NOCC
                            ETEMP=ELEV[ISHELL][IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I][IZ[KGAS][LGAS]+1])*0.5-(ELEV[J][IZ[KGAS][LGAS]]+ELEV[J][IZ[KGAS][LGAS]+1])*0.5
                            if(ETEMP < 0.0):
                                # DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
                                counter117=1
                                while(counter117):
                                    counter117=0
                                    R1=DRAND48(RDUM)
                                    if(R1 < TEMP[17]):
                                        counter117=1
                                        counter116=1
                                break
                            # endif
                            IONSUM[NVAC]=IONSUM[NVAC]+1
                            if(IONSUM[NVAC]> 28) :
                                print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
                                sys.exit()
                            # endif
                            ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
                            ELEFT=ELEFT-abs(ETEMP)
                            if(ELEFT < 0.0):
                                GOTO100()
                            # RANDOM EMISSION DIRECTION
                            R3=DRAND48(RDUM)
                            THET=numpy.arccos(1.0-2.0*R3)
                            R4=DRAND48(RDUM)
                            PHI=TWOPI*R4
                            DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                            DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                            DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
                            NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                            NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                            NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
                            # FIND LOWEST VACANCY
                            VACANCY(KGAS,LGAS,ISHELL,ILAST)
                            if(ILAST == 1):
                                # NO MORE TRANSITIONS POSSIBLE
                                return
                            # endif
                            GOTO4()
                # endif
        GOTO4()
        print(' ERROR IN CASCADE B3')
        sys.exit()
    GOTO100()
    #end
def CALCB4(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
# IMPLICIT #real*8(A-H,O-Z)
# IMPLICIT #integer*8(I-N)
#CHARACTER*6
# SCR=""#(17)
# SCR1=""#(17)
#COMMON/GENCAS/
global ELEV#[17,79]
global NSDEG#(17)
global AA#[17]
global BB#[17]
global SCR,SCR1
#COMMON/MIXC/
global PRSH#(6,3,17,17)
global ESH#(6,3,17)
global AUG#(6,3,17,17,17)
global RAD#[6,3,17,17]
global PRSHBT#(6,3,17)
global IZ#[6,3]
global INIOCC#(6,3,17)
global ISHLMX#(6,3)
global AMZ#[6,3]
#COMMON/UPD/
global NOCC#(6,3,17)
global AUGR#(6,3,17,17,17)
global RADR#(6,3,17,17)
#COMMON/CALCAS3B/
global IONSUM0#(10)
global IFLSUM0#(10)
global ESTORE0#(10,28)
global EPHOTON0#(10,28)
global DRXE0#(10,28)
global DRYE0#(10,28)
global DRZE0#(10,28)
global DRX0#(10,28)
global DRY0#(10,28)
global DRZ0#(10,28)
#COMMON/CALCAS4B/
global IONSUM#(10)
global IFLSUM#(10)
global ESTORE#(10,28)
global EPHOTON#(10,28)
global DRXE#(10,28)
global DRYE#(10,28)
global DRZE#(10,28)
global DRX#(10,28)
global DRY#(10,28)
global DRZ#[10,28]
#DIMENSION
TEMP=[0 for x in range(17)]
TEMP1=[0 for x in range(289)]
#
# CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
# WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
#
ISTART=IONSUM[NVAC]
ISTARTF=IFLSUM[NVAC]
ISHELLST=ISHELL
API=numpy.arccos(-1.00)
TWOPI=2.00*API
def GOTO100():
ELEFT=ELECEN
ISHELL=ISHELLST
INIT=1
# SET STARTING ARRAY NOCC EQUAL TO INIOCC
for I in range(1,17):
NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
IONSUM[NVAC]=ISTART+1
IFLSUM[NVAC]=ISTARTF
# STORE PHOTOELECTRON ENERGY AND ANGLE
ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL,IZ[KGAS][LGAS]]
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
ELEFT=ELEFT-ELECN
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
# USE PHOTOELECTRON ANGULAR DISTRIBUTION
APE=AA[ISHELL]
BPE=BB[ISHELL]
ANGGEN(APE,BPE,THET)
if(THET < 0.0):
THET=THET+API
R3=DRAND48(RDUM)
PHI=TWOPI*R3
DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
DRXE[NVAC][IONSUM[NVAC]]=DRXX
DRYE[NVAC][IONSUM[NVAC]]=DRYY
DRZE[NVAC][IONSUM[NVAC]]=DRZZ
# LOOP AROUND CASCADE
def GOTO4():
# CHECK FOR ELECTRON SHAKEOFF
IDUM=1
if(INIT > 1):
ELECN=ESTORE[NVAC][IONSUM[NVAC]]
INSUM=IONSUM[NVAC]
SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
# CALCULATE ENERGY OF ELECTRON
if(JVAC == 0):
pass
else:
# ELECTRON + SHAKEOFF
ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
ESTORE[NVAC][IONSUM[NVAC]]=ELECN
IONSUM[NVAC]=IONSUM[NVAC]+1
# MAXIMUM ION CHARGE STATE =28
if(IONSUM[NVAC]> 28) :
print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ESHK
ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION ANGLE
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
def GOTO2():
UPDATE(KGAS,LGAS,ISHELL)
INIT=2
# CHOOSE FLUORESCENCE OR AUGER TRANSITION
TSUM=0.0
for I in range(1,17):
TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
for J in range(1,17):
TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
# NO MORE TRANSITIONS POSSIBLE
if(TSUM == 0.0):
return
# NORMALISE TO 1.0
for I in range(1,17):
RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
for J in range(1,17):
AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
# CREATE CUMULATIVE SUM ARRAY
TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
for I in range(2,17):
TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
for I in range(2,17):
TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
for J in range(1,16):
for I in range(1,17):
TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
# FIND FLUORESCENCE OR AUGER TRANSITION
R1=DRAND48(RDUM)
for I in range(1,17):
if(R1 < TEMP[I]) :
# STORE PHOTON ENERGY AND UPDATE NOCC
IFLSUM[NVAC]=IFLSUM[NVAC]+1
EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS][LGAS]]-ELEV[I,IZ[KGAS][LGAS]]
ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO2()
# endif
GOTO2()
counter116=1
while(counter116):
counter116=0
R2=R1-TEMP[17]
for J in range(1,17):
if(counter116):
break
for I in range(1,17):
if(R2 < TEMP1[I+((J-1)*17)]) :
# AUGER OR COSTER KRONIG
# STORE EJECTED ELECTRON AND UPDATE NOCC
ETEMP=ELEV[ISHELL,IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I,IZ[KGAS][LGAS]+1])*0.5-(ELEV[J,IZ[KGAS][LGAS]]+ELEV[J,IZ[KGAS][LGAS]+1])*0.5
if(ETEMP < 0.0):
# DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
counter117=1
while(counter117):
counter117=0
R1=DRAND48(RDUM)
if(R1 < TEMP[17]):
counter117=1
counter116=1
break
# endif
IONSUM[NVAC]=IONSUM[NVAC]+1
if(IONSUM[NVAC]> 28) :
print(' 4TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
sys.exit()
# endif
ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
ELEFT=ELEFT-abs(ETEMP)
if(ELEFT < 0.0):
GOTO100()
# RANDOM EMISSION DIRECTION
R3=DRAND48(RDUM)
THET=numpy.arccos(1.0-2.0*R3)
R4=DRAND48(RDUM)
PHI=TWOPI*R4
DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
# FIND LOWEST VACANCY
VACANCY(KGAS,LGAS,ISHELL,ILAST)
if(ILAST == 1):
# NO MORE TRANSITIONS POSSIBLE
return
# endif
GOTO4()
# endif
GOTO4()
print(' ERROR IN CASCADE B4')
sys.exit()
GOTO100()
# end
def CALCB5(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
    """Simulate the fifth-generation atomic de-excitation cascade in gas KGAS,
    molecular component LGAS, for an initial energy deposit ELECEN with the
    shell vacancy created at ISHELL (parent direction taken from
    DRX0/DRY0/DRZ0 at index L1 of primary NVAC).

    Results accumulate into the COMMON/CALCAS5B/ globals: ejected-electron
    energies/directions in ESTORE and DRXE/DRYE/DRZE, fluorescence photon
    energies/directions in EPHOTON and DRX/DRY/DRZ, with per-vacancy counters
    IONSUM and IFLSUM.

    NOTE(review): this is a mechanical Fortran->Python translation.  The
    numbered Fortran labels became the nested functions GOTO100/GOTO4/GOTO2
    and ``# endif`` comments mark the original block ends.  Several apparent
    translation defects are flagged inline below (Fortran call syntax on a
    list, tuple indexing of nested lists, missing ``nonlocal`` rebinding, and
    missing fall-through calls the original GOTO flow implies) — only
    documentation is added here, no code is changed.
    """
    # IMPLICIT #real*8(A-H,O-Z)
    # IMPLICIT #integer*8(I-N)
    # SCR=""
    # SCR1=""#(17)
    #COMMON/GENCAS/
    global ELEV#[17,79]
    global NSDEG#(17)
    global AA#[17]
    global BB#[17]
    global SCR,SCR1
    #COMMON/MIXC/
    global PRSH#(6,3,17,17)
    global ESH#(6,3,17)
    global AUG#(6,3,17,17,17)
    global RAD#[6,3,17,17]
    global PRSHBT#(6,3,17)
    global IZ#[6,3]
    global INIOCC#(6,3,17)
    global ISHLMX#(6,3)
    global AMZ#[6,3]
    #COMMON/UPD/
    global NOCC#(6,3,17)
    global AUGR#(6,3,17,17,17)
    global RADR#(6,3,17,17)
    #COMMON/CALCAS4B/
    global IONSUM0#(10)
    global IFLSUM0#(10)
    global ESTORE0#(10,28)
    global EPHOTON0#(10,28)
    global DRXE0#(10,28)
    global DRYE0#(10,28)
    global DRZE0#(10,28)
    global DRX0#(10,28)
    global DRY0#(10,28)
    global DRZ0#(10,28)
    #COMMON/CALCAS5B/
    global IONSUM#(10)
    global IFLSUM#(10)
    global ESTORE#(10,28)
    global EPHOTON#(10,28)
    global DRXE#(10,28)
    global DRYE#(10,28)
    global DRZE#(10,28)
    global DRX#(10,28)
    global DRY#(10,28)
    global DRZ#[10,28]
    #DIMENSION
    # Scratch cumulative-probability tables (Fortran TEMP(17), TEMP1(289)).
    TEMP=[0 for x in range(17)]
    TEMP1=[0 for x in range(289)]
    #
    # CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INITIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
    #
    # Remember the counter values on entry so a restart (GOTO100) can rewind.
    ISTART=IONSUM[NVAC]
    ISTARTF=IFLSUM[NVAC]
    ISHELLST=ISHELL
    API=numpy.arccos(-1.00)  # pi
    TWOPI=2.00*API
    # Fortran label 100: (re)start the whole cascade from the initial vacancy.
    # NOTE(review): ELEFT/INIT/ISHELL are plain locals of GOTO100 — the nested
    # GOTO4/GOTO2 read them via closure, but rebinding inside the nested
    # functions is not shared back (no ``nonlocal``), so the Fortran restart
    # semantics are not faithfully reproduced.
    def GOTO100():
        ELEFT=ELECEN
        ISHELL=ISHELLST
        INIT=1
        # SET STARTING ARRAY NOCC EQUAL TO INIOCC
        for I in range(1,17):
            NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
        IONSUM[NVAC]=ISTART+1
        IFLSUM[NVAC]=ISTARTF
        # STORE PHOTOELECTRON ENERGY AND ANGLE
        # Photoelectron energy = deposit minus the shell binding energy.
        ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL][IZ[KGAS][LGAS]]
        ELECN=ESTORE[NVAC][IONSUM[NVAC]]
        ELEFT=ELEFT-ELECN
        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
        # USE PHOTOELECTRON ANGULAR DISTRIBUTION
        APE=AA[ISHELL]
        BPE=BB[ISHELL]
        # NOTE(review): THET was an *output* argument of ANGGEN in Fortran; as
        # written it is read before assignment here (NameError unless THET is
        # a module-level global set elsewhere) — TODO confirm against ANGGEN.
        ANGGEN(APE,BPE,THET)
        if(THET < 0.0):
            THET=THET+API
        R3=DRAND48(RDUM)
        PHI=TWOPI*R3
        # Rotate into the frame of the parent direction (DRX0/DRY0/DRZ0).
        DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
        DRXE[NVAC][IONSUM[NVAC]]=DRXX
        DRYE[NVAC][IONSUM[NVAC]]=DRYY
        DRZE[NVAC][IONSUM[NVAC]]=DRZZ
        # LOOP AROUND CASCADE
        # Fortran label 4: process one vacancy (shakeoff check, then label 2).
        def GOTO4():
            # CHECK FOR ELECTRON SHAKEOFF
            IDUM=1
            if(INIT > 1):
                ELECN=ESTORE[NVAC][IONSUM[NVAC]]
                INSUM=IONSUM[NVAC]
                # SHAKE's ESHK/JVAC were Fortran output arguments — see the
                # NOTE on ANGGEN above; as Python these reads are suspect.
                SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
                # CALCULATE ENERGY OF ELECTRON
                if(JVAC == 0):
                    pass
                else:
                    # ELECTRON + SHAKEOFF
                    # BUG(review): ELEV[JVAC,IZ[...]] indexes a nested list
                    # with a tuple (TypeError at runtime); other sites in this
                    # function use ELEV[...][...] — TODO fix to
                    # ELEV[JVAC][IZ[KGAS][LGAS]].
                    ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
                    ESTORE[NVAC][IONSUM[NVAC]]=ELECN
                    IONSUM[NVAC]=IONSUM[NVAC]+1
                    # MAXIMUM ION CHARGE STATE =28
                    if(IONSUM[NVAC]> 28) :
                        print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
                        sys.exit()
                    # endif
                    ESTORE[NVAC][IONSUM[NVAC]]=ESHK
                    # BUG(review): tuple indexing again — see note above.
                    ELEFT=ELEFT-ESHK-ELEV[JVAC,IZ[KGAS][LGAS]]
                    if(ELEFT < 0.0):
                        # Energy bookkeeping went negative: restart cascade.
                        GOTO100()
                    # RANDOM EMISSION ANGLE
                    R3=DRAND48(RDUM)
                    THET=numpy.arccos(1.0-2.0*R3)
                    R4=DRAND48(RDUM)
                    PHI=TWOPI*R4
                    DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                    DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                    DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
            # Fortran label 2: pick fluorescence vs Auger for current vacancy.
            def GOTO2():
                UPDATE(KGAS,LGAS,ISHELL)
                INIT=2
                # CHOOSE FLUORESCENCE OR AUGER TRANSITION
                TSUM=0.0
                for I in range(1,17):
                    TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
                    for J in range(1,17):
                        TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
                # NO MORE TRANSITIONS POSSIBLE
                if(TSUM == 0.0):
                    return
                # NORMALISE TO 1.0
                for I in range(1,17):
                    RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
                    for J in range(1,17):
                        AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
                # CREATE CUMULATIVE SUM ARRAY
                TEMP[1]=RADR[KGAS][LGAS][ISHELL][1]
                for I in range(2,17):
                    TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
                TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
                for I in range(2,17):
                    TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
                for J in range(1,16):
                    for I in range(1,17):
                        TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
                # FIND FLUORESCENCE OR AUGER TRANSITION
                R1=DRAND48(RDUM)
                for I in range(1,17):
                    if(R1 < TEMP[I]) :
                        # STORE PHOTON ENERGY AND UPDATE NOCC
                        IFLSUM[NVAC]=IFLSUM[NVAC]+1
                        EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL][IZ[KGAS][LGAS]]-ELEV[I][IZ[KGAS][LGAS]]
                        ELEFT=ELEFT-abs(EPHOTON[NVAC][IFLSUM[NVAC]])
                        if(ELEFT < 0.0):
                            GOTO100()
                        # RANDOM EMISSION DIRECTION
                        R3=DRAND48(RDUM)
                        THET=numpy.arccos(1.0-2.0*R3)
                        R4=DRAND48(RDUM)
                        PHI=TWOPI*R4
                        DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                        DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                        DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
                        # Fill the old vacancy; open one at the donor shell I.
                        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                        NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                        # FIND LOWEST VACANCY
                        VACANCY(KGAS,LGAS,ISHELL,ILAST)
                        if(ILAST == 1):
                            # NO MORE TRANSITIONS POSSIBLE
                            return
                        # endif
                        GOTO2()
                    # endif
                    GOTO2()
                # Auger / Coster-Kronig branch: R1 landed beyond TEMP[17].
                counter116=1
                while(counter116):
                    counter116=0
                    R2=R1-TEMP[17]
                    for J in range(1,17):
                        if(counter116):
                            break
                        for I in range(1,17):
                            # BUG(review): TEMP1 is a list — the Fortran call
                            # syntax TEMP1(...) raises TypeError; the CALCB4
                            # copy of this routine uses TEMP1[...] here.
                            if(R2 < TEMP1(I+((J-1)*17))) :
                                # AUGER OR COSTER KRONIG
                                # STORE EJECTED ELECTRON AND UPDATE NOCC
                                # BUG(review): tuple indexing ELEV[...,...] on
                                # nested lists (TypeError) — see note above.
                                ETEMP=ELEV[ISHELL,IZ[KGAS][LGAS]]-(ELEV[I,IZ[KGAS][LGAS]]+ELEV[I,IZ[KGAS][LGAS]+1])*0.5-(ELEV[J,IZ[KGAS][LGAS]]+ELEV[J,IZ[KGAS][LGAS]+1])*0.5
                                if(ETEMP < 0.0):
                                    # DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
                                    # NOTE(review): unlike the CALCB4 copy,
                                    # counter117 is never reset to 0 inside
                                    # this loop, so it redraws R1 until
                                    # R1 < TEMP[17] — TODO confirm intent.
                                    counter117=1
                                    while(counter117):
                                        R1=DRAND48(RDUM)
                                        if(R1 < TEMP[17]):
                                            counter117=1
                                            counter116=1
                                            break
                                # endif
                                IONSUM[NVAC]=IONSUM[NVAC]+1
                                if(IONSUM[NVAC]> 28) :
                                    print(' 5TH GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
                                    sys.exit()
                                # endif
                                ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
                                ELEFT=ELEFT-abs(ETEMP)
                                if(ELEFT < 0.0):
                                    GOTO100()
                                # RANDOM EMISSION DIRECTION
                                R3=DRAND48(RDUM)
                                THET=numpy.arccos(1.0-2.0*R3)
                                R4=DRAND48(RDUM)
                                PHI=TWOPI*R4
                                DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                                DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                                DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
                                # Fill the old vacancy; open two at I and J.
                                NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                                NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                                NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
                                # FIND LOWEST VACANCY
                                VACANCY(KGAS,LGAS,ISHELL,ILAST)
                                if(ILAST == 1):
                                    # NO MORE TRANSITIONS POSSIBLE
                                    return
                                # endif
                                GOTO4()
                            # endif
                            GOTO4()
                print(' ERROR IN CASCADE B5')
                sys.exit()
    # Start execution at Fortran label 100.
    GOTO100()
# end
3,333 | 9c85252b4048b5412978b3ac05cd6dde4479e3bf | from ctypes import CDLL
# Load the local shared library and convert report.svg with both entry points.
svg2pdf = CDLL("./libsvg2pdf.so")
# NOTE(review): under Python 3, ctypes passes str arguments as wchar_t*; if
# these C functions expect char* paths, the calls need bytes arguments
# (b"report.svg") or explicit argtypes — TODO confirm the C signatures.
svg2pdf.svg2pdf("report.svg", "teste2.pdf")
svg2pdf.svg2pdf2("report.svg", "teste3.pdf")
|
3,334 | 61b28088e4344d8a94006e5c04c189a44bbb6ff3 | #!c:\Python\python.exe
# Fig 35.16: fig35_16.py
# Program to display CGI environment variables
# NOTE(review): Python 2 syntax (print statements, cgi.escape) — this script
# will not run under Python 3 without conversion.
import os
import cgi
# CGI response header; the bare print emits the blank line separating
# headers from the document body.
print "Content-type: text/html"
print
print """<!DOCTYPE html PUBLIC
"-//W3C//DTD XHTML 1.0 Transitional//EN"
"DTD/xhtml1-transitional.dtd">"""
print """
<html xmlns = "http://www.w3.org/1999/xhtml" xml:lang="en"
lang="en">
<head><title>Environment Variables</title></head>
<body><table style = "border: 0">"""
# Emit one two-column table row per environment variable, alternating the
# row background color for readability.
rowNumber = 0
for item in os.environ.keys():
    rowNumber += 1
    if rowNumber % 2 == 0:
        backgroundColor = "white"
    else:
        backgroundColor = "lightgrey"
    # cgi.escape prevents HTML injection from variable values.
    print """<tr style = "background-color: %s">
<td>%s</td><td>%s</td></tr>""" \
    % ( backgroundColor, item,
    cgi.escape( os.environ[ item ] ) )
print """</table></body></html>"""
##########################################################################
# (C) Copyright 1992-2004 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
########################################################################## |
3,335 | 8de82d09c8a9a1c1db59b0cac9cf8dda04f35847 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import json
import os
import convlab
from convlab.modules.dst.multiwoz.dst_util import init_state
from convlab.modules.dst.multiwoz.dst_util import normalize_value
from convlab.modules.dst.state_tracker import Tracker
from convlab.modules.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA
class RuleDST(Tracker):
    """Rule based DST which trivially updates new values from NLU result to states.

    ``state`` follows the MultiWOZ convention produced by ``init_state()``:
    a dict with ``belief_state`` (per-domain ``semi``/``book`` slots),
    ``request_state`` and ``user_action``.
    """
    def __init__(self):
        Tracker.__init__(self)
        self.state = init_state()
        # value_dict.json maps raw NLU slot values onto canonical ontology values.
        prefix = os.path.dirname(os.path.dirname(convlab.__file__))
        # BUGFIX: close the file handle instead of leaking it via json.load(open(...)).
        with open(prefix + '/data/multiwoz/value_dict.json') as f:
            self.value_dict = json.load(f)
    def update(self, user_act=None):
        """Merge a dialog act dict ``{'Domain-Type': [[slot, value], ...]}``
        into a deep copy of the current state and return the new state.

        :param user_act: NLU output; must be a dict.
        :return: the updated state dict (also stored on ``self.state``).
        :raises Exception: if ``user_act`` is not a dict, or an informed
            domain is missing from the belief state.
        """
        if not isinstance(user_act, dict):
            raise Exception('Expect user_act to be <class \'dict\'> type but get {}.'.format(type(user_act)))
        previous_state = self.state
        new_belief_state = copy.deepcopy(previous_state['belief_state'])
        new_request_state = copy.deepcopy(previous_state['request_state'])
        for domain_type in user_act:
            domain, tpe = domain_type.lower().split('-')
            # These pseudo-domains have no belief-state entry.
            if domain in ['unk', 'general', 'booking']:
                continue
            if tpe == 'inform':
                for k, v in user_act[domain_type]:
                    # Map the NLU slot name onto the system slot name.
                    k = REF_SYS_DA[domain.capitalize()].get(k, k)
                    if k is None:
                        continue
                    # BUGFIX: the original wrapped an assert in a bare
                    # ``except:``; an explicit membership check raises the
                    # same Exception/message without swallowing unrelated errors.
                    if domain not in new_belief_state:
                        raise Exception('Error: domain <{}> not in new belief state'.format(domain))
                    domain_dic = new_belief_state[domain]
                    assert 'semi' in domain_dic
                    assert 'book' in domain_dic
                    if k in domain_dic['semi']:
                        # Canonicalize the value against the ontology.
                        nvalue = normalize_value(self.value_dict, domain, k, v)
                        new_belief_state[domain]['semi'][k] = nvalue
                    elif k in domain_dic['book']:
                        new_belief_state[domain]['book'][k] = v
                    elif k.lower() in domain_dic['book']:
                        new_belief_state[domain]['book'][k.lower()] = v
                    elif k == 'trainID' and domain == 'train':
                        new_belief_state[domain]['book'][k] = normalize_value(self.value_dict, domain, k, v)
                    else:
                        # Unknown slot: log for later ontology fixes instead of failing.
                        with open('unknown_slot.log', 'a+') as f:
                            f.write('unknown slot name <{}> of domain <{}>\n'.format(k, domain))
            elif tpe == 'request':
                for k, v in user_act[domain_type]:
                    k = REF_SYS_DA[domain.capitalize()].get(k, k)
                    if domain not in new_request_state:
                        new_request_state[domain] = {}
                    if k not in new_request_state[domain]:
                        # 0 marks "requested but not yet answered".
                        new_request_state[domain][k] = 0
        new_state = copy.deepcopy(previous_state)
        new_state['belief_state'] = new_belief_state
        new_state['request_state'] = new_request_state
        new_state['user_action'] = user_act
        self.state = new_state
        return self.state
    def init_session(self):
        """Reset the tracker to a fresh initial state for a new dialog."""
        self.state = init_state()
3,336 | 6e6c6c5795e8723a86ae5dfc8f40df57d3dd10f7 | #!/usr/bin/env python
import argparse
import csv
import glob
import os
import sys
def run_main():
    """
    Scan a run directory and, for each subdirectory containing *.xed files,
    write a CSV (info/<run>_<subdir>_files.csv) with one SRM URI per file.

    :return: exit code -- 0 on success, 1 otherwise
    """
    parser = argparse.ArgumentParser(description="Scan a run directory and create files to ")
    parser.add_argument('--run-directory', dest='run_directory',
                        action='store', default='',
                        help='path to directory with xed files to process')
    args = parser.parse_args(sys.argv[1:])
    if not os.path.isdir(args.run_directory):
        sys.stderr.write("{0} is not a directory, exiting\n".format(args.run_directory))
        return 1
    # Derive the run name from the last path component (handles a trailing '/').
    run_name = os.path.abspath(args.run_directory)
    if os.path.basename(run_name):
        run_name = os.path.basename(run_name)
    else:
        run_name = os.path.split(run_name)[0].split('/')[-1]
    if not os.path.exists('info'):
        os.mkdir('info')
    for directory in os.listdir(args.run_directory):
        if not os.path.isdir(os.path.join(args.run_directory, directory)):
            continue
        csv_filename = "info/{0}_{1}_files.csv".format(run_name, directory)
        entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed'))
        if not entries:
            # Nothing to record for this subdirectory.
            continue
        with open(csv_filename, 'w') as file_obj:
            csv_writer = csv.writer(file_obj)
            csv_writer.writerow(['Run', 'Data Set', 'File'])
            for entry in entries:
                # Map the local path onto the SRM endpoint namespace.
                uri = "srm://ceph-se.osgconnect.net:8443/srm/v2/" + \
                      "server?SFN=/cephfs/srm/xenon/" + \
                      entry.replace('/xenon/', '')
                csv_writer.writerow([run_name, directory, uri])
    # BUGFIX: previously fell off the end returning None; the documented
    # contract (and the sys.exit caller) expects 0 on success.
    return 0
if __name__ == '__main__':
sys.exit(run_main())
|
3,337 | 00587de133ee68415f31649f147fbff7e9bf65d5 | # Print name and marks
f = open("marks.txt", "rt")
for line in f:
line = line.strip()
if len(line) == 0: # Blank line
continue
name, *marks = line.split(",")
if len(marks) == 0:
continue
marks = filter(str.isdigit, marks) # Take only numbers
total = sum(map(int, marks)) # Convert str to it and sum it
print(f"{name:15} {total:4}")
f.close()
|
3,338 | 0dd17d8872b251fbc59a322bf3c695bd8079aba4 | #-*- coding: utf-8 -*-
"""
Django settings for HyperKitty + Postorius
Pay attention to settings ALLOWED_HOSTS and DATABASES!
"""
from os.path import abspath, dirname, join as joinpath
from ConfigParser import SafeConfigParser
def read_cfg(path, section=None, option=None):
    """Read an INI file at *path*.

    With *section* given, return that option's value (or None when absent);
    otherwise return a getter callable taking (section, option).
    """
    parser = SafeConfigParser()
    parser.read(path)
    def lookup(section, option):
        # Missing section/option pairs yield None rather than raising.
        if parser.has_option(section, option):
            return parser.get(section, option)
        return None
    if section:
        return lookup(section, option)
    return lookup
mailman_cfg = read_cfg('/etc/mailman.cfg')
BASE_DIR = '/usr/lib/bundles/mailman-webui'
CONF_DIR = '/etc/mailman-webui'
DATA_DIR = '/var/lib/mailman-webui'
LOG_DIR = '/var/log/mailman-webui'
# Hosts/domain names that are valid for this site.
# NOTE: You MUST add domain name of your instance of this application here!
# See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost']
# Mailman API credentials
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001)
MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'
MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')
MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key')
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1')
# REST API
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
}
# Only display mailing-lists in HyperKitty from the same virtual host
# as the webserver.
FILTER_VHOST = False
#
# Application definition
#
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'hyperkitty',
'rest_framework',
'django_gravatar',
'paintstore',
'compressor',
'haystack',
'django_extensions',
'postorius',
'django_mailman3',
'stronghold',
# Uncomment the next line to enable integration with Sentry
# and set DSN in RAVEN_CONFIG.
#'raven.contrib.django.raven_compat',
'allauth',
'allauth.account',
'allauth.socialaccount',
# Uncomment providers that you want to use, if any.
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django_mailman3.middleware.TimezoneMiddleware',
'postorius.middleware.PostoriusMiddleware',
# Uncomment to require a user to be authenticated to view any page.
#'stronghold.middleware.LoginRequiredMiddleware',
)
# A string representing the full Python import path to your root URLconf.
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Directory for templates override.
joinpath(DATA_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_mailman3.context_processors.common',
'hyperkitty.context_processors.common',
'postorius.context_processors.postorius',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Using the cache infrastructure can significantly improve performance on a
# production setup. This is an example with a local Memcached server.
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
# 'LOCATION': '127.0.0.1:11211',
# }
#}
#
# Databases
# See https://docs.djangoproject.com/en/1.9/ref/settings/#databases
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': joinpath(DATA_DIR, 'db.sqlite3'),
}
# Remove the above lines and uncomment the below to use PostgreSQL.
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'mailman_webui',
# 'USER': 'mailman_webui',
# 'PASSWORD': 'change-me',
# # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'HOST': '127.0.0.1',
# 'PORT': '',
# }
}
# Full-text search engine
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': joinpath(DATA_DIR, 'fulltext_index'),
},
}
#
# Outgoing mails
#
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
# The host and port of the SMTP server to use for sending email.
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'
EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)
# Username and password to use for the SMTP server defined above.
EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''
EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''
# Whether to use an explicit TLS connection when talking to the SMTP server.
EMAIL_USE_TLS = False
# Whether to use an implicit TLS connection when talking to the SMTP server.
EMAIL_USE_SSL = False
# A tuple that lists people who get code error notifications. When DEBUG=False
# and a view raises an exception, Django will email these people with the full
# exception information. Each member of the tuple should be a tuple of (Full
# name, email address).
ADMINS = (
('Mailman Admin', 'root@localhost'),
)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL
#SERVER_EMAIL = 'root@your-domain.org'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email
#DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org'
#
# Security settings
#
# A secret key used for signing sessions, cookies, password reset tokens etc.
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
#SECURE_REDIRECT_EXEMPT = [
# 'archives/api/mailman/.*', # Request from Mailman.
#]
#
# Authentication
#
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
# Uncomment to next line to enable LDAP authentication.
#'custom.LDAPBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_URL = 'account_login'
LOGIN_REDIRECT_URL = 'hk_root'
LOGOUT_URL = 'account_logout'
# Whether registration of new accounts is currently permitted.
REGISTRATION_OPEN = True
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' },
]
# URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware
# does not *force* them to require authentication.
STRONGHOLD_PUBLIC_URLS = (
r'^/accounts/.*',
r'^/archives/api/mailman/.*',
)
## Django Allauth
# Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable.
ACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
ACCOUNT_UNIQUE_EMAIL = True
# Whether to disable intermediate logout page.
ACCOUNT_LOGOUT_ON_GET = False
SOCIALACCOUNT_PROVIDERS = {}
#SOCIALACCOUNT_PROVIDERS = {
# 'openid': {
# 'SERVERS': [
# {
# 'id': 'yahoo',
# 'name': 'Yahoo',
# 'openid_url': 'http://me.yahoo.com'
# }
# ],
# },
# 'google': {
# 'SCOPE': ['profile', 'email'],
# 'AUTH_PARAMS': {'access_type': 'online'},
# },
# 'facebook': {
# 'METHOD': 'oauth2',
# 'SCOPE': ['email'],
# 'FIELDS': [
# 'email',
# 'name',
# 'first_name',
# 'last_name',
# 'locale',
# 'timezone',
# ],
# 'VERSION': 'v2.4',
# },
#}
## Django LDAP
if 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:
import ldap
from django_auth_ldap.config import LDAPSearch
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')
AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'
AUTH_LDAP_USER_SEARCH = LDAPSearch(
'ou=People,dc=example,dc=org',
ldap.SCOPE_SUBTREE,
'(&(mail=*)(uid=%(user)s))'
)
AUTH_LDAP_USER_ATTR_MAP = {
'first_name': 'givenName',
'last_name': 'sn',
'email': 'mail',
}
#
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
#
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
#
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = joinpath(BASE_DIR, 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static".
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# django-compressor
COMPRESS_OFFLINE = True
# Compatibility with Bootstrap 3
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
#
# Gravatar
# https://github.com/twaddington/django-gravatar
#
# Gravatar base url.
GRAVATAR_URL = 'http://cdn.libravatar.org/'
# Gravatar base secure https url.
GRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/'
# Gravatar size in pixels.
#GRAVATAR_DEFAULT_SIZE = '80'
# An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'.
GRAVATAR_DEFAULT_IMAGE = 'retro'
# One of the following: 'g', 'pg', 'r', 'x'.
#GRAVATAR_DEFAULT_RATING = 'g'
# True to use https by default, False for plain http.
GRAVATAR_DEFAULT_SECURE = True
#
# Logging
#
# A sample logging configuration. The only tangible logging performed by this
# configuration is to send an email to the site admins on every HTTP 500 error
# when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file':{
'level': 'INFO',
#'class': 'logging.handlers.RotatingFileHandler',
'class': 'logging.handlers.WatchedFileHandler',
'filename': joinpath(LOG_DIR, 'mailman-webui.log'),
'formatter': 'verbose',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
#'django.request': {
# 'handlers': ['mail_admins'],
# 'level': 'ERROR',
# 'propagate': True,
#},
'django.request': {
'handlers': ['file'],
'level': 'ERROR',
'propagate': True,
},
'django': {
'handlers': ['file'],
'level': 'ERROR',
'propagate': True,
},
'postorius': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
'hyperkitty': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'root': {
'handlers': ['file'],
'level': 'INFO',
},
}
if 'raven.contrib.django.raven_compat' in INSTALLED_APPS:
RAVEN_CONFIG = {
'dsn': 'https://<key>:<secret>@sentry.io/<project>',
}
LOGGING['handlers']['sentry'] = {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
}
LOGGING['loggers']['root']['handlers'].append('sentry')
try:
from settings_local import *
except ImportError:
pass
|
3,339 | 894fa01e16d200add20f614fd4a5ee9071777db9 | # -*- coding: utf-8 -*-
from scrapy import Request
from ..items import ZhilianSpiderItem
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider
class ZhilianSpider(RedisCrawlSpider):
    """Redis-backed crawl spider for zhaopin.com accountant job listings.

    ``start_requests`` seeds the search-result page; the crawl rules follow
    pagination and hand each job-detail page to ``parse_zhilian``.
    """
    # Spider name used by scrapy to launch the crawl.
    name = 'zhilianspider'
    # Desktop Chrome UA so the site serves its normal HTML layout.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
        'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
    }
    # Rule 1 follows the "next page" anchor; rule 2 extracts job-detail pages.
    rules = [
        Rule(LinkExtractor(restrict_xpaths='/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'), follow=True),
        Rule(LinkExtractor(allow=r'http://jobs.zhaopin.com/(\d.+).htm'), callback='parse_zhilian')
    ]
    def start_requests(self):
        """Seed the crawl with the accountant-jobs search-result page."""
        url = 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'
        yield Request(url, headers=self.headers)
    def parse_zhilian(self, response):
        """Yield one ZhilianSpiderItem scraped from a job-detail page."""
        _ = self  # self is otherwise unused; silences static-method linting
        item = ZhilianSpiderItem()
        # The job URL doubles as the unique identifier.
        item['job_id'] = response.url
        item['job_name'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()
        item['job_company'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()
        item['job_salary'] = response.xpath('/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first().strip()
        item['job_education'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())
        item['job_address'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()).strip()
        item['job_category'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())
        # Join all description paragraphs; normalize full-width commas and CRLFs.
        item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
        if not item['job_description']:
            # Fallback: some layouts put the text directly in the container div.
            item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
        text = ''.join(response.xpath(
            '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
        if text:
            item['company_profile'] = text
            # NOTE(review): `text` is truthy here, so company_profile can never
            # equal '' and this branch looks unreachable — TODO confirm intent.
            if item['company_profile'] == '':
                item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').extract()).replace(',', ',').replace('\r\n', '').strip()
        else:
            item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').extract()).replace(',', ',').replace('\r\n', '').strip()
        yield item
|
3,340 | 8435a69ee9793435c7483df9bb15f01ef8051479 | movies = ["Abraham Lincoln", "Blue Steel", "Behind Office Doors", "Bowery at Midnight", "Captain Kidd", "Debbie Does Dallas", "The Emperor Jones", "Rain"]
movies_tuple = [("Abraham Lincoln", 1993), ("Blue Steel", 1938), ("Behind Office Doors", 1999), ("Bowery at Midnight", 2000), ("Captain Kidd",2010), ("Debbie Does Dallas",1908), ("The Emperor Jones", 2016), ("Rain", 2011)]
# selected_movies = []
# for title in movies:
# if title.startswith("B"):
# selected_movies.append(title)
#list_comprehension
# [expr for val in collection]
# [expr for val in collection if <test>]
# [expr for val in collection if <test> and <test2>]
# [expr for val1 in collection1 and val2 in collection2]
#find movies that starts with "B"
selected_movies = [title for title in movies if title.startswith("B")]
print(selected_movies)
#this is for tuples--- find movies released before 2000
selected_movies2 = [title for (title, year) in movies_tuple if year <2000 ]
print (selected_movies2) |
3,341 | 97bbbbe6a3a89b9acc22ebdff0b96625d6267178 | import numpy as np
import itertools as itt
from random import random
from sys import float_info
# Rounding precision (decimal digits) for stored angles.
DIGITS = 3
# Tolerance when matching the spin angle recovered from sin vs. cos.
ACCURACY = 0.001
# Maximum allowed "up" angle, in degrees.
UP_MAX = 30


class AngleInfo(object):
    """An orientation given by a spin angle (0..360 deg) and an up angle
    (0..UP_MAX deg).

    Can be constructed either from the two angles directly (a 2-tuple) or
    from the triple (sin(spin), cos(spin), sin(up)/sin(UP_MAX)) as produced
    by getVectors()/getRandomVector().  Invalid input yields spin = up = None.
    """

    def __init__(self, information):
        # Fix: initialize to None so a tuple of unexpected length (neither
        # 2 nor 3) no longer raises NameError below.
        spin = None
        up = None
        if len(information) == 2:
            # Initialize from angles; spin is wrapped into [0, 360).
            spin = round(information[0] % 360, DIGITS)
            up = round(information[1], DIGITS)
            if spin < 0 or 360 < spin or up < 0 or UP_MAX < up:
                # invalid angles
                spin = None
                up = None
        elif len(information) == 3:
            # Initialize from trigonometric values.
            sin_s, cos_s, sin_u = information
            # Fix: replaces the Python-2-only builtin `reduce` with `all`.
            if all(-1 <= value <= 1 for value in (sin_s, cos_s, sin_u)):
                # Undo the normalization of sin(up) by sin(UP_MAX).
                sin_u_org = sin_u * (np.sin(np.radians(UP_MAX)) / 1.0)
                up = np.rad2deg(np.arcsin(sin_u_org))
                spin = AngleInfo.calculateSpinAngle(sin_s, cos_s)
        # calculateSpinAngle signals failure with float_info.max; invalid
        # constructor input leaves spin as None.  Fix: the original called
        # round(None, DIGITS) in the None case and raised TypeError.
        if spin is not None and spin != float_info.max:
            self.spin = round(spin, DIGITS)
            self.up = round(up, DIGITS)
        else:
            self.spin = None
            self.up = None

    def getAngles(self):
        """Return (spin, up) in degrees, or (None, None) if invalid."""
        return (self.spin, self.up)

    def getVectors(self):
        """Return (sin(spin), cos(spin), sin(up)/sin(UP_MAX)), or Nones."""
        if self.spin is None or self.up is None:
            return (None, None, None)
        else:
            return (np.sin(np.radians(self.spin)),
                    np.cos(np.radians(self.spin)),
                    np.sin(np.radians(self.up)) / np.sin(np.radians(UP_MAX)))

    @staticmethod
    def calculateSpinAngle(sin_s, cos_s):
        """Recover the spin angle (degrees) from its sine and cosine.

        arcsin and arccos are each two-valued on [0, 360); the answer is the
        candidate both sets agree on (within ACCURACY).  Returns
        float_info.max when no pair matches.
        """
        spin_fsin = np.rad2deg(np.arcsin(sin_s))
        if spin_fsin < 0:
            spin_fsin = spin_fsin + 360
        spin_fcos = np.rad2deg(np.arccos(cos_s))
        if spin_fcos < 0:
            # Fix: the original assigned to a misspelled name (`spin_focs`).
            # np.arccos never returns negatives, so this branch is defensive.
            spin_fcos = spin_fcos + 360
        angles_fsin = set([spin_fsin % 360, (540 - spin_fsin) % 360])
        angles_fcos = set([spin_fcos % 360, (360 - spin_fcos) % 360])
        res = None
        for cand_sin, cand_cos in itt.product(angles_fsin, angles_fcos):
            if abs(cand_sin - cand_cos) < ACCURACY:
                res = (cand_sin + cand_cos) / 2.0
        return (res if res is not None else float_info.max)

    @staticmethod
    def getRandomVector():
        """Return the getVectors() triple of a uniformly random orientation."""
        spin = random() * 360
        up = random() * UP_MAX
        return (np.sin(np.radians(spin)), np.cos(np.radians(spin)),
                np.sin(np.radians(up)) / np.sin(np.radians(UP_MAX)))
def main():
    # Round-trip stress test: build an orientation from random vectors,
    # rebuild it from its own getVectors() output, and verify that both
    # representations agree for s*u random samples.
    # NOTE: uses Python 2 print statements; this module is Python 2 code.
    s = 100
    u = 100
    for i in range(s):
        for j in range(u):
            a = AngleInfo(AngleInfo.getRandomVector())
            b = AngleInfo(a.getVectors())
            print a.getAngles(), b.getAngles(), a.getVectors(), b.getVectors()
            if not a.getAngles() == b.getAngles() or not a.getVectors() == b.getVectors():
                print "check failed at %d %d" % (i, j)
if __name__ == '__main__':
    main()
|
3,342 | 6d244b719200ae2a9c1a738e746e8c401f8ba4e2 | from django.conf.urls.defaults import *
## reports view
# URL routes for the commtrack_reports app; each regex maps to a view
# function (named by string) in ``commtrack_reports.views``.
# NOTE(review): ``django.conf.urls.defaults`` and string-based ``patterns()``
# are legacy Django APIs removed in later releases -- confirm the pinned
# Django version before upgrading.
urlpatterns = patterns('commtrack_reports.views',
    (r'^commtrackreports$', 'reports'),
    (r'^sampling_points$', 'sampling_points'),
    (r'^commtrack_testers$', 'testers'),
    (r'^date_range$', 'date_range'),
    (r'^create_report$', 'create_report'),
    (r'^export_csv$', 'export_csv'),
    (r'^export_pdf$', 'pdf_view'),
    # (r'^test$', 'test'),
)
|
3,343 | d7daf9b26f0b9f66b15b8533df032d17719e548b | """
This is a post login API and hence would have APIDetails and SessionDetails in the request object
-------------------------------------------------------------------------------------------------
Step 1: find if the user's IP address is provided in the request object; if yes then go to step 2, else go to step 4
Step 2: call third party api to find the country of the IP address and its ISO2 and ISO3 codes
Step 3: using the ISO2 and/or ISO3 codes get the user's geo and associated currency. Return output
Step 4: from UserProfiles table get city_id and using this get the user's geo and associated currency. Return output
"""
"""
INPUT:
{
"APIDetails":{
"token_type":1,
"token_vendor_id":1,
"token_string":"sdxfcgvbhjnmklasdfghjk",
"dev_key":"sjdkljagagerukjdgjncjdsnjkfhkjasdghreuiuie@#$%$dgd#$@d234"
},
"SessionDetails":{
"profile_id":159,
"session_id":787,
"session_key":"xxbJt0nUwyMbsDdOfVFYISRjoD1DC0jO"
},
"APIParams":{
"user_ip" : "192.168.0.1"
}
}
"""
"""
OUTPUT:
{
"AuthenticationDetails": {
"Status": "Success",
"Message": "ApiDetails fine to process"
},
"SessionDetails": {
"Status": "Success",
"Message": "session is active. session details updated",
"Payload": {
"profile_id": 159,
"session_id": 787,
"session_key": "LcTyf2Ypx6YRQOz3AYOyaE2uedblWnZB"
}
},
"Payload": {
"Status": "Success",
"Message": "ticket types and respective questions Fetched successfully",
"Payload": {
"geo_id": 2,
"geo_name": "Indian Subcontinent",
"geo_currency": "INR"
}
}
}
""" |
3,344 | 9cb3d8bc7af0061047136d57abfe68cbb5ae0cd7 | '''给定一个只包含小写字母的有序数组letters 和一个目标字母 target,寻找有序数组里面比目标字母大的最小字母。
数组里字母的顺序是循环的。举个例子,如果目标字母target = 'z' 并且有序数组为 letters = ['a', 'b'],则答案返回 'a'。输入:
示例:
letters = ["c", "f", "j"]
target = "a"
输出: "c"
'''
class Solution(object):
    def nextGreatestLetter(self, letters, target):
        """Return the smallest letter in ``letters`` strictly greater than
        ``target``, wrapping around to ``letters[0]`` when none exists.

        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        # One pass over ``letters`` instead of the original's hard-coded
        # a-z table with an O(len(letters)) membership test per candidate
        # (which also crashed with ValueError for any non a-z target).
        greater = [letter for letter in letters if letter > target]
        return min(greater) if greater else letters[0]
class SolutionBest(object):
    def nextGreatestLetter(self, letters, target):
        """Return the first letter of the sorted ``letters`` greater than
        ``target``; wrap around to ``letters[0]`` if there is none.

        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        # Single-character strings compare lexicographically, so the first
        # element exceeding ``target`` in the sorted input is the answer.
        return next((letter for letter in letters if letter > target), letters[0])
3,345 | 00f95733505b3e853a76bbdd65439bcb230fa262 | import subprocess
import glob
import os
import time
import sys
import xml.etree.ElementTree as ET
import getpass
import psutil
# Batch driver: run Agisoft PhotoScan headlessly ("-r" script mode) for every
# project XML matched by ``xmlnames``, keeping at most ``nprocesses`` children
# alive, and appending progress to simUASagiproc_log.log.  A per-project
# autoproc.log marks a project as processed/in-progress so reruns skip it.
if len(sys.argv)==1:
    # Developer defaults when run without arguments.
    photoscanname = r"C:\Program Files\Agisoft\PhotoScan Pro\photoscan.exe"
    scriptname = r"C:\Users\slocumr\github\SimUAS\batchphotoscan\agiproc.py"
    #xmlnames = r"P:\Slocum\USVI_project\01_DATA\20180319_USVI_UAS_BATHY\02_PROCDATA\06_PROCIMAGES\*\06_QUICKPROC\*2.xml"
    xmlnames = r"C:\Users\slocumr\github\SimUAS\data\testagiproc\06_QUICKPROC\*.xml"
    nprocesses = 1
else:
    # argv: 1 = photoscan.exe path, 2 = processing script, 3 = XML glob.
    photoscanname = sys.argv[1]
    scriptname = sys.argv[2]
    xmlnames = sys.argv[3]
    nprocesses = 1
SLEEPTIME = 10  # seconds between poll cycles
DODEBUG = True  # also log CPU/RAM usage each poll
# get xmlfiles
xmlfiles = glob.glob(xmlnames)
nfiles = len(xmlfiles)
# empty lists (parallel bookkeeping: one entry per live child process)
processes = []          # live subprocess.Popen handles
procname = []           # xml file per live process
procind = []            # 0-based index per xml file
logname = []            # autoproc.log path per xml file
currentloghandles = []  # open log handle per live process
currentind = []         # 1-based display index per live process
proclog = open("simUASagiproc_log.log",'at')
try:
    # detect already processed or processing folders
    nexist = 0
    for i,fname in enumerate(xmlfiles):
        rootdir,f = os.path.split(fname)
        # The XML's <export rootname="..."> attribute names the output folder.
        rootoutput = ET.parse(fname).getroot().find('export').get('rootname')
        logname.append( rootdir + "/" + rootoutput + "/autoproc.log" )
        procind.append(i)
        if os.path.exists(rootdir + "/" + rootoutput + "/autoproc.log"):
            nexist = nexist+1
    print('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles))
    proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles) + '\n')
    for fname,i,logfile in zip(xmlfiles,procind,logname):
        i = i+1
        if not os.path.exists(logfile):
            currentind.append(i)
            print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : START : " + '{:3d}/{:3d}'.format(i,nfiles) + " : " + fname)
            proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : START : " + '{:3d}/{:3d}'.format(i,nfiles) + " : " + fname + '\n')
            foldername,foo = os.path.split(logfile)
            if not os.path.exists(foldername):
                os.makedirs(foldername)
            # Creating autoproc.log claims the project; the handle doubles as
            # the child's stdin/stdout/stderr.
            iloghandle = open(logfile,'wt')
            iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
            iloghandle.write(getpass.getuser() + "\n")
            iloghandle.flush()
            currentloghandles.append(iloghandle)
            processes.append(subprocess.Popen([photoscanname,"-r",scriptname,fname],stdin=iloghandle, stdout=iloghandle, stderr=iloghandle))
            procname.append(fname)
            # Throttle: block here until a worker slot frees up.
            while len(processes)>=nprocesses:
                time.sleep(SLEEPTIME)
                if DODEBUG:
                    cpu_percent = psutil.cpu_percent()
                    ram_percent = psutil.virtual_memory().percent
                    print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))
                    proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\n')
                for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):
                    if p.poll() is not None:
                        # NOTE(review): these DONE lines report ``fname`` (the
                        # outer loop's current project) and stamp/close
                        # ``iloghandle`` (the most recently opened log), not
                        # the finished process's ``name``/``log`` from the zip
                        # -- likely a bug; the wait loop below reassigns the
                        # handle but keeps ``fname``.
                        print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname)
                        proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname + '\n')
                        iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
                        iloghandle.flush()
                        iloghandle.close()
                # Drop finished children from all parallel lists in lockstep.
                procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]
                currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]
                currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]
                processes[:] = [p for p in processes if p.poll() is None]
    # Wait for everything to finish
    while len(processes)>0:
        time.sleep(SLEEPTIME)
        if DODEBUG:
            cpu_percent = psutil.cpu_percent()
            ram_percent = psutil.virtual_memory().percent
            print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))
            proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\n')
        for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):
            if p.poll() is not None:
                # NOTE(review): still reports ``fname`` rather than ``name``.
                print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname)
                proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname + '\n')
                iloghandle= log
                iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
                iloghandle.flush()
                iloghandle.close()
        procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]
        currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]
        currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]
        processes[:] = [p for p in processes if p.poll() is None]
except KeyboardInterrupt:
    # Ctrl-C: kill every live child and delete its claim log so the project
    # is reprocessed on the next run.
    for p, ind, name, iloghandle in zip(processes, currentind, procname, currentloghandles):
        print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : KILL : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + name)
        proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : KILL : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + name + '\n')
        p.kill()
        iloghandle.flush()
        iloghandle.close()
        time.sleep(0.1)
        os.remove(logname[ind-1])
proclog.flush()
proclog.close()
print("Done")
3,346 | 1c6077d965f5bc8c03344b53d11851f5cd50bca8 | from Task2.src.EmailInterpreter import EmailInterpreter
import os
# Part B:
# -------
# Write a child-class of the previously written base class, which
# implements the 'split_file' function, simply by treating each line as a
# unit (it returns the list of lines).
class LineBreaker(EmailInterpreter):
    """Interpreter that treats every line of an email file as one unit."""

    def split_file(self, file_name):
        """Return the file's content as a list of its lines."""
        full_path = os.path.join(self.directory_path, file_name)
        with open(full_path, 'r') as handle:
            return handle.readlines()
3,347 | b091d00f5b5e997de87b36adbe9ce603a36ca49c | from django.apps import AppConfig
class ScambioConfig(AppConfig):
    """Django application configuration for the ``scambio`` app."""
    name = 'scambio'
3,348 | 2a6b373c443a1bbafe644cb770bc163536dd5573 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Jul 19 16:02:11 2010
# by: The Resource Compiler for PyQt (Qt v4.6.3)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = b"\
\x00\x00\x07\xb6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xaf\xc8\x37\x05\x8a\xe9\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x07\x48\x49\x44\x41\x54\x78\xda\x62\
\xfc\xff\xff\x3f\xc3\x40\x02\x80\x00\x62\x62\x18\x60\x00\x10\x40\
\x03\xee\x00\x80\x00\x1a\x70\x07\x00\x04\xd0\x80\x3b\x00\x20\x80\
\x58\x40\x84\xb1\x71\x2a\xc3\x8f\x1f\x3f\x99\x7e\xfe\xfc\xce\xfd\
\xff\xff\xbf\xbf\x4a\x4a\xaa\x86\x52\x52\x02\x59\x3c\x3c\x8c\x66\
\x7f\xfe\xfc\x61\x7e\xf2\xe4\xed\x8d\xd3\xa7\xf7\x2d\xfb\xf6\xed\
\xdd\x76\x25\x25\xc7\x9f\x0c\x0c\xff\xa0\xda\x19\x21\xbe\x60\x82\
\xf8\x83\x91\x91\x11\x8e\x99\x98\x18\x19\x40\xe9\x1b\xc4\x86\xc9\
\x81\xc4\x98\x98\x98\x19\x7f\xff\xfe\xf4\xe5\xcc\x99\xa5\xe0\xd4\
\x0f\x10\x40\x60\x07\xfc\xfb\xf7\xdf\x4e\x45\x45\xa6\x41\x50\x90\
\x97\x07\x98\x2b\xfe\x72\x70\x70\x6a\x70\x71\x31\x0a\xb0\xb1\x31\
\x81\x2d\xe1\xe5\xe5\x55\x94\x92\x8a\x75\xff\xf7\xef\xef\x65\x06\
\x86\xff\x3f\x41\x86\xb0\xb3\xb3\x83\x03\x10\x64\xfe\xef\xdf\xbf\
\x41\x06\x83\x2d\x01\x7a\x00\xcc\x06\x3b\xe0\xdf\x7f\x86\xff\x70\
\x87\x81\x4c\x02\x3a\x80\x99\x99\xf1\xc5\x8b\x97\xaf\x1e\x3c\x50\
\xa9\x7f\xf3\xe6\xce\x19\x80\x00\x02\x3b\x40\x4d\x4d\x76\x62\x6f\
\x6f\x9a\x81\xa0\x20\x1f\xd8\xd5\x9f\x3f\x7f\x67\xf8\xf8\xf1\x2b\
\xc3\xb7\x6f\xbf\xc1\x7c\x16\x16\x66\x06\x56\x56\x56\xa0\x47\x99\
\xf4\x41\x86\xb1\xb1\x73\x30\x5c\xbf\x7c\x9c\xe1\xf1\x83\x3b\x0c\
\x6a\xda\x46\x0c\xc2\x22\x62\x40\x75\x7f\x80\x1e\xf9\xc7\xc0\xcc\
\xcc\xcc\xf0\xf7\x2f\x13\x03\x07\x07\x1f\x03\x3b\x3b\x50\x2d\x0b\
\x23\x03\x33\x28\x10\x18\x99\x18\xbe\x7c\xfd\xc1\xf0\xfd\x27\x2b\
\xd0\xfc\xbf\x0c\xf7\xef\x5f\x66\x02\x3a\x20\x09\x20\x80\xc0\x0e\
\x10\x15\xe5\x96\x65\x61\xf9\xc7\xf0\xe5\xcb\x67\xb0\xeb\x3f\x7d\
\xfa\xca\xf0\xf2\xe5\x7b\x86\x0f\x1f\xbe\x83\x83\x8f\x99\x99\x05\
\x8c\x41\x72\x5c\x9c\x5c\x0c\x77\x6f\x9f\x60\x68\x59\x75\x9c\x41\
\xc9\x3a\x80\xe1\x45\xd3\x74\x86\x25\xfd\xb9\x0c\x4b\x96\xaf\x66\
\xf8\xf8\xe1\x03\x43\x45\x45\x25\xd8\xe7\x97\xaf\x5e\x64\x10\x91\
\x92\x65\x10\x92\x94\x61\x78\xf1\x8d\x91\xe1\xf9\xd7\xff\x0c\xaf\
\xdf\xfd\x64\xe0\x7a\x78\x83\x41\x54\x50\x11\xe8\x40\x0e\x05\xa0\
\xd1\x0a\x00\x01\x04\x8e\xbc\x5f\xbf\x7e\xfd\xfd\xfd\xfb\x2f\x3c\
\xbe\x50\xe3\x0c\x82\x41\x7c\x66\x66\x26\x70\xb4\xbf\x7c\x76\x93\
\xe1\xfe\x27\x1e\x86\xdd\x8f\xa5\x18\x18\x39\x44\x19\x04\xf8\x78\
\x18\x56\x2c\x5f\xc1\xb0\x60\xc1\x22\x86\xef\xdf\x7f\x30\x28\xab\
\x28\x33\xd8\x58\x9b\x31\x3c\xff\xc6\xc4\x70\xe6\xfe\x67\x86\xcb\
\xf7\xde\x30\x7c\xff\xf2\x9b\x81\xf9\xe7\x37\x06\x0e\x60\xd4\xfd\
\xfe\xf5\x07\x18\x6d\x7f\x41\x96\xb1\x01\x04\x10\x0b\x2c\x31\x21\
\xd9\x8d\x15\x40\x1c\xc7\xc8\xf0\xf8\xf1\x0d\x06\x77\x5f\x6f\x06\
\x0e\xc1\x13\x0c\x07\x8f\x75\x31\x64\x97\x86\x30\xc8\x29\x6b\x31\
\x2c\x5d\xba\x14\x68\xf9\x77\x06\x0d\x0d\x75\x60\x82\xfe\x0d\x8c\
\x32\x76\x06\x0b\x25\x01\x86\x5f\x3f\x7e\x32\x5c\xb9\x72\x95\x41\
\x98\x4b\x8d\x81\x55\x90\x9f\xe1\x1d\x23\x3b\x30\x7a\x7f\xc2\x3c\
\xfb\x1f\x20\x80\x58\x88\xcd\x2e\x20\xdf\xbf\x7a\xf5\x88\x41\x4c\
\x8c\x9f\x41\x52\x52\x9e\x21\x39\x5e\x99\x21\x3b\x25\x92\x81\x85\
\x83\x07\x2c\x6f\x67\x67\x07\x57\xfb\xfb\x37\x24\x97\xf0\xf0\xf0\
\x32\xfc\x66\xe7\x62\x30\x30\x34\x66\xb8\x78\xf1\x1a\x83\xa4\x94\
\x38\x30\x3d\x81\x92\xe5\x0f\xb8\x87\x01\x02\x88\x05\xe1\xbb\xff\
\x78\x7c\xcf\x04\xf4\xd5\x0f\xa0\xaf\xfe\x30\xc8\xc9\x29\x83\x83\
\x99\x81\x81\x95\x81\x81\x9b\x93\x81\x0d\x18\x9c\x5f\xbe\x7c\x02\
\x3a\xee\x05\x58\x0d\x1f\x1f\x3f\x30\x4d\x49\x80\x13\xee\xb7\x6f\
\x3f\xc1\x39\x84\x99\x85\x1d\x68\xb9\x08\xc3\xa3\x47\x57\x18\x04\
\x04\x54\x19\x90\xab\x1f\x80\x00\x22\x2a\x04\x40\x89\xef\xc3\x87\
\x97\x0c\xb2\xb2\x52\xc0\x14\xfe\x1f\xec\x58\x90\xa3\x81\xd9\x92\
\xe1\xde\xbd\x07\x0c\x2f\x5e\xbc\x06\x0a\xb1\x03\x2d\x62\x03\x26\
\xde\x27\xc0\x68\x7a\xc2\xa0\xa2\xa2\xca\xc0\xc5\xc5\x0f\x4c\x5f\
\xa0\xf8\xfe\xc3\x20\x2c\x2c\x0e\x4e\xd8\x3f\x7e\x7c\x87\x46\x39\
\x24\x08\x00\x02\x88\x89\x50\x81\x08\x52\xf8\xf7\xef\x6f\xa0\x41\
\x5f\x19\xd8\xd8\xb8\xc0\x96\x42\xa2\x84\x99\xe1\xcd\x9b\x97\x0c\
\xaf\x5f\xbf\x63\xe0\xe1\x95\x64\x78\xfd\xe6\x23\xc3\xb9\x73\x67\
\x19\x38\x38\x85\x80\x39\x8e\x87\xe1\xc6\x8d\x6b\x0c\xc0\x82\x0d\
\x5a\x36\x00\xcb\x83\xff\x4c\x0c\xdc\xdc\xec\xc0\xd0\x7a\x0b\x0c\
\x1d\x84\xbf\x01\x02\x88\x09\x62\x09\xde\xe4\x07\xf4\xc1\x2f\x06\
\x4e\x4e\xa0\x0f\x99\x59\xc1\x86\xc1\x7c\xff\xfe\xfd\x7b\x06\x31\
\x71\x39\x86\x53\xa7\x8e\x30\x24\xa7\x84\x30\x14\x15\xa5\x02\xb3\
\x61\x16\xb0\xe0\xe2\x07\x46\x17\x17\xc3\xdb\xb7\xaf\x80\x96\x41\
\x3c\xf7\xf7\xef\x5f\xb0\x19\x3f\x7e\x7c\x00\x47\x29\x0c\x00\x04\
\x10\x11\x0e\x60\x00\x5b\x0a\x2a\xf9\x60\x6d\x07\x50\xd1\xfb\xf3\
\xe7\x0f\x70\xaa\x11\x13\x17\x65\x58\xb8\x60\x26\xc3\xe7\x4f\x9f\
\x40\x25\x2a\xc3\x89\xe3\x47\x18\xce\x9c\x39\xc6\x20\x2e\x21\x05\
\x2c\x70\x3e\xc2\x2d\x83\x98\xc1\x01\xe4\xff\x03\xab\x83\x15\xe3\
\x00\x01\xc4\x84\x5c\xa6\xe3\x03\x10\x4d\x08\x07\x81\x1c\xf1\x0f\
\xe8\xab\xff\xc0\x7a\x41\x50\x48\x18\x45\x2d\xbf\x00\x3f\xc3\x1f\
\x60\xb4\xfd\xfd\xfb\x0f\x29\x2a\x19\xc0\x25\xe5\xbf\x7f\xa8\xe6\
\x02\x04\x10\x52\x2e\xc0\x57\x06\x30\x83\xf3\x38\xc2\x31\xff\x80\
\xe9\x81\x13\x98\xe8\x58\x18\x5e\x3e\x7f\xca\x50\x5e\x51\xcd\xf0\
\x13\x98\xb8\x5e\x3c\x7f\xce\xe0\xe1\xed\xc3\x60\x6e\x6e\xc9\x70\
\xe3\xfa\x45\x06\x49\x09\x49\x68\x94\x41\xec\x00\x25\x40\x26\x26\
\x56\xb0\xe3\x61\x76\x02\x04\x10\x0b\xa1\x28\x00\x19\xc0\xc6\xc6\
\x06\xcc\x05\xff\x80\x3e\xfa\x05\x54\xcb\x0e\x97\x13\x03\xd6\x01\
\xf7\xee\xdf\x65\x10\x11\x91\x60\x98\x39\x7b\x3e\x38\x7b\xf2\xf0\
\x70\x33\xdc\xbd\x73\x1d\x58\xda\xfd\x64\x90\x90\x90\x02\xc7\x3d\
\xac\x3e\xf9\xf1\xe3\x17\x50\x5e\x84\x01\x58\xc3\xc2\xed\x04\x08\
\x20\x22\xb2\x21\x24\xb8\x39\x39\xf9\x19\xbe\x7e\xfd\xc4\xc0\xcf\
\x2f\x08\x4f\x54\xc2\x22\xa2\x40\xd7\xff\x67\x78\xf2\xe4\x31\xc3\
\xdb\x9f\x7f\x80\xc1\xfe\x87\xe1\xc5\xb3\x3f\xc0\xc4\xca\xc8\x60\
\x6a\x6a\x01\x2e\x0d\x7f\xff\xfe\x01\xcd\x49\xbf\x80\x69\xe2\x27\
\x30\x27\x08\x00\x13\xef\x57\x78\xb4\x03\x04\x10\x51\x51\x00\xb2\
\x8c\x8f\x4f\x88\xe1\xd9\xb3\x7b\xc0\x52\x50\x12\xe8\x20\x48\x28\
\x80\x7c\x22\x21\x21\x0d\x2c\x1d\x45\x81\x86\x7f\x01\xe7\x16\x16\
\x16\x56\xa0\x2f\xf9\xc1\xf1\xff\xfd\xfb\x4f\xb0\x3a\x36\x36\x56\
\x86\xdb\xb7\x6f\x03\x1d\x26\x08\xae\xd4\x90\x01\x40\x00\x11\x99\
\x08\xff\x83\x35\x72\x72\x8a\x82\xf3\x37\x28\x38\x61\x8e\xfe\x0b\
\xf4\x35\x0b\xb0\xa4\x13\x11\x11\x01\x3a\x4e\x0a\x48\x8b\x82\x83\
\x17\xe4\x68\x10\x60\x65\x65\x01\x46\xdf\x6b\x60\xf9\xff\x97\x41\
\x46\x46\x0d\xac\x1e\xb9\xee\x01\x08\x20\xa2\xb2\x21\x2c\x14\x84\
\x84\x44\x81\x05\x12\x2b\xc3\xe5\xcb\xe7\xc0\x7a\x40\x69\x03\x51\
\xd0\x20\xd4\x81\xf8\xa0\x82\x8a\x9d\x9d\x8d\xe1\xdd\xbb\xd7\x0c\
\x77\xee\x3c\x06\x56\x52\xe6\xf0\x34\x85\x0c\x00\x02\x88\x09\x5a\
\xaa\x31\x83\x2a\x1b\x6c\x55\x30\x72\x13\x0b\x54\xf8\x48\x4a\x2a\
\x00\x83\x96\x95\xe1\xc8\x91\xa3\x0c\xcf\x9f\x3f\x01\xfa\x90\x19\
\x5c\xd8\xc0\x13\x15\x30\x74\x40\x05\x0e\xa8\x14\xbc\x7a\xf5\x32\
\x30\xe8\x9f\x31\xe8\xe8\x58\x03\x8b\x65\x0e\x70\x6b\x09\x62\x16\
\x13\xb8\x69\x06\x52\x0f\x10\x40\xe0\x08\x79\xf2\xe4\xcd\xed\x87\
\x0f\xdf\x09\x2b\x2a\x4a\x01\xe3\x15\x54\x9b\x81\xe2\x17\x98\xcf\
\xff\x31\xc1\x2b\x23\x18\x06\xb5\x1b\x84\x85\xe5\x80\x29\xfa\x1b\
\xb0\x04\xbc\x07\xac\x6a\xef\x01\x43\x86\x17\xe8\x30\x71\xb0\x8f\
\x3f\x00\x1b\x25\xcf\x9e\x01\xeb\xff\xef\xff\x80\xe9\x46\x8a\x41\
\x5b\x5b\x05\xec\xf8\x5f\xbf\x80\x25\xc6\x3f\x90\x67\x58\x80\x0d\
\x9e\xcf\xa0\x68\x01\xd5\x68\xff\x01\x02\x88\x11\x14\x24\x52\x52\
\x7a\x76\xe2\xe2\x72\xd5\xbc\xbc\xdc\xb2\xa0\xf8\x06\x66\xf3\xff\
\xa0\x82\x07\x56\xec\x22\xb7\x07\x40\x86\xc1\x2a\x28\x50\xf9\x00\
\xf2\xe9\xb7\x6f\x9f\x80\x21\xc1\x08\xf6\x3d\xa8\xf2\x01\x25\x52\
\x7e\x7e\x21\x50\x33\x0e\x68\xd6\x1f\xa8\xc5\x90\xb4\x04\x64\x33\
\x7e\xfc\xf8\xe6\xe7\xb5\x6b\x07\xf7\x7c\xff\xfe\x69\x0e\x40\x00\
\x31\x42\x2d\x01\x49\x4b\x01\xb1\x1c\x28\x46\xe8\xd0\x1a\x07\x95\
\x87\xc0\x2a\x94\xe1\x11\x40\x00\x31\x0e\x74\xdf\x10\x20\x80\x06\
\xbc\x63\x02\x10\x40\x03\xee\x00\x80\x00\x1a\x70\x07\x00\x04\x18\
\x00\x4e\x12\xc6\x99\x32\x89\xe5\xec\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0a\
\x0c\x91\x67\x27\
\x00\x63\
\x00\x61\x00\x6d\x00\x65\x00\x72\x00\x61\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    """Register the embedded image resources with Qt (auto-generated file)."""
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded image resources from Qt."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Resources are registered as a side effect of importing this module.
qInitResources()
|
3,349 | 73a4b3497952f90029ba24b73b835de53fc687ec | import constants
from auth.storage import Storage
from utils import create_error_with_status
from flask import jsonify, request, current_app
def register_user():
    """Handle a sign-up request.

    Reads ``email`` and ``password`` from the JSON body, stores the user via
    ``Storage.add_user``, and returns a ``(json_body, http_status)`` tuple in
    every case (missing data, success, or duplicate email).
    """
    try:
        email = request.json["email"]
        password = request.json["password"]
    except KeyError:
        status = constants.statuses["user"]["missingData"]
        body = create_error_with_status(status, "missing user data")
        # Fix: log message was misspelled "sing-up".
        current_app.logger.warn("Not enough data for sign-up")
        return jsonify(body), constants.responses[status]
    # Fix: log message was misspelled "Sing up".
    current_app.logger.info(f"Sign up for {email}")
    status = Storage.add_user(email, password)
    http_status = constants.responses[status]
    if status == constants.statuses["user"]["created"]:
        body = dict(status=status, email=email)
    else:  # status == constants.statuses["user"]["emailUsed"]
        # NOTE(review): the doubled braces render literally as "{email}" under
        # str.format-style templating, so the ``email=email`` kwarg would never
        # be interpolated -- confirm create_error_with_status's template syntax.
        body = create_error_with_status(status, "email {{email}} is already registered", email=email)
    return jsonify(body), http_status
3,350 | 6bd47fb71a32b8383a75e72111d802008bc6bc68 |
# coding: utf-8
# In[2]:
from HSTLens_base_classifier_resnet17_s import BaseKerasClassifier
from keras.layers import Activation, AveragePooling2D, MaxPooling2D
from keras.layers import Conv2D, ELU, Dropout, LeakyReLU
from keras.layers.normalization import BatchNormalization
class deeplens_classifier(BaseKerasClassifier):
    def _model_definition(self, net):
        """
        Builds the architecture of the network: three Conv2D -> BatchNorm ->
        LeakyReLU -> MaxPool stages (128, 64, 64 filters) in channels-first
        layout, applied to the input tensor ``net`` and returned.
        """
        # Input filtering and downsampling with max pooling
        # Debug prints left in by the author; identify the variant in logs.
        print(net.shape) #channels must be specified first otherwise keras assumes channels last
        print('resnet17_scp')
        # Stage 1: 5x5 conv, 128 filters; input is a single-channel 100x100 image.
        net = Conv2D( filters=128, kernel_size=5, activation=None, padding='same',
                     data_format="channels_first", input_shape=(1, 100, 100))(net)
        net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels
        net = LeakyReLU()(net)
        net= MaxPooling2D(pool_size=(2,2))(net)
        # Stage 2: 3x3 conv, 64 filters.
        net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format="channels_first")(net)
        net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels
        net = LeakyReLU()(net)
        net= MaxPooling2D(pool_size=(2,2))(net)
        # Stage 3: 3x3 conv, 64 filters.
        net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format="channels_first")(net)
        net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels
        net = LeakyReLU()(net)
        net= MaxPooling2D(pool_size=(2,2))(net)
        return net
# In[ ]:
|
3,351 | f3da38f2c4fda0a1d54e79c2c21070f98002b88d | # -*- coding=utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Default configs."""
from .base import BaseConfig
from zeus.common import ConfigSerializable
class CityscapesCommonConfig(BaseConfig):
    """Shared dataset configuration defaults for Cityscapes."""

    batch_size = 1
    root_path = None
    num_parallel_batches = 64
    fixed_size = True
    train_portion = 1.0

    @classmethod
    def rules(cls):
        """Return type-checking rules for the common config fields."""
        return {
            "batch_size": {"type": int},
            "root_path": {"type": str},
            "num_parallel_batches": {"type": int},
            "fixed_size": {"type": bool},
        }
class CityscapesTrainConfig(CityscapesCommonConfig):
    """Training-split configuration for Cityscapes."""

    batch_size = 1
    list_path = 'train.txt'

    @classmethod
    def rules(cls):
        """Return type-checking rules for the training config fields."""
        return {
            "batch_size": {"type": int},
            "list_path": {"type": str},
        }
class CityscapesValConfig(CityscapesCommonConfig):
    """Validation-split configuration for Cityscapes."""

    batch_size = 1
    list_path = 'val.txt'

    @classmethod
    def rules(cls):
        """Return type-checking rules for the validation config fields."""
        return {
            "batch_size": {"type": int},
            "list_path": {"type": str},
        }
class CityscapesTestConfig(CityscapesCommonConfig):
    """Test-split configuration for Cityscapes (reuses the val list)."""

    batch_size = 1
    list_path = 'val.txt'

    @classmethod
    def rules(cls):
        """Return type-checking rules for the test config fields."""
        return {
            "batch_size": {"type": int},
            "list_path": {"type": str},
        }
class CityscapesConfig(ConfigSerializable):
    """Top-level Cityscapes dataset config grouping common/train/val/test."""

    common = CityscapesCommonConfig
    train = CityscapesTrainConfig
    val = CityscapesValConfig
    test = CityscapesTestConfig

    @classmethod
    def rules(cls):
        """Return type-checking rules: every section must be a dict."""
        return {section: {"type": dict}
                for section in ("common", "train", "val", "test")}

    @classmethod
    def get_config(cls):
        """Return the sub-config classes keyed by section name."""
        return {
            'common': cls.common,
            'train': cls.train,
            'val': cls.val,
            'test': cls.test,
        }
|
3,352 | 85fff1f6e1f69dd0e2e9b5acc90db31d27329c7c | from django import forms
class PasswordChangeForm(forms.Form):
    """Single-field form used to set a user's new password."""
    # Minimum 8 characters; ``strip=False`` preserves leading/trailing
    # whitespace, since spaces are legal in passwords.
    password = forms.CharField(min_length=8,
                               label="New Password*",
                               strip=False,
                               widget=forms.PasswordInput(
                                   attrs={'autocomplete': 'current-password', 'class': 'form-control'}),
                               )
|
3,353 | 70fcf25cd7d70972e8042dc882f6ecb12d36461a | from django.shortcuts import render,redirect,get_object_or_404
from .models import Blog,UseCase,Comment
from courses.models import offerings
from django.contrib.auth.models import User
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.template.defaultfilters import slugify
from .forms import addMainContent
# Create your views here.
def home(request):
    """Landing page: up to three published blogs/use cases and active courses."""
    published_blogs = Blog.objects.filter(status=1)
    active_courses = offerings.objects.filter(course_status=1)
    published_usecases = UseCase.objects.filter(usecase_status=1)
    context = {
        'blog': published_blogs[0:3],
        'usecase': published_usecases[0:3],
        'courses': active_courses[0:3],
    }
    return render(request, 'dlblog/home.html', context)
def blogs(request):
    """List every published blog post (status == 1)."""
    context = {'blog': Blog.objects.filter(status=1)}
    return render(request, 'dlblog/blogs.html', context)
'''
def login(request):
if request.method == 'POST':
user = auth.authenticate(username=request.POST['username'],password=request.POST['password'])
# if user is present
if user is not None:
# do the login
auth.login(request,user)
# if user is present and enters valid credentials
return render(request,'dlonboarding/userhome.html',{'user':user})
else:
return render(request,'dlblog/signin.html',{'error':"Please enter valid Credentials!"})
else:
return render(request, 'dlblog/signin.html',{'error':"Please enter valid Credentials!"})
'''
@login_required
def newblog(request):
    """Render the blank new-blog page (login required)."""
    return render(request,'dlblog/newblog.html')
'''
@login_required
def create(request):
if request.method == 'POST':
if request.FILES['blog_main_image'] and request.POST['title'] and request.POST['summary'] and request.POST['content']:
blog = Blog()
# Fields 1 Blog Image.
blog.blog_main_image = request.FILES['blog_main_image']
# Fields 2 Blog Title.
blog.title = request.POST['title']
# Fields 3 Blog Summary.
blog.summary = request.POST['summary']
# Fields 4 Blog Slug.
blog.slug = slugify(request.POST['title'])
# Fields 5 Blog Author.
blog.author = request.user
# Fields 6 Blog Content
blog.content = request.POST['content']
blog.save()
return redirect('/dlblog/' + blog.slug)
else:
return render(request, 'dlblog/newblog.html', {'error': 'Please enter all details'})
else:
return render(request, 'dlblog/newblog.html')
'''
def add_comment(request):
    """Save a new blog entry from a POSTed ``addMainContent`` form.

    NOTE(review): despite its name this view creates a blog entry exactly
    like ``create`` below (duplicated logic) and is not login-protected, so
    ``instance.author = request.user`` may receive an anonymous user --
    confirm whether that is intentional.
    """
    if request.method == "POST":
        form = addMainContent(request.POST,request.FILES)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.author = request.user
            # Slug is built from the raw POSTed title, not the cleaned form data.
            instance.slug = slugify(request.POST['title'])
            instance.save()
            return redirect('/dlblog/'+instance.slug)
    else:
        form = addMainContent()
    # Reached on GET (unbound form) or when a POSTed form is invalid.
    return render(request, 'dlblog/blog_home.html',{'form':form})
@login_required
def create(request):
    """Create a new blog post (login required); mirrors ``add_comment``."""
    if request.method == "POST":
        form = addMainContent(request.POST,request.FILES)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.author = request.user
            # Slug is built from the raw POSTed title, not the cleaned form data.
            instance.slug = slugify(request.POST['title'])
            instance.save()
            return redirect('/dlblog/'+instance.slug)
    else:
        form = addMainContent()
    # Reached on GET (unbound form) or when a POSTed form is invalid.
    return render(request, 'dlblog/newblog.html',{'form':form})
@login_required
def edit_blog(request,slug):
    """Edit an existing blog post identified by *slug* (login required)."""
    blog = get_object_or_404(Blog, slug=slug)
    if request.method == "POST":
        form = addMainContent(request.POST ,request.FILES,instance=blog)
        try:
            if form.is_valid():
                instance = form.save(commit=False)
                instance.save()
                return redirect('/dlblog/'+instance.slug)
        except Exception as e:
            # NOTE(review): failures are only printed, then the view falls
            # through and re-renders the edit form -- consider logging and
            # surfacing the error to the user.
            print("Error :", e)
    else:
        form = addMainContent(instance=blog)
        slug = blog.slug
    return render(request, 'dlblog/editblog.html', {'form': form,'slug':slug})
def blog_home(request, slug):
    """Display a single blog post looked up by its slug (404 if missing)."""
    entry = get_object_or_404(Blog, slug=slug)
    return render(request, 'dlblog/blog_home.html', {'blog': entry})
|
3,354 | 8205541dcdd4627a535b14c6775f04b80e7c0d15 | '''
Created on Dec 23, 2011
@author: boatkrap
'''
import kombu
from kombu.common import maybe_declare
from . import queues
import logging
logger = logging.getLogger(__name__)
import threading
cc = threading.Condition()
class Publisher:
    """Publishes JSON messages to a durable *direct* kombu exchange.

    The module-level condition variable ``cc`` serialises (re)connection
    and publishing across threads.
    """

    def __init__(self, exchange_name, channel, routing_key=None):
        self.exchange_name = exchange_name
        self._producer = None
        self.exchange = None
        self.channel = channel
        # routing keys whose queues have already been declared
        self.routing_key_list = []
        self.routing_key = routing_key
        self.reconnect(channel)

    def reconnect(self, channel):
        """Rebuild the exchange/producer on *channel*.

        FIX: runs under ``with cc:`` so the lock is released even when
        something other than ``Exception`` (e.g. KeyboardInterrupt)
        escapes; the original acquire()/release() pair leaked the lock
        in that case.
        """
        with cc:
            self.exchange = kombu.Exchange(
                self.exchange_name, type="direct", durable=True)
            self.channel = channel
            try:
                self._producer = kombu.Producer(exchange=self.exchange,
                                                channel=channel, serializer="json",
                                                routing_key=self.routing_key)
                if self.routing_key:
                    self.queue_declare(self.routing_key)
            except Exception as e:
                logger.exception(e)

    def queue_declare(self, routing_key):
        """Declare the queue bound to *routing_key*, at most once each."""
        if routing_key is None:
            return
        if routing_key in self.routing_key_list:
            # already declared by this publisher
            return
        self.routing_key_list.append(routing_key)
        queue = queues.QueueFactory().get_queue(self.exchange, routing_key)
        if queue:
            queue(self.channel).declare()

    def send(self, message, routing_key=None):
        """Publish *message*; return True on success, False on failure."""
        result = False
        with cc:
            try:
                self._producer.publish(message, routing_key=routing_key)
                result = True
            except Exception as e:
                logger.exception(e)
                logger.debug("wait for connection")
        return result

    def drop_routing_key(self, routing_key):
        """Forget *routing_key* so its queue is re-declared next time."""
        logger.debug("drop_routing_key: %s" % routing_key)
        if routing_key in self.routing_key_list:
            self.routing_key_list.remove(routing_key)
class TopicPublisher(Publisher):
    """Publisher variant bound to a durable *topic* exchange."""
    def __init__(self, exchange_name, channel, routing_key=None):
        super().__init__(exchange_name, channel, routing_key)
    def reconnect(self, channel):
        # NOTE(review): unlike Publisher.reconnect, this override neither
        # takes the module lock nor declares a queue for self.routing_key
        # -- confirm the asymmetry is intentional.
        self.exchange = kombu.Exchange(
            self.exchange_name, type="topic", durable=True)
        self.channel = channel
        self._producer = kombu.Producer(exchange=self.exchange,
                                        channel=channel, serializer="json",
                                        routing_key=self.routing_key)
class PublisherFactory:
    """Maps routing keys to the Publisher/TopicPublisher serving them."""

    def __init__(self, channel):
        self.channel = channel

    def get_publisher(self, key):
        """Return a publisher suited to *key*, or None for unknown keys."""
        logger.debug("routing_key: %s" % key)
        if key == "nokkhum_compute.update_status":
            routing_key = "nokkhum_compute.update_status"
            return Publisher(
                "nokkunm_compute.update_status", self.channel, routing_key)

        import fnmatch
        import re
        publisher = None
        regex = fnmatch.translate('nokkhum_compute.*.rpc_*')
        reobj = re.compile(regex)
        if reobj.match(key):
            routing_key = key
            if "nokkhum_compute.*.rpc_response" in routing_key:
                publisher = TopicPublisher(
                    "nokkunm_compute.compute_rpc", self.channel, routing_key)
            # BUG FIX: the original read `elif "nokkhum_compute.*.rpc_request":`
            # -- a bare non-empty string literal, which is always truthy, so
            # every non-response key fell into this branch.  Test membership
            # in the key instead, mirroring the response branch above.
            elif "nokkhum_compute.*.rpc_request" in routing_key:
                publisher = TopicPublisher(
                    "nokkunm_compute.rpc", self.channel, routing_key)
        return publisher
|
3,355 | e2e34db52e17c188cab63a870f0bc77cbc9ef922 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import random
import helper as hp
def insertion_sort(items, start, end):
    """Sort items[start..end] (both inclusive) in place; return the list."""
    for right in range(start + 1, end + 1):
        pos = right
        # Slide the new element left until it is no smaller than its neighbour.
        while pos > start and items[pos] < items[pos - 1]:
            items[pos], items[pos - 1] = items[pos - 1], items[pos]
            pos -= 1
    return items
def merge_sort(items):
    """In-place top-down merge sort of *items*.

    Small sub-ranges are finished with insertion sort; the merge step
    copies the right half in reverse ("bitonic" trick) so the two scan
    pointers converge without bounds checks.

    FIX: the recursion midpoint now uses floor division (``//``).  The
    original ``(lo + hi) / 2`` yields a float under Python 3, producing
    invalid slice/recursion indices.  The small-range sort is also nested
    here so the function is self-contained.
    """
    aux = items[:]

    def insertion(lo, hi):
        # Straight insertion sort of items[lo..hi] inclusive.
        for i in range(lo + 1, hi + 1):
            for j in range(i, lo, -1):
                if items[j] < items[j - 1]:
                    items[j], items[j - 1] = items[j - 1], items[j]
                else:
                    break

    def merge(lo, mid, hi):
        # Already ordered across the seam -> nothing to do.
        if items[mid] <= items[mid + 1]:
            return
        aux[lo:mid + 1] = items[lo:mid + 1]
        # Copy the right half in decreasing order: the largest elements
        # then guard both ends, removing per-step bounds checks.
        aux[mid + 1:hi + 1] = items[mid + 1:hi + 1][::-1]
        head, tail = lo, hi
        for k in range(lo, hi + 1):
            if aux[head] < aux[tail]:
                items[k] = aux[head]
                head += 1
            else:
                items[k] = aux[tail]
                tail -= 1

    def sort(lo, hi):
        if hi <= lo:
            return
        # Use insertion sort for small pieces.
        if (hi - lo) < 5:
            insertion(lo, hi)
            return
        mid = (lo + hi) // 2
        sort(lo, mid)
        sort(mid + 1, hi)
        merge(lo, mid, hi)

    sort(0, len(items) - 1)
if __name__ == '__main__':
    # Demo: sort 20 random ints and show before/after.
    # FIX: range()/print(...) work under both Python 2 and 3; the original
    # xrange/print-statement forms were Python-2 only.
    items = [random.randint(1, 1000) for _ in range(20)]
    print(items)
    merge_sort(items)
    print(items)
|
3,356 | 96910e9b6861fc9af0db3a3130d898fd1ee3daad | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 foree <foree@foree-pc>
#
# Distributed under terms of the MIT license.
"""
配置logging的基本配置
"""
import logging
import sys
import os
from common.common import get_root_path
# Handler thresholds: everything goes to the file, warnings+ to stdout.
FILE_LEVEL = logging.DEBUG
STREAM_LEVEL = logging.WARN
LOG_DIR = os.path.join(get_root_path(), 'log')
PATH_LOG = os.path.join(LOG_DIR, 'advanced_build_kit.log')
# FIX: makedirs(exist_ok=True) avoids the check-then-create race of the
# original `if not exists: mkdir` pair and creates parents if needed.
os.makedirs(LOG_DIR, exist_ok=True)
# Touch the log file; append mode never truncates an existing log, and
# the context manager closes the handle deterministically.
with open(PATH_LOG, 'a'):
    pass
# Root logger: module loggers created elsewhere inherit these handlers.
# The root level must be at least as verbose as the most verbose handler.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Shared formatter: timestamp, pid/file, level/function(line): message.
message_fmt = "%(asctime)s %(process)d/%(filename)s %(levelname)s/%(funcName)s(%(lineno)d): %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=message_fmt, datefmt=datefmt)
# File handler: records everything at FILE_LEVEL and above.
fh = logging.FileHandler(PATH_LOG)
fh.setLevel(FILE_LEVEL)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Stdout handler: only STREAM_LEVEL (warnings) and above reach the console.
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(STREAM_LEVEL)
sh.setFormatter(formatter)
logger.addHandler(sh)
|
3,357 | e533b7aadd1cd7137301af8862dd2987622e499e | #!/bin/env python
from boincvm_common.stomp.StompProtocol import StompProtocolFactory
from stomp.HostStompEngine import HostStompEngine
from boincvm_host.xmlrpc.HostXMLRPCService import HostXMLRPCService
from twisted.internet import reactor
from ConfigParser import SafeConfigParser
import coilmq.start
import logging
import multiprocessing
import time
import pdb
logging.basicConfig(level=logging.DEBUG, \
format='%(asctime)s - %(name)s - %(levelname)s: %(message)s', )
logger = logging.getLogger(__name__)
def startSTOMPBroker(config, serverUpEvent, tries=-1, delay=1, backoff=1.5):
    """
    Run a coilmq STOMP broker, retrying startup failures with backoff.

    Sets *serverUpEvent* once the broker is listening, then blocks in
    serve_forever().

    @param tries number of times to retry starting the broker. < 0 means infinitely many.
    @param delay number of seconds to wait after the first failed attempt
    @param backoff factor by which the delay will be incremented after a failure.
    """
    #stomp broker
    mtries = tries
    mdelay = delay
    coilserver = None
    # Copy any [coilmq] section of our config into coilmq's own config.
    from coilmq.config import config as coilconfig
    if config.has_section('coilmq'):
        for k,v in config.items('coilmq'):
            coilconfig.set('coilmq', k, v)
            logger.debug("Set %s to %s for coilmq config." % (k,v))
    while True:
        try:
            coilserver = coilmq.start.server_from_config(coilconfig)
            logger.info("Stomp server listening on %s:%s" % \
                    coilserver.server_address)
            serverUpEvent.set()
            coilserver.serve_forever()
            # NOTE(review): if serve_forever() returns normally the loop
            # restarts the broker -- presumably intended for resilience.
        except IOError as ex:
            logger.error("Exception while starting coilmq broker: '%s'", ex)
            if mtries != 0:
                logger.debug("Retrying coilmq startup in %.1f seconds...", mdelay)
                time.sleep(mdelay)
                mdelay *= backoff
                mtries -= 1
            else:
                logger.debug("Ran out of trials (tried %d times) for coilmq startup. Giving up.", tries)
                break
        finally:
            # Runs on every loop iteration, so each server instance is closed
            # before a retry/restart.
            if coilserver: coilserver.server_close()
def start(config, brokerTimeout = 60.0):
    """
    Start twisted event loop and the fun should begin...

    Spawns the STOMP broker in a daemon subprocess, waits up to
    *brokerTimeout* seconds for it to signal readiness, then wires the
    host STOMP engine and the XML-RPC service into the reactor.

    @param brokerTimeout how long to wait for a broker
    @return a negative number upon failure. Otherwise, it never returns.
    """
    manager = multiprocessing.Manager()
    serverUpEvent = manager.Event()
    broker = multiprocessing.Process(target=startSTOMPBroker, args=(config,serverUpEvent))
    # daemon=True: the broker dies with this process instead of being orphaned
    broker.daemon = True
    broker.name = 'STOMP-Broker'
    broker.start()
    serverUpEvent.wait(brokerTimeout)
    if not serverUpEvent.is_set():
        logger.fatal("Broker not available after %.1f seconds. Giving up", brokerTimeout)
        return -1
    #host side logic
    host = config.get('Broker', 'host')
    port = int(config.get('Broker', 'port'))
    username = config.get('Broker', 'username')
    password = config.get('Broker', 'password')
    hostEngine = HostStompEngine(config)
    stompProtocolFactory = StompProtocolFactory(hostEngine, username, password)
    HostXMLRPCService(config).makeEngineAccesible(hostEngine)
    reactor.connectTCP(host, port, stompProtocolFactory)
    # Blocks until the reactor is stopped.
    reactor.run()
if __name__ == '__main__':
    # Python-2 entry point (print statement, SafeConfigParser import above):
    # usage: script.py <config-file>
    from sys import argv, exit
    if len(argv) < 2:
        print "Usage: %s <config-file>" % argv[0]
        exit(-1)
    else:
        configFile = argv[1]
        config = SafeConfigParser()
        config.read(configFile)
        exit(start(config))
|
3,358 | fcfec521e071aa586febc74efb2deb0e9d0a331e | from sys import stdin
def IsPrime(x):
    """Trial-division primality test up to sqrt(x).

    FIX: returns False for x < 2.  In the original the loop body never
    ran for 0 and 1 (and negatives), so they were wrongly reported prime.
    """
    if x < 2:
        return False
    for i in range(2, int(x ** 0.5) + 1):
        if not x % i:
            return False
    return True
# Classify each input number: not prime / prime (incl. palindromic primes)
# / emirp (prime whose reversal is also a different prime... here: prime
# whose reversal is also prime and differs from it).
for x in stdin:
    # FIX: rstrip('\n') is safe when the final line has no trailing
    # newline; the original x[:-1] chopped a digit off such a line.
    x = x.rstrip('\n')
    y = x[::-1]
    a = IsPrime(int(x))
    b = IsPrime(int(y))
    if not a:
        print("%s is not prime." %x)
    elif (a and not b) or (a and x == y):
        # prime whose reversal is not prime, or a palindromic prime
        print("%s is prime." %x)
    else:
        print("%s is emirp." %x)
|
3,359 | 0065a493767a2080a20f8b55f76ddeae92dc27f1 | /home/mitchellwoodbine/Documents/github/getargs/GetArgs.py |
3,360 | 1ab5c6a56ac229c5a9892a9848c62a9a19a0dda7 | print('\n----------------概率与统计--------------------')
import numpy as np
import scipy
import sympy as sym
import matplotlib.pyplot as plt
import sklearn.datasets as sd
iris = sd.load_iris()
x1 = np.random.random([10000]) # 均匀分布
x2 = np.random.normal(2, 1, [10000]) # 正态分布
x3 = np.random.normal(5, 1, [10000]) # 正态分布
# print(len(x1),len(x2))
# print(x1.shape,x2.shape)
# coin = np.random.randint(0, 3, [1000])
# print(coin)
# print(np.mean(coin))
# plt.hist(coin)
# plt.hist(x1, bins=20)
x1_mu = np.mean(x1)
x1_std = np.std(x1)
x2_mu = np.mean(x2)
x2_std = np.std(x2)
x3_mu = np.mean(x3)
x3_std = np.std(x3)
print('\n-----------------高斯分布-------------------')
def mode(x, mu, std):
    """Gaussian probability density at *x* for N(mu, std**2)."""
    coeff = 1 / (np.sqrt(2 * np.pi) * std)
    return coeff * np.exp(-((x - mu) ** 2) / (2 * std ** 2))
# xplot = np.linspace(-2, 8, 10000)
# print(x2_mu, x2_std, x3_mu, x3_std)
# x1_guass = mode(xplot, x1_mu, x1_std)
# x2_guass = mode(xplot, x2_mu, x2_std)
# x3_guass = mode(xplot, x3_mu, x3_std)
# plt.plot(xplot, x1_guass)
# plt.plot(xplot, x2_guass)
# plt.plot(xplot, x3_guass)
# plt.hist(x1, bins=30, alpha=0.5, density=True)
# plt.hist(x2, bins=30, alpha=0.5, density=True)
# plt.hist(x3, bins=30, alpha=0.5, density=True)
# plt.show()
print('\n---------------散点图---------------------')
# x6 depends linearly on x5 plus Gaussian noise -- a correlated pair used
# by the covariance/correlation demos below.
x5 = np.random.normal(1, 1, [1000])
x6 = 2 * x5 + 1 + np.random.normal(0, 0.6, [1000])  # noise
# plt.scatter(x5, x6)
# plt.show()
print('\n-----------------协方差-------------------')
rand1 = np.random.normal(loc=1, scale=3, size=[1000]) * 10
rand2 = np.random.normal(1, 3, size=[1000]) * 10
# plt.hist(rand1, bins=30, alpha=0.5, density=True)
# plt.hist(rand2, bins=30, alpha=0.5, density=True)
# plt.show()
def conv(dt1, dt2):
    """Sample covariance (biased, 1/N) of two equal-length samples."""
    d1 = dt1 - np.mean(dt1)
    d2 = dt2 - np.mean(dt2)
    return np.mean(d1 * d2)
# Covariance of the correlated pair defined above (x6 ~ 2*x5 + 1 + noise).
print('conv', conv(x5, x6))
print('\n----------------线性相关系数--------------------')
def rho(p1, p2):
    """Pearson linear correlation coefficient of two samples."""
    return conv(p1, p2) / (np.std(p1) * np.std(p2))
print('\n---------------坐标轴旋转---------------------')
print('rho', rho(x5, x6))
# plt.scatter(x5, x6)
# plt.axis("equal")
# plt.show()
print('\n---------------信息熵---------------------')
px = np.linspace(0.01, 0.99, 1000)
# Binary entropy H(p) = p*ln(1/p) + (1-p)*ln(1/(1-p)), in nats.
y = px * np.log((1 / px)) + (1 - px) * np.log(1 / (1 - px))
plt.plot(px, y)
plt.show()
|
3,361 | 7b9bf791d52fdc801e24d0c8541d77d91a488e12 | from typing import Any, Sequence, Callable, Union, Optional
import pandas as pd
import numpy as np
from .taglov import TagLoV
def which_lov(series: pd.Series,
              patterns: Sequence[Sequence[Any]],
              method: Optional[Union[Callable, str]] = None,
              **kwargs) -> np.ndarray:
    """Which list-of-values does every element of series match first?
    Warnings:
        Order of LoVs is important as only the first match is considered.
    Args:
        series: pandas Series of data with index
        patterns: list of lists-of-values
        method: method to use for pattern matching
            Options are:
            - None: elements of series and values are checked for equality
            - 'match', 'contains', 'startswith', 'endswith': pandas'\
            Series.str.<...> methods used, with arguments passed as kwargs
            - custom function that accepts series, values (flat list of all\
            values across all LoVs) and kwargs
        **kwargs:
            additional keyword arguments to pass to matching functions
    Returns:
        Numeric numpy array
        - 0 means no match found in any of the LoV
        - 1 means some value of LoV #0 matched
        - 2 means some value of LoV #1 matched
        - etc.
    """
    # Flatten to (lov_number, value) pairs; numbers start at 1 so that 0
    # can represent "no match".
    elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]
    if not elov:
        return np.zeros(series.size, int)
    num, value = zip(*elov)
    # lov_idx_plus[0] == 0 is the sentinel for unmatched elements.
    lov_idx_plus = np.concatenate(([0], num))
    if method is None:
        # Broadcast equality: mm[j, k] == (series[k] == value[j]).
        mm = series.to_numpy() == np.array(value)[:, np.newaxis]
    elif not callable(method):  # assume name of pd.Series.str method
        ptns = pd.Series(value)
        # Non-string elements must count as non-matches, not NaN.
        kwargs['na'] = False
        do_match = getattr(series.str, method)
        mm = ptns.apply(do_match, **kwargs).values
    else:
        mm = method(series, value, **kwargs)
    # argmax(axis=0) is the row of the first matching value; adding
    # any(axis=0) shifts the index by one exactly when a match exists, so
    # unmatched columns land on the leading 0 of lov_idx_plus.
    return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]
def which_tag(series: pd.Series,
              taglov: Union[TagLoV, Any],
              na: Any,
              donor: pd.Series = None,
              method: Optional[Union[Callable, str]] = None,
              **kwargs):
    """Tag every element of *series* by its first matching list-of-values.

    Elements matched by some LoV in *taglov* receive that LoV's tag;
    unmatched elements take the value from *donor* at the same index when
    available, and *na* otherwise.  Matching semantics are those of
    :any:`which_lov`.

    Args:
        series: pandas Series of data
        taglov: tagged list of values: TagLov object or anything that can
            properly initialise it, including None
        na: value to use if element is not matched, last resort
        donor: pandas Series of data to pick in case element is not matched
        method: name of Series.str.<...> method or None for equality check or
            custom function
        **kwargs: arguments to the method
    Returns:
        Series
    """
    if series.empty:
        return series
    tlv = taglov if isinstance(taglov, TagLoV) else TagLoV(taglov)
    hits = which_lov(series, tlv.lovs, method, **kwargs)
    # Index 0 of the lookup is the "no match" fallback value.
    lookup = np.array((na, *tlv.tags))
    result = pd.Series(lookup[hits], index=series.index)
    if isinstance(donor, pd.Series):
        # Replace fallback values with donor data where the index overlaps.
        unmatched = series.index[~hits.astype(bool)]
        if not unmatched.empty:
            take = unmatched.intersection(donor.index)
            if not take.empty:
                result[take] = donor[take]
    return result
|
3,362 | 85c51f155439ff0cb570faafc48ac8da094515bf | # the age of some survivors
# Ages of sampled Titanic survivors.
survived_age = [
    48.0, 15.0, 40.0, 36.0, 47.0, 32.0, 60.0, 31.0, 17.0, 36.0, 39.0, 36.0,
    32.5, 39.0, 38.0, 36.0, 52.0, 29.0, 35.0, 35.0, 49.0, 16.0, 27.0, 22.0,
    27.0, 35.0, 3.0, 11.0, 36.0, 1.0, 19.0, 24.0, 33.0, 43.0, 24.0, 32.0,
    49.0, 30.0, 49.0, 60.0, 23.0, 26.0, 24.0, 40.0, 25.0, 36.0, 48.0, 21.0,
    29.0, 24.0, 44.0, 41.0, 2.0, 28.0, 40.0, 22.0, 33.0, 35.0, 24.0, 28.0,
    17.0, 16.0, 48.0]
# Ages of sampled victims.
non_survived_age = [
    47.0, 55.0, 36.0, 38.0, 19.0, 24.0, 36.0, 45.5, 45.0, 46.0, 57.0, 25.0,
    58.0, 46.0, 50.0, 56.0, 58.0, 62.0, 64.0, 39.0, 21.0, 47.0, 45.0, 18.0,
    70.0, 2.0, 36.0, 61.0, 47.0, 29.0, 40.0, 19.0, 65.0, 50.0, 54.0, 36.5,
    31.0]
# Mean age of each group, rounded to two decimal places.
ave_survived_age = round(sum(survived_age) / len(survived_age), 2)
ave_non_survived_age = round(sum(non_survived_age) / len(non_survived_age), 2)
print("The ave_age of survivors is {}".format(ave_survived_age))
print("The ave_age of victims is {}".format(ave_non_survived_age))
# The ave_age of survivors is 31.71
# The ave_age of victims is 42.65
3,363 | 49703775da87e8cbbe78a69c91a68128c3fd78e1 | from django.shortcuts import render, redirect
from .models import League, Team, Player
from django.db.models import Count
from . import team_maker
def index(request):
    """ORM practice view: runs a battery of filter/exclude/annotate
    queries over League, Team and Player and renders them all.

    The query variables are grouped roughly by exercise: simple field
    filters, ordering, cross-relation lookups, and annotations.
    """
    baseball = League.objects.filter(name__contains='Baseball')
    women_league = League.objects.filter(name__contains='women')
    hockey_league = League.objects.filter(sport__contains='hockey')
    not_football = League.objects.exclude(sport='soccer')
    conference = League.objects.filter(name__contains='conference')
    atlanta = Team.objects.filter(location='Atlanta')
    dallas = Team.objects.filter(location='Dallas')
    raptors = Team.objects.filter(team_name__contains='Raptors')
    city = Team.objects.filter(location__contains='city')
    ti = Team.objects.filter(team_name__startswith='T')
    order = Team.objects.order_by('location')
    Iorder = Team.objects.order_by('-team_name')
    cooper = Player.objects.filter(last_name='Cooper')
    joshua = Player.objects.filter(first_name='Joshua')
    josh = Player.objects.filter(
        last_name='Cooper') & Player.objects.exclude(first_name='Joshua')
    aw = Player.objects.filter(
        first_name='Alexander') | Player.objects.filter(first_name='Wyatt')
    # second part
    atlantic = Team.objects.filter(league__name='Atlantic Soccer Conference')
    player_boston = Player.objects.filter(
        curr_team__team_name='Penguin', curr_team__location='Boston')
    jugadores = Player.objects.filter(
        curr_team__league__name='International Collegiate Baseball Conference')
    amateur_soccer = Player.objects.filter(
        curr_team__league__name='American Amateur Soccer Conference').filter(last_name='Lopez')
    soccer = Player.objects.filter(all_teams__league__sport='Soccer')
    sophia = Team.objects.filter(curr_players__first_name='Sophia')
    sophia_leagues = League.objects.filter(
        teams__curr_players__first_name='Sophia')
    flores = Player.objects.filter(last_name='FLores').exclude(
        curr_team__team_name='Washington Roughriders')
    evans = Team.objects.filter(all_players__first_name='Samuel', all_players__last_name='Evans') & Team.objects.filter(
        curr_players__first_name='Samuel', curr_players__last_name='Evans')
    thunder_cat = Player.objects.filter(
        all_teams__team_name='Tigers') | Player.objects.filter(curr_team__team_name='Tigers')
    # whichitas team
    # Former (no longer current) players of the Wichita Vikings.
    try:
        loswichitavikin = Team.objects.get(
            team_name="Vikings", location="Wichita")
        wichita_players = loswichitavikin.all_players.all()
        wichita_current_ids = [
            player.id for player in loswichitavikin.curr_players.all()]
        not_now_wichita = [
            player for player in wichita_players if player.id not in wichita_current_ids]
    except Team.DoesNotExist:
        not_now_wichita = []
    joshuas2 = Player.objects.filter(first_name='Joshua') & Player.objects.filter(
        all_teams__league__name='Atlantic Federation of Collegiate Baseball Athletics')
    team12 = Team.objects.annotate(Count('curr_players')).annotate(Count(
        'all_players')).filter(curr_players__count__gte=12).filter(all_players__count__gte=12)
    orderplayer = Player.objects.annotate(
        Count('all_teams')).order_by('all_teams__count')
    '''
    Detroit colt 4
    try:
        loswichitavikin = Team.objects.get(team_name = "Vikings", location = "Wichita")
        wichita_players = loswichitavikin.all_players.all()
        wichita_current_ids = [player.id for player in loswichitavikin.curr_players.all()]
        not_now_wichita = [player for player in wichita_players if player.id not in wichita_current_ids]
    except Team.DoesNotExist:
        not_now_wichita = []
    '''
    # jacob 12
    context = {
        "leagues": League.objects.all(),
        "teams": Team.objects.all(),
        "players": Player.objects.all(),
        'baseball': baseball,
        'women_league': women_league,
        'hockey_league': hockey_league,
        'not_football': not_football,
        'conference': conference,
        'atlanta': atlanta,
        'dallas': dallas,
        'raptor': raptors,
        'city': city,
        'ti': ti,
        'order': order,
        'Iorder': Iorder,
        'cooper': cooper,
        'joshua': joshua,
        'josh': josh,
        'aw': aw,
        'atlantic': atlantic,
        'player_boston': player_boston,
        'jugadores': jugadores,
        'amateur_soccer': amateur_soccer,
        'soccer': soccer,
        'sophia': sophia,
        'sophia_leagues': sophia_leagues,
        'flores': flores,
        'evans': evans,
        'thunder_cat': thunder_cat,
        'not_now_wichita': not_now_wichita,
        'joshuas2': joshuas2,
        'team12': team12,
        'orderplayer': orderplayer
    }
    return render(request, "leagues/index.html", context)
def make_data(request):
    """Populate the demo database with random leagues, teams and players."""
    scale = 2
    team_maker.gen_leagues(10 * scale)
    team_maker.gen_teams(50 * scale)
    team_maker.gen_players(200 * scale)
    return redirect("index")
|
3,364 | 68fa47e528e5c7c553c3c49ee5b7372b8a956302 | import socket
import struct
from fsuipc_airspaces.position import Position
# Adapted from tools/faker.js in github.com/foucdeg/airspaces
# Frame header: the first four bytes spell ASCII "DATA" (68,65,84,65);
# the remaining header bytes and paddings reproduce the layout used by
# tools/faker.js in github.com/foucdeg/airspaces (see comment above).
_START_BUFFER = bytes([68, 65, 84, 65, 60, 20, 0, 0, 0])
_END_BUFFER = bytes([0] * 20)
# Sub-record header preceding the transponder float.
_START_TRANSPONDER = bytes([104, 0, 0, 0, 0, 0, 0, 0])
_END_TRANSPONDER = bytes([0] * 24)
def _encode(position: Position) -> bytes:
    """Serialise *position* into an X-Plane-style UDP datagram payload."""
    coords = struct.pack("<fff", position.latitude, position.longitude,
                         position.altitude)
    squawk = struct.pack("<f", position.transponder)
    return b"".join([_START_BUFFER, coords, _END_BUFFER,
                     _START_TRANSPONDER, squawk, _END_TRANSPONDER])
class XPlaneDataOut():
    """Sends encoded Position frames to an X-Plane-style UDP endpoint."""
    def __init__(self, host: str, port: int) -> None:
        self.address = (host, port)
        self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    def write(self, data: Position) -> None:
        # One datagram per position update; UDP, so delivery is best-effort.
        self.socket.sendto(_encode(data), self.address)
|
3,365 | 362bfc5a35b09817ce071e71a72e574a28ea287d | from itertools import groupby
def solve(tribes):
    """Count the attacks that overtop the current wall, raising it as we go.

    Each tribe record unpacks as (D, N, W, E, S, DD, DP, DS): N attacks,
    where attack i happens on day D + DD*i, covers positions W..E shifted
    by DP*i, with strength S + DS*i.
    """
    attacks = []
    for t in tribes:
        D, N, W, E, S, DD, DP, DS = t
        for i in range(N):
            d = D + DD * i
            w = W + DP * i
            e = E + DP * i
            s = S + DS * i
            attacks.append((d, w, e, s))
    attacks = sorted(attacks)
    ret = 0
    days = []
    # Group attacks by day: all attacks of one day see the same wall.
    for k, g in groupby(attacks, key=lambda x: x[0]):
        days.append(list(g))
    wall = {}
    for day in days:
        for a in day:
            _d, w, e, s = a
            # NOTE(review): range(w, e) treats the east coordinate as
            # exclusive, with unit-width sections keyed by integer
            # position -- confirm against the problem statement.
            for i in range(w, e):
                h = wall.get(i, 0)
                if h < s:
                    ret += 1
                    break
        # Only after the whole day is counted does the wall grow.
        for a in day:
            _d, w, e, s = a
            for i in range(w, e):
                wall[i] = max(wall.get(i, 0), s)
    return ret
def line(f):
    """Read one line of whitespace-separated ints from *f* as a list.

    FIX/generalisation: returns a concrete list instead of the bare
    ``map(...)`` result.  Under Python 3 that object is a one-shot
    iterator with no ``len``, so materialising it keeps the value
    re-iterable and sized on either Python version (backward compatible
    with the Python-2 behaviour, where map already returned a list).
    """
    return [int(tok) for tok in f.readline().split()]
def main(f):
    """Read T test cases from *f* and print one 'Case #i: answer' line each.

    Input format: T, then for each case N followed by N tribe records.
    """
    (T,) = line(f)
    for i in range(T):
        (N,) = line(f)
        tribes = []
        for j in range(N):
            rec = line(f)
            tribes.append(rec)
        assert len(tribes) == N
        print('Case #{}: {}'.format(i + 1, solve(tribes)))
if __name__ == '__main__':
    # Reads the problem input from stdin; swap in the commented block
    # below to run against a local sample file instead.
    import sys
    main(sys.stdin)
    #with open('sample.in') as f:
    #main(f)
|
3,366 | 958f6e539f9f68892d77b6becc387581c6adfa16 | """
Tests for the Transformer RNNCell.
"""
import pytest
import numpy as np
import tensorflow as tf
from .transformer import positional_encoding, transformer_layer
from .cell import (LimitedTransformerCell, UnlimitedTransformerCell,
inject_at_timestep, sequence_masks)
def test_inject_at_timestep():
    """Check inject_at_timestep replaces exactly one timestep per batch
    entry (at the given index) against a hand-computed expectation."""
    with tf.Graph().as_default():
        with tf.Session() as sess:
            in_seq = tf.constant(np.array([
                [
                    [1, 2, 3, 4],
                    [5, 6, 7, 8],
                ],
                [
                    [9, 10, 11, 12],
                    [13, 14, 15, 16],
                ],
                [
                    [17, 18, 19, 20],
                    [21, 22, 23, 24],
                ],
            ], dtype='float32'))
            injection = tf.constant(np.array([
                [-1, -2, -3, -4],
                [-5, -6, -7, -8],
                [-9, -10, -11, -12],
            ], dtype='float32'))
            # one injection index per batch entry
            indices = np.array([0, 1, 0], dtype='int32')
            injected = sess.run(inject_at_timestep(indices, in_seq, injection))
            expected = np.array([
                [
                    [-1, -2, -3, -4],
                    [5, 6, 7, 8],
                ],
                [
                    [9, 10, 11, 12],
                    [-5, -6, -7, -8],
                ],
                [
                    [-9, -10, -11, -12],
                    [21, 22, 23, 24],
                ],
            ], dtype='float32')
            assert (injected == expected).all()
def test_sequence_masks():
    """Check sequence_masks yields 0 for visible positions and -inf past
    each sequence's length."""
    with tf.Graph().as_default():
        with tf.Session() as sess:
            indices = tf.constant(np.array([3, 1, 2], dtype='int32'))
            actual = sess.run(sequence_masks(indices, tf.constant(4, dtype=tf.int32), tf.float32))
            expected = np.array([
                [0, 0, 0, 0],
                [0, 0, -np.inf, -np.inf],
                [0, 0, 0, -np.inf],
            ], dtype='float32')
            assert (actual == expected).all()
@pytest.mark.parametrize('cell_cls', [LimitedTransformerCell, UnlimitedTransformerCell])
@pytest.mark.parametrize('num_layers', [1, 2, 6])
def test_basic_equivalence(cell_cls, num_layers):
    """
    Test that both transformer implementations produce the
    same outputs when applied to a properly-sized
    sequence.
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            pos_enc = positional_encoding(4, 6, dtype=tf.float64)
            in_seq = tf.get_variable('in_seq',
                                     shape=(3, 4, 6),
                                     initializer=tf.truncated_normal_initializer(),
                                     dtype=tf.float64)
            cell = cell_cls(pos_enc, num_layers=num_layers, num_heads=2, hidden=24)
            actual, _ = tf.nn.dynamic_rnn(cell, in_seq, dtype=tf.float64)
            # Re-enter the RNN's variable scopes with reuse=True so the
            # plain transformer_layer stack shares the cell's weights.
            with tf.variable_scope('rnn', reuse=True):
                with tf.variable_scope('transformer', reuse=True):
                    expected = in_seq + pos_enc
                    for _ in range(num_layers):
                        expected = transformer_layer(expected, num_heads=2, hidden=24)
            sess.run(tf.global_variables_initializer())
            actual, expected = sess.run((actual, expected))
            assert not np.isnan(actual).any()
            assert not np.isnan(expected).any()
            assert actual.shape == expected.shape
            assert np.allclose(actual, expected)
@pytest.mark.parametrize('cell_cls', [UnlimitedTransformerCell])
def test_past_horizon(cell_cls):
    """
    Test the cell when the input sequence is longer than
    the time horizon.
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Horizon of 4 timesteps, sequence of 5.
            pos_enc = positional_encoding(4, 6, dtype=tf.float64)
            in_seq = tf.get_variable('in_seq',
                                     shape=(3, 5, 6),
                                     initializer=tf.truncated_normal_initializer(),
                                     dtype=tf.float64)
            cell = cell_cls(pos_enc, num_layers=3, num_heads=2, hidden=24)
            actual, _ = tf.nn.dynamic_rnn(cell, in_seq, dtype=tf.float64)
            def apply_regular(sequence):
                # Shares the RNN cell's weights via reuse=True.
                with tf.variable_scope('rnn', reuse=True):
                    with tf.variable_scope('transformer', reuse=True):
                        expected = sequence + pos_enc
                        for _ in range(3):
                            expected = transformer_layer(expected, num_heads=2, hidden=24)
                        return expected
            # First 4 outputs come from the first window; the 5th from the
            # window shifted by one.
            expected = tf.concat([apply_regular(in_seq[:, :-1]),
                                  apply_regular(in_seq[:, 1:])[:, -1:]], axis=1)
            sess.run(tf.global_variables_initializer())
            actual, expected = sess.run((actual, expected))
            assert not np.isnan(actual).any()
            assert not np.isnan(expected).any()
            assert actual.shape == expected.shape
            assert np.allclose(actual, expected)
@pytest.mark.parametrize('cell_cls', [LimitedTransformerCell, UnlimitedTransformerCell])
def test_mismatched_starts(cell_cls):
    """
    Test the cell when the states are split up and
    recombined from different timesteps.
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            pos_enc = positional_encoding(5, 6, dtype=tf.float64)
            in_seq = tf.get_variable('in_seq',
                                     shape=(3, 5, 6),
                                     initializer=tf.truncated_normal_initializer(),
                                     dtype=tf.float64)
            cell = cell_cls(pos_enc, num_layers=3, num_heads=2, hidden=24)
            # States captured after 1, 2 and 3 timesteps respectively.
            _, states_1 = tf.nn.dynamic_rnn(cell, in_seq[:, :1], dtype=tf.float64)
            _, states_2 = tf.nn.dynamic_rnn(cell, in_seq[:, :2], dtype=tf.float64)
            _, states_3 = tf.nn.dynamic_rnn(cell, in_seq[:, :3], dtype=tf.float64)
            # Mix batch entries from the three different start offsets.
            new_states = tuple(tf.stack([s2[0], s3[1], s1[2]], axis=0)
                               for s1, s2, s3 in zip(states_1, states_2, states_3))
            full_seq, _ = tf.nn.dynamic_rnn(cell, in_seq, dtype=tf.float64)
            expected = tf.stack([full_seq[0, 2:4], full_seq[1, 3:5], full_seq[2, 1:3]], axis=0)
            inputs = tf.stack([in_seq[0, 2:4], in_seq[1, 3:5], in_seq[2, 1:3]], axis=0)
            actual, _ = tf.nn.dynamic_rnn(cell, inputs, initial_state=new_states)
            sess.run(tf.global_variables_initializer())
            actual, expected = sess.run((actual, expected))
            assert not np.isnan(actual).any()
            assert not np.isnan(expected).any()
            assert actual.shape == expected.shape
            assert np.allclose(actual, expected)
|
3,367 | c6b80a7dfce501bfe91f818ac7ab45238a0a126b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import enhancedyaml
import vector
def roots_of_n_poly_eq(n, x, var_upper_bounds=tuple()):
    '''Find all non-negative integer roots of an `n`-variable sum equal to `x`.

    Python-2 code (xrange, list-returning map).  ``var_upper_bounds``
    optionally caps each variable in turn.
    '''
    # Count down from x (or the current variable's upper bound, if given).
    countdown = lambda: xrange(x if not var_upper_bounds else var_upper_bounds[0], -1, -1)
    if n <= 0:
        return []
    elif n == 1:
        # NOTE(review): this yields [i] for every i in the countdown, not
        # just [x]; for "sum equals x" only [x] would qualify -- confirm
        # whether the broader semantics is intentional.
        return map(lambda i: [i], countdown())
    elif n == 2:
        return map(lambda i: [i, x-i], countdown())
    else:
        # Fix the first variable at i and recurse on the remaining n-1
        # with the residual target x-i and the remaining bounds.
        roots = []
        for i in countdown():
            for root in roots_of_n_poly_eq(n-1, x-i, var_upper_bounds[1:]):
                roots.append([i] + root)
        return roots
class Arc(enhancedyaml.YAMLObject):
    """A capacitated arc of the network, loaded from YAML."""

    @property
    def max_cap(self):
        '''Largest capacity level of this arc (the `C` in the paper); cached.'''
        if not hasattr(self, '_max_cap'):
            self._max_cap = max(self.caps)
        return self._max_cap

    def calc_flow(self, commdity_idx, demand):
        '''Capacity consumed on this arc by *demand* units of a commodity.'''
        return self.consumed_caps[commdity_idx] * demand

    def __repr__(self):
        if hasattr(self, 'anchor'):
            return '<Arc &%s>' % self.anchor
        # BUG FIX: the original fell back to `repr(self)`, which re-enters
        # this very method -> infinite recursion.  Delegate to the default
        # object repr instead.
        return object.__repr__(self)
class Network(enhancedyaml.YAMLObject):
    """A multi-commodity flow network (arcs + paths) loaded from YAML."""

    def is_through(self, path_idx, arc):
        """Return whether path number *path_idx* traverses *arc*.

        The lookup table is built lazily on the first call.  BUG FIX: the
        original build loops reused ``path_idx`` and ``arc`` as loop
        variables, clobbering the arguments before the final lookup, so
        the very first call answered for the last path/arc iterated
        rather than the requested pair.  Distinct local names fix that.
        """
        if not hasattr(self, '_through_table'):
            # Number the arcs and give each a back-reference to the network.
            for arc_i, network_arc in enumerate(self.arcs):
                network_arc.network = self
                network_arc.idx = arc_i
            self._through_table = []
            for p_i, path in enumerate(self.paths):
                self._through_table.append([False] * (arc_i + 1))
                for path_arc in path:
                    self._through_table[p_i][path_arc.idx] = True
        return self._through_table[path_idx][arc.idx]

    def calc_flow(self, arc, current_commodties_demands):
        '''the flow of the paths of an arc with specific demand case'''
        consumed_cap = .0
        for commdity_idx, demands in enumerate(current_commodties_demands):
            for path_idx, demand in enumerate(demands):
                if self.is_through(path_idx, arc):
                    consumed_cap += arc.calc_flow(commdity_idx, demand)
        from math import ceil
        # Capacities are integral: round the consumed amount up.
        return int(ceil(consumed_cap))

    def gen_feasible_flow_vectors(self):
        '''return feasible flow vectors (`Fs`) by enumerating all demand cases'''
        from itertools import product
        parts_of_possible_flow_vectors = {}
        for x in self.max_demands:
            if x not in parts_of_possible_flow_vectors:
                parts_of_possible_flow_vectors[x] = roots_of_n_poly_eq(len(self.paths), x)
        for i, possible_flow_vector in enumerate(product(*[parts_of_possible_flow_vectors[x] for x in self.max_demands])):
            # Discard assignments that overload any arc.
            if any(self.calc_flow(arc, possible_flow_vector) > arc.max_cap for arc in self.arcs):
                continue
            yield possible_flow_vector

    def trans_to_cap_vector(self, flow_vectors):
        '''translate the flow vectors (`Fs`) into capacity vector (`Xs`)'''
        cap_vectors = []
        for flow_vector in flow_vectors:
            cap_vector = tuple(self.calc_flow(arc, flow_vector) for arc in self.arcs)
            if cap_vector not in cap_vectors:
                cap_vectors.append(cap_vector)
        return cap_vectors

    def calc_minimal_cap_vectors(self):
        '''Enumerate feasible flows, map to capacity vectors, keep the minimal ones.'''
        feasible_flow_vectors = self.gen_feasible_flow_vectors()
        cap_vectors = self.trans_to_cap_vector(feasible_flow_vectors)
        return vector.min(cap_vectors)
if __name__ == '__main__':
    # Python-2 demo driver: load the YAML-defined example networks into
    # the module namespace (expects a mapping containing `example1`).
    locals().update(enhancedyaml.load(open('data.yaml')))
    from pprint import pprint
    print '# Example 1'
    print
    print '## paths'
    print
    pprint(example1.paths)
    print
    print '## feasible flow vectors'
    print
    feasible_flow_vectors = list(example1.gen_feasible_flow_vectors())
    pprint(feasible_flow_vectors)
    print
    print '## capacity vectors'
    print
    cap_vectors = example1.trans_to_cap_vector(feasible_flow_vectors)
    pprint(cap_vectors)
    print
    print '## minimal capacity vectors'
    print
    pprint(vector.min(cap_vectors))
    print
3,368 | 1f40c0ed8e449354a5a87ef18bb07978a9fb8a1c | #!/usr/bin/env python
import utils
def revcomp(s):
    """Reverse complement of DNA string *s* (A<->T, G<->C)."""
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return ''.join(pairs[base] for base in s[::-1])
def reverse_palindromes(s):
    """Find reverse palindromes of length 4..12 in DNA string *s*.

    Returns (position, length) pairs with 1-based positions, ordered by
    increasing position, then length (same output order as before).
    """
    results = []
    l = len(s)
    for i in range(l):
        for j in range(4, 13):
            if i + j > l:
                # FIX: every larger j also overruns the end of s, so stop
                # this inner loop; the original used `continue`, wastefully
                # re-testing the remaining lengths.
                break
            s1 = s[i:i+j]
            s2 = revcomp(s1)
            if s1 == s2:
                results.append((i + 1, j))
    return results
if __name__ == "__main__":
seq = utils.load_multifasta('files/rosalind_revp.txt').values()[0]
results = reverse_palindromes(seq)
print "\n".join([' '.join(map(str, r)) for r in results])
|
3,369 | a3fc624d6d101667ab11842eac96ed1b34d4317e | from django.apps import AppConfig
class AccountsnConfig(AppConfig):
    """Django app configuration for the `accounts` app.

    NOTE(review): the class name looks like a typo for ``AccountsConfig``;
    renaming would require updating every reference (settings/apps), so it
    is left unchanged here.
    """
    name = 'accounts'
|
3,370 | 1c8b843174521f1056e2bac472c87d0b5ec9603e | #!/usr/bin/python
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
# NOTE(review): Python-2 script (print statements; list-returning map).
occl_frac = 0.445188
result = [1-occl_frac, occl_frac, 0]
#Reading res_data.txt
mnfa = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] #min NN factor array
nna = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,24,26,28,30] #NN Aray
# fraction_data[k][nna_index][mnfa_index] for the three fraction series.
fraction_data=[[[0.0 for i in range(len(mnfa))] for j in range(len(nna))] for k in range(3)]
df = open("res_data.txt", "r")
while(True):
    try:
        x = df.readline()
        y = map(float,x.split())
        # An empty line (EOF) yields an empty list and ends the loop.
        if y==[]:
            break
        mnfa_index = mnfa.index(y[0])
        nna_index = nna.index(y[1])
        df.readline() #Get no. of points in each
        df.readline() #Get blown up fractions
        frdata = map(float,df.readline().split()) #get actual fractions
        assert(len(frdata) == 3)
        for i in range(3):
            fraction_data[i][nna_index][mnfa_index] = frdata[i]
    except(IOError):
        print "What?"
        break
print "Read all data from file, plotting stuff..."
fig = plt.figure()
for i in range(3):
    # NOTE(review): add_subplot(1,3,0) on the first pass -- Matplotlib
    # subplot indices are 1-based; confirm this ran on the intended version.
    ax = fig.add_subplot(1,3,i, projection='3d')
    X = mnfa
    xlen = len(X)
    Y = nna
    ylen = len(Y)
    X, Y = np.meshgrid(X, Y)
    Z = fraction_data[i]
    # Alternating checkerboard colours (computed but unused by plot_surface).
    colortuple = ('r', 'b')
    colors = np.empty(X.shape, dtype=str)
    for y in range(ylen):
        for x in range(xlen):
            colors[y, x] = colortuple[(x+y) % len(colortuple)]
    ax.plot_surface(X,Y,Z,rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0)
plt.show()
|
3,371 | c0bf146ebfdb54cce80ef85c4c7f4a61632e67d4 | # Generated by Django 3.0.2 on 2020-02-18 05:52
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the whole legacy schema.

    Foreign-key fields are removed first so the dependent models can then be
    deleted without constraint errors.
    """

    dependencies = [
        ('myapp', '0003_admin'),
    ]

    operations = [
        migrations.DeleteModel(
            name='admin',
        ),
        # drop all FK fields before deleting the models they point to
        migrations.RemoveField(
            model_name='course',
            name='t_id',
        ),
        migrations.RemoveField(
            model_name='enrollcourse',
            name='course_id',
        ),
        migrations.RemoveField(
            model_name='enrollcourse',
            name='student_id',
        ),
        migrations.RemoveField(
            model_name='enrollcourse',
            name='teachers_id',
        ),
        migrations.RemoveField(
            model_name='enrollcourse',
            name='user_id',
        ),
        migrations.DeleteModel(
            name='news',
        ),
        migrations.RemoveField(
            model_name='ratings',
            name='course',
        ),
        migrations.RemoveField(
            model_name='student',
            name='user_id',
        ),
        migrations.RemoveField(
            model_name='teachers',
            name='user_id',
        ),
        migrations.RemoveField(
            model_name='viewsa',
            name='sid',
        ),
        migrations.RemoveField(
            model_name='viewsa',
            name='tid',
        ),
        migrations.RemoveField(
            model_name='viewsa',
            name='uid',
        ),
        # now the models themselves can be deleted
        migrations.DeleteModel(
            name='course',
        ),
        migrations.DeleteModel(
            name='Enrollcourse',
        ),
        migrations.DeleteModel(
            name='ratings',
        ),
        migrations.DeleteModel(
            name='student',
        ),
        migrations.DeleteModel(
            name='teachers',
        ),
        migrations.DeleteModel(
            name='User',
        ),
        migrations.DeleteModel(
            name='viewsa',
        ),
    ]
|
3,372 | f26b127b4d968c1a168a57825a5acfffbf027bef | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class ResPartner(models.Model):
    # Extend res.partner with a default purchase order type for the partner.
    _inherit = 'res.partner'
    purchase_type = fields.Many2one('purchase.order.type', string='Purchase Order Type')
|
3,373 | f9a0c3b643c2ee6bb6778477bf8fc21564812081 | # -*- coding: utf-8 -*-
# @Time : 2019/9/17 17:48
# @Author : ZhangYang
# @Email : ian.zhang.88@outlook.com
from functools import wraps
def create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):
    """Create a sequential znode under *base_path* and return its full path.

    Parameters:
        zk_client: kazoo-style client exposing exists/ensure_path/create.
        base_path: parent path, created (with parents) if missing.
        prefix: node name prefix; the server appends the sequence number.
        is_ephemeral: create the node as ephemeral when True.
    """
    if not zk_client.exists(base_path):
        # ensure_path creates the whole chain of parent nodes if needed
        zk_client.ensure_path(base_path)
    # empty payload; the client API requires bytes (b'' == ''.encode('utf-8'))
    new_node = zk_client.create(base_path + '/' + prefix, b'', sequence=True, ephemeral=is_ephemeral)
    return new_node
class SetGetMixin():
    """Mixin providing decorator factories guarding zookeeper get/set methods.

    Subclasses are expected to provide ``self.zk_client`` (kazoo-style client)
    and an attribute whose name is passed as *path_variable*.
    """
    def get(path_variable):
        # Decorator factory: make the wrapped getter return None when the
        # znode at getattr(self, path_variable) does not exist.
        # NOTE(review): deliberately has no `self` — intended to be applied at
        # class-definition time, e.g. @SetGetMixin.get('node_path'); confirm.
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                if not self.zk_client.exists(getattr(self, path_variable)):
                    return None
                return func(self, *args, **kwargs)
            return wrapper
        return decorator
    def set(path_variable):
        # Decorator factory: ensure the znode path exists before running the
        # wrapped setter.
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                self.zk_client.ensure_path(getattr(self, path_variable))
                return func(self, *args, **kwargs)
            return wrapper
        return decorator
3,374 | ad5a9e353d065eee477381aa6b1f233f975ea0ed | """
Auteur:Fayçal Chena
Date : 07 avril 2020
Consignes :
Écrire une fonction alea_dice(s) qui génère trois nombres (pseudo) aléatoires à l’aide
de la fonction randint du module random, représentant trois dés (à six faces avec
les valeurs de 1 à 6), et qui renvoie la valeur booléenne True si les dés forment un 421,
et la valeur booléenne False sinon.
Le paramètre s de la fonction est un nombre entier, qui sera passé en argument
à la fonction random.seed au début du code de la fonction. Cela permettra de
générer la même suite de nombres aléatoires à chaque appel de la fonction,
et ainsi de pouvoir tester son fonctionnement.
"""
def foo_6(x, y):
    """Return the two arguments in swapped order as the tuple (y, x)."""
    swapped = (y, x)
    return swapped
a = 4
b = 8
foo_6(a, b)  # return value discarded — a and b are unchanged
print(a, b)  # prints "4 8": demonstrates foo_6 does not mutate its arguments
|
3,375 | bf683f8e7fb5ad5f7cd915a8a01d9adf7d13e739 |
def first_repeat(chars):
    """Return the first character of *chars* that occurs more than once.

    Returns the string '-1' when no character repeats (including for empty
    input). Fixed accidental O(n^2): the original called chars.count() for
    every position; a single Counter pass makes this O(n).
    """
    from collections import Counter
    counts = Counter(chars)
    for ch in chars:
        if counts[ch] > 1:
            return ch
    return '-1'
|
3,376 | bea1a5bc9c92d095a2f187a4c06d18d0a939f233 | source = open("input.txt", "r")
total = 0
def calculateWeight(weight):
    """Total fuel required for *weight*, including fuel needed for the fuel
    itself (Advent of Code 2019 day 1, part 2).

    Recurses until the marginal fuel requirement drops to zero or below.
    Fixed idiom: the original bound a local named `sum`, shadowing the builtin.
    """
    fuel = calculateFuel(weight)
    if fuel <= 0:
        # negative or zero fuel contributes nothing (max(0, fuel) == 0 here)
        return 0
    return fuel + calculateWeight(fuel)

def calculateFuel(weight):
    """Fuel for a single mass: floor(weight / 3) - 2 (may be negative)."""
    return weight // 3 - 2
for line in source.readlines():
    # one module mass per line; accumulate its total fuel requirement
    total += calculateWeight(int(line))
print(total)
3,377 | 6018f35afc6646d0302ca32de649ffe7d544a765 | """
Make html galleries from media directories. Organize by dates, by subdirs or by
the content of a diary file. The diary file is a markdown file organized by
dates, each day described by a text and some medias (photos and movies).
The diary file can be exported to:
* an html file with the text and subset of medias associated with each day,
* the previous html file extended with all medias in the media directory,
* an html file ready to import into Blogger.
"""
import sys
import os
import argparse
import glob
import shutil
import re
import io
import bisect
import locale
import textwrap
import base64
import datetime
import urllib
from configparser import ConfigParser
from collections import defaultdict
from subprocess import check_output, CalledProcessError, STDOUT
from urllib.request import urlopen
import colorama
import clipboard
import PIL
from PIL import Image, ImageChops
from lxml import objectify
import markdown
USAGE = """
galerie --gallery <root-dir> [--sourcedir <media-dir>]
[--bydir true|false*]
[--bydate true|false*]
[--diary true|false*]
[--recursive true|false*]
[--dates source*|diary|<yyyymmdd-yyyymmdd>]
[--github_pages true|false]
[--dest <directory>]
[--forcethumb]
galerie --update <root-dir>
galerie --create <root-dir> --sourcedir <media-dir>
[--recursive true|false*]
[--dates source*|<yyyymmdd-yyyymmdd>]
galerie --blogger <root-dir> --url <url>
[--check]
[--full]
[--dest <filename>]
Notes:
- * gives default
- all options can be abbreviated if there is no conflict with other options (--gallery --> --gal)
"""
# -- Post objects -------------------------------------------------------------
CAPTION_IMAGE_STYLE = '''\
<style type="text/css">
span { display:inline-table; }
</style>\
'''
STYLE = '''\
<style type="text/css">
p { margin-top:0px; margin-bottom:0px; }
h3 { font-size: 100%%; font-weight: bold; margin-top:0px; margin-bottom:0px; }
</style>
'''
START = f'''\
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>%s</title>
<link rel="icon" href="favicon.ico" />
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="photobox/photobox.css">
<script src="photobox/jquery.min.js"></script>
<script src="photobox/jquery.photobox.js"></script>
{CAPTION_IMAGE_STYLE}
{STYLE}
</head>
<body>\
'''
BUTTONS = '''\
<button id="btn_full" type="button" style="position: fixed; width: 50px; top: 20px; right: 20px; background-color:white">Full</button>
<button id="btn_blog" type="button" style="position: fixed; width: 50px; top: 40px; right: 20px; background-color:white">Diary</button>
<button id="btn_text" type="button" style="position: fixed; width: 50px; top: 60px; right: 20px; background-color:white">Text</button>
<script>
$('#btn_full').click(function() {
$("[id^=gallery-blog]").show();
$("[id^=gallery-dcim]").show();
$("div.extra").show();
});
$('#btn_text').click(function() {
$("[id^=gallery-blog]").hide();
$("[id^=gallery-dcim]").hide();
$("div.extra").hide();
});
$('#btn_blog').click(function() {
$("[id^=gallery-blog]").show();
$("[id^=gallery-dcim]").hide();
$("div.extra").hide();
});
</script>
'''
SUBDIR_BACKCOL = '#eee'
END = '</body>\n</html>'
SEP = '<hr color="#C0C0C0" size="1" />'
IMGPOST = '<a href="%s"><img src="%s" width="%d" height="%d" title="%s"/></a>'
VIDPOST = '<a href="%s" rel="video"><img src="%s" width="%d" height="%d" title="%s"/></a>'
IMGPOSTCAPTION = '''\
<span>
<a href="%s"><img src=%s width="%d" height="%d" title="%s"/></a>
<p>%s</p>
</span>
'''
VIDPOSTCAPTION = '''\
<span>
<a href="%s" rel="video"><img src=%s width="%d" height="%d" title="%s"/></a>
<p>%s</p>
</span>
'''
IMGDCIM = '<a href="%s"><img src="%s" width="%d" height="%d" title="%s"/></a>'
VIDDCIM = '<a href="%s" rel="video"><img src="%s" width="%d" height="%d" title="%s"/></a>'
# diminution de l'espace entre images, on utilise :
# "display: block;", "margin-bottom: 0em;" et "font-size: 0;"
# "display: block;" dans img : espacement correct ordi mais pas centré téléphone
# "display: block;" dans a : ok
DIRPOST = '<a href="%s"><img src="%s" width="%d" height="%d" style="border: 1px solid #C0C0C0;" /></a>'
DIRPOSTCAPTION = f'''
<span style="background-color:{SUBDIR_BACKCOL}; margin-bottom: 8px; border: 1px solid #C0C0C0;">
<a href="%s"><img src="%s" width="%d" height="%d" style="border: 1px solid #C0C0C0;" /></a>
<p style="margin-left:2px;">%s</p>
</span>
'''
BIMGPAT = '''\
<div class="separator" style="clear: both; text-align: center;">
<a href="%s" style="clear: left; margin-bottom: 0em; margin-right: 1em; font-size: 0; display: block;">
<img border="0" src="%s" width="640" />
</a></div>
'''
CAPTION_PAT = '''\
<div class="separator" style="clear: both; text-align: center;">
%s
</div>
'''
class Post:
def __init__(self, date, text, medias):
# date: yyyymmdd
self.date = date
self.text = text
self.medias = medias
self.dcim = []
self.daterank = 0
self.extra = False
def __lt__(self, other):
return self.date < other.date
@classmethod
def from_markdown(cls, post):
m = re.match(r'\[(\d\d\d\d/\d\d/\d\d)\]\n*', post[0])
if m:
date = m.group(1).replace('/', '')
if not validate_date(date):
error('Incorrect date value:', date)
del post[0]
else:
error('No date in post', ' '.join(post))
while post and not post[0].strip():
del post[0]
text = ''
while post and not re.match(r'!?\[\]', post[0]):
text += post[0]
del post[0]
# remove empty lines at end
text = re.sub(r'\n\n$', '\n', text)
medias = list()
while post and (match := re.match(r'!?\[\]\((.*)\)', post[0])):
media = match.group(1)
caption = None
del post[0]
if post and not re.match(r'!?\[\]', post[0]):
caption = post[0].strip()
del post[0]
if match.group(0)[0] == '!':
medias.append(PostImage(caption, media))
else:
medias.append(PostVideo(caption, media))
return cls(date, text, medias)
@classmethod
def from_date(cls, date):
dt = datetime.datetime.strptime(date, '%Y%m%d')
datetext = dt.strftime("%A %d %B %Y").capitalize()
post = cls(date, text=datetext, medias=[])
post.daterank = 1
return post
def to_html(self, args, target='regular'):
if target == 'regular':
if args.diary:
return self.to_html_diary(args)
else:
return self.to_html_regular(args)
if target == 'blogger':
return self.to_html_blogger()
def to_html_regular(self, args):
html = list()
if self.text:
# possible with --bydate
html.append(markdown.markdown(self.text))
subdirs, dcim = dispatch_post_items(self.dcim)
if self.dcim:
html.append(SEP)
for media in subdirs:
html.append(media.to_html_dcim(args))
if dcim:
html.append(f'<div id="gallery-dcim-{self.date}-{self.daterank}">')
for media in dcim:
html.append(media.to_html_dcim(args))
html.append('</div>')
html.append(SEP)
return html
def to_html_diary(self, args):
html = list()
if self.extra:
html.append('<div class="extra">')
if self.text:
html.append(markdown.markdown(self.text))
if self.medias:
html.append(f'<div id="gallery-blog-{self.date}-{self.daterank}">')
for media in self.medias:
html.append(media.to_html_post(args))
html.append('</div>')
_, dcim = dispatch_post_items(self.dcim)
if dcim:
html.append(f'<div id="gallery-dcim-{self.date}-{self.daterank}">')
html.append(SEP)
for media in dcim:
html.append(media.to_html_dcim(args))
html.append('</div>')
html.append(SEP)
if self.extra:
html.append('</div>')
return html
def to_html_blogger(self):
html = list()
html.append(markdown.markdown(self.text))
for image in self.medias:
html.append(image.to_html_blogger())
html.append(SEP)
return html
class PostItem:
def __init__(self, caption, uri, thumb=None, thumbsize=None, descr=''):
self.caption = caption
self.uri = uri
self.basename = os.path.basename(uri)
self.thumb = thumb
self.thumbsize = thumbsize
self.descr = descr
self.resized_url = None
class PostImage(PostItem):
    """An image item of a post (markdown form: ![](uri))."""
    def to_markdown(self):
        """Markdown image link, optionally followed by its caption line.

        Fixed: the format strings were empty ('' % (self.uri,) raises
        TypeError); rebuilt to match the parser pattern for image links
        (Post.from_markdown) and the symmetric PostVideo.to_markdown.
        """
        if not self.caption:
            return '![](%s)' % (self.uri,)
        else:
            return '![](%s)\n%s' % (self.uri, self.caption)
    def to_html_post(self, args):
        """Thumbnail html linking to the full-size image (diary section)."""
        descr = self.descr if args.thumbnails.media_description else ''
        if not self.caption:
            return IMGPOST % (self.uri, self.thumb, *self.thumbsize, descr)
        else:
            return IMGPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)
    def to_html_dcim(self, args):
        """Same as to_html_post but with a URL relative to the gallery root."""
        descr = self.descr if args.thumbnails.media_description else ''
        return IMGDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)
    def to_html_blogger(self):
        """Blogger-ready html using the pre-resized image URL."""
        if not self.caption:
            return BIMGPAT % (self.uri, self.resized_url)
        else:
            return f'{BIMGPAT}\n{CAPTION_PAT}' % (self.uri, self.resized_url, self.caption)
class PostVideo(PostItem):
    """A video item of a post (markdown form: [](uri))."""
    def to_markdown(self):
        """Markdown video link, optionally followed by its caption line."""
        if not self.caption:
            return '[](%s)' % (self.uri,)
        else:
            return '[](%s)\n%s' % (self.uri, self.caption)
    def to_html_post(self, args):
        # thumbnail linked with rel="video" so photobox plays it as a video
        descr = self.descr if args.thumbnails.media_description else ''
        if not self.caption:
            return VIDPOST % (self.uri, self.thumb, *self.thumbsize, descr)
        else:
            return VIDPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)
    def to_html_dcim(self, args):
        # same as to_html_post but with a URL relative to the gallery root
        descr = self.descr if args.thumbnails.media_description else ''
        return VIDDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)
    def to_html_blogger(self):
        # NOTE(review): relies on self.iframe, which is not assigned in this
        # class or PostItem.__init__ — presumably set by the blogger export
        # pipeline before this is called; confirm.
        x = f'<p style="text-align: center;">{self.iframe}</p>'
        if not self.caption:
            return x
        else:
            return f'%s\n{CAPTION_PAT}' % (x, self.caption)
class PostSubdir(PostItem):
    """A subdirectory item: rendered as a mosaic thumbnail linking to the
    subdirectory's own html page.

    The attributes htmname, posts, sublist and caption are assigned
    externally after construction (see create_item_subdir).
    """
    def to_html_dcim(self, args):
        # (re)generate the subdirectory's own page, then return the mosaic
        # thumbnail html linking to it
        basename = os.path.basename(self.htmname)
        posts = self.posts
        title = self.caption
        print_html(args, posts, title, self.htmname)
        if not self.caption:
            return DIRPOST % (basename, self.thumb, *self.thumbsize)
        else:
            return DIRPOSTCAPTION % (basename, self.thumb, *self.thumbsize, self.caption)
def relative_url(path, root):
    """
    Return a normalized, percent-encoded url for *path* relative to *root*.

    Backslashes are converted to forward slashes on Windows before quoting.
    Fixed: the error message referenced the local `url` before assignment
    (NameError on the failure path); it now reports *path*. The bare except
    is narrowed to ValueError, which is what os.path.relpath raises (e.g.
    paths on different Windows drives).
    """
    try:
        url = os.path.relpath(path, root)
    except ValueError:
        error('Unable to make a relative url:', path, root)
    url = url.replace('\\', '/') if os.sep == '\\' else url
    return urllib.parse.quote(url)
# -- Markdown parser ----------------------------------------------------------
def parse_markdown(filename):
    """
    Generate Post objects from markdown. Date must be present in each post and
    posts must be ordered by date.

    Returns (title, posts): title is the h1 heading text or None, posts is
    the list of Post objects in file order.
    """
    if not os.path.exists(filename):
        error('File not found', filename)
    posts = list()
    with open(filename, encoding='utf-8') as f:
        line = next(f)
        if line.startswith('# '):
            # first line is the document title; skip the following blank line
            title = line[2:].strip()
            record = []
            next(f)
        else:
            title = None
            record = [line]
        for line in f:
            if not line.startswith('___'):
                record.append(line)
            else:
                # a line of underscores terminates the current post
                posts.append(Post.from_markdown(record))
                record = []
    # set rank of posts in date (1-based index among posts sharing a date)
    daterank = defaultdict(int)
    for post in posts:
        daterank[post.date] += 1
        post.daterank = daterank[post.date]
    # check post order
    for post1, post2 in zip(posts[:-1], posts[1:]):
        if post1.date > post2.date:
            error('Posts are not ordered', f'{post1.date} > {post2.date}')
    return title, posts
# -- Markdown printer ---------------------------------------------------------
def print_markdown(posts, title, fullname):
    """Write posts back to a markdown diary file (inverse of parse_markdown)."""
    with open(fullname, 'wt', encoding='utf-8') as fdst:
        print(f'# {title}\n', file=fdst)
        for post in posts:
            # date header in [yyyy/mm/dd] form
            date = f'[{post.date[0:4]}/{post.date[4:6]}/{post.date[6:8]}]'
            print(date, file=fdst)
            if post.text:
                print(file=fdst)
                for line in post.text.splitlines():
                    if not line:
                        print(file=fdst)
                    else:
                        # wrap long paragraphs at 78 columns
                        for chunk in textwrap.wrap(line, width=78):
                            print(chunk, file=fdst)
            if post.medias:
                print(file=fdst)
                for media in post.medias:
                    print(media.to_markdown(), file=fdst)
            # post separator expected by parse_markdown (startswith '___')
            print('______', file=fdst)
# -- html printer -------------------------------------------------------------
def compose_html_reduced(args, posts, title, target):
    """Build the page as a list of html lines, without buttons or gallery
    scripts (used for the blogger export)."""
    lines = [START % title]
    for post in posts:
        lines.extend(line.strip() for line in post.to_html(args, target))
        lines.append('')
    lines.append(END)
    return lines
def compose_html_full(args, posts, title, target):
html = list()
html.append(START % title)
if args.diary:
html.append(BUTTONS)
for post in posts:
for line in post.to_html(args, target):
html.append(line.strip())
html.append('')
html.append('<script>')
for post in posts:
if post.medias:
gallery_id = f'gallery-blog-{post.date}-{post.daterank}'
html.append(gallery_call(args, gallery_id))
if post.dcim:
gallery_id = f'gallery-dcim-{post.date}-{post.daterank}'
html.append(gallery_call(args, gallery_id))
html.append('</script>')
html.append(END)
return html
def print_html_to_stream(args, posts, title, stream, target):
    """Compose the page for *target* and write it line by line to *stream*."""
    compose = compose_html_full if target == 'regular' else compose_html_reduced
    for line in compose(args, posts, title, target):
        print(line, file=stream)
def print_html(args, posts, title, html_name, target='regular'):
assert target in ('regular', 'blogger')
with io.StringIO() as f:
print_html_to_stream(args, posts, title, f, target)
html = f.getvalue()
if html_name:
if os.path.exists(html_name):
# test if the generated html is identical to the one already on disk
with open(html_name, 'rt', encoding='utf-8') as f:
html0 = f.read()
if html == html0:
return None
with open(html_name, 'wt', encoding='utf-8') as f:
f.write(html)
return None
else:
return html
GALLERYCALL = """
$('#%s').photobox('a', {
loop:%s,
thumbs:%s,
autoplay:%s,
time:%d,
zoomable:%s ,
rotatable:%s,
wheelNextPrev:%s
});
"""
def gallery_call(args, gallery_id):
    """Return the photobox jQuery initialisation call for one gallery div,
    collapsed to a single line, with options taken from args.photobox."""
    return GALLERYCALL.replace('\n', '') % (
        gallery_id,
        str(args.photobox.loop).lower(),  # booleans must be lowercase in js
        str(args.photobox.thumbs).lower(),
        str(args.photobox.autoplay).lower(),
        args.photobox.time,
        str(args.photobox.zoomable).lower(),
        str(args.photobox.rotatable).lower(),
        str(args.photobox.wheelNextPrev).lower(),
    )
# -- Media description --------------------------------------------------------
def is_image_file(name):
    """True when *name* has a known image extension (case-insensitive)."""
    extension = os.path.splitext(name)[1].lower()
    return extension in (
        '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.tif'
    )

def is_video_file(name):
    """True when *name* has a known video extension (case-insensitive)."""
    extension = os.path.splitext(name)[1].lower()
    return extension in (
        '.mp4', '.webm', '.mkv', '.flv', '.m4v', '.avi', '.wmv', '.mts', '.vob', '.divx'
    )

def is_media(name):
    """True when *name* is either an image or a video file."""
    return is_image_file(name) or is_video_file(name)
def validate_date(datestr):
    """Return True when *datestr* is a valid yyyymmdd calendar date."""
    try:
        datetime.datetime.strptime(datestr, '%Y%m%d')
    except ValueError:
        return False
    return True

def date_from_name(name):
    """Heuristic: extract a yyyymmdd date embedded in a name, or None."""
    match = re.search(r'(?:\D|^)(\d{8})(?:\D|$)', name, re.ASCII)
    if match is None:
        return None
    digits = match.group(1)
    return digits if validate_date(digits) else None

def date_from_item(filename):
    """Date of a media item: from its name when possible, else its mtime."""
    named = date_from_name(filename)
    if named:
        return named
    mtime = os.path.getmtime(filename)
    return datetime.datetime.fromtimestamp(mtime).strftime('%Y%m%d')

def time_from_name(name):
    """Heuristic: extract a hhmmss time following a yyyymmdd date, or None."""
    match = re.search(r'(?:\D|^)(\d{8})\D(\d{6})(?:\D|$)', name, re.ASCII)
    if match is None:
        return None
    digits = match.group(2)
    hour, minute, second = int(digits[0:2]), int(digits[2:4]), int(digits[4:6])
    if 0 <= hour < 24 and 0 <= minute < 60 and 0 <= second < 60:
        return digits
    return None

def time_from_item(filename):
    """Time of a media item: from its name when possible, else its mtime."""
    named = time_from_name(filename)
    if named:
        return named
    mtime = os.path.getmtime(filename)
    return datetime.datetime.fromtimestamp(mtime).strftime('%H%M%S')
FFPROBE_CMD = '''\
ffprobe -v error
-select_streams v:0
-show_entries stream=width,height,avg_frame_rate,r_frame_rate:format=duration
-of csv=p=0
'''
def get_image_info(filename):
date = date_from_item(filename)
time = time_from_item(filename)
img = Image.open(filename)
width, height = img.size
size = round(os.path.getsize(filename) / 1e6, 1)
return (date, time, width, height, size), f'{date} {time}, dim={width}x{height}, {size} MB'
def get_video_info(filename, info_fullname):
if os.path.exists(info_fullname):
with open(info_fullname) as f:
info = f.readline().split()
date, time, width, height, size, duration, fps = info[0], info[1], int(info[2]), int(info[3]), float(info[4]), int(info[5]), float(info[6])
formatted_info = format_video_info(date, time, width, height, size, duration, fps)
return (date, time, width, height, size, duration, fps), formatted_info
else:
info, formatted_info = make_video_info(filename, info_fullname)
with open(info_fullname, 'wt') as f:
print(' '.join([str(_) for _ in info]), file=f)
return info, formatted_info
def make_video_info(filename, info_fullname):
# ffmpeg must be in path
date = date_from_item(filename)
time = time_from_item(filename)
command = [*FFPROBE_CMD.split(), filename]
try:
output = check_output(command, stderr=STDOUT).decode()
width, height, fps, duration = parse_ffprobe_output(output)
size = round(os.path.getsize(filename) / 1e6, 1)
output = format_video_info(date, time, width, height, size, duration, fps)
except CalledProcessError as e:
output = e.output.decode()
warning(output)
raise
return (date, time, width, height, size, duration, fps), output
def parse_ffprobe_output(ffprobe_output):
    """Parse ffprobe csv output: first video stream line plus trailing duration.

    Returns (width, height, fps, duration) — fps rounded to one decimal,
    duration rounded to whole seconds.
    """
    groups = re.match(r'(\d+),(\d+),(\d+)/(\d+),(\d+/\d+).*\s(\d+\.\d+)',
                      ffprobe_output, re.DOTALL).groups()
    width, height = int(groups[0]), int(groups[1])
    fps = round(int(groups[2]) / int(groups[3]), 1)
    duration = round(float(groups[5]))
    return width, height, fps, duration
def format_video_info(date, time, width, height, size, duration, fps):
    """One-line human readable summary of a video's properties."""
    return (f'{date} {time}, dim={width}x{height}, '
            f'{format_duration(duration)}, fps={fps}, {size} MB')

def format_duration(duration):
    """Format seconds as 'm:s=MM:SS', or 'h:m:s=HH:MM:SS' above 59 minutes."""
    minutes, seconds = divmod(duration, 60)
    if minutes <= 59:
        return f'm:s={minutes:02}:{seconds:02}'
    hours, minutes = divmod(minutes, 60)
    return f'h:m:s={hours:02}:{minutes:02}:{seconds:02}'
# -- Thumbnails (image and video) ---------------------------------------------
def thumbname(name, key):
    """Thumbnail file name for media *name* under cache *key* (always .jpg)."""
    return f'{key}-{name}.jpg'

def size_thumbnail(width, height, maxdim):
    """Scale (width, height) so that the larger dimension equals *maxdim*."""
    if height > width:
        return int(round(maxdim * width / height)), maxdim
    return maxdim, int(round(maxdim * height / width))
def make_thumbnail_image(args, image_name, thumb_name, size):
if os.path.exists(thumb_name) and args.forcethumb is False:
pass
else:
print('Making thumbnail:', thumb_name)
create_thumbnail_image(image_name, thumb_name, size)
def create_thumbnail_image(image_name, thumb_name, size):
imgobj = Image.open(image_name)
if (imgobj.mode != 'RGBA'
and image_name.endswith('.jpg')
and not (image_name.endswith('.gif') and imgobj.info.get('transparency'))
):
imgobj = imgobj.convert('RGBA')
imgobj.thumbnail(size, Image.LANCZOS)
imgobj = imgobj.convert('RGB')
imgobj.save(thumb_name)
def make_thumbnail_video(args, video_name, thumb_name, size, duration):
if os.path.exists(thumb_name) and args.forcethumb is False:
pass
else:
print('Making thumbnail:', thumb_name)
create_thumbnail_video(args, video_name, thumb_name, size, duration)
# base64 video.png
VIDEO_ICON = '''\
iVBORw0KGgoAAAANSUhEUgAAABgAAAAUCAAAAACy3qJfAAAA4UlEQVR4
2m1QoRbCMAy88SaK69xscfuEWiS4SZBIcCCRfAL8An8AcnJzTOJSWdxwzJXSPUoHRPQlueYuucigxm
9kDGaMf8AjopGcYn8LmmyLoihBWBiThb+5MTuUsc3aL56upneZ9sByAIg8Z8BEn96EeZ65iU7DvmbP
PxqDcH6p1swXBC4l6yZskACkTN1WrQr2SlIFhTtgqeZa+zsOogLXegvEocZ5c/W5BcoVNNCg3hSudV
/hEh4ofw6cEb00Km8i0dpRDUXfKiaQOEAdrUDo4dFp9C33jjaRac9/gDF/AlplVYtfWGCjAAAAAElF
TkSuQmCC'''
def create_thumbnail_video(args, filename, thumbname, size, duration):
# ffmpeg must be in path
delay = min(duration - 1, args.thumbnails.thumbdelay)
sizearg = '%dx%d' % size
command = 'ffmpeg -y -v error -itsoffset -%d -i "%s" -vcodec mjpeg -vframes 1 -an -f rawvideo -s %s "%s"'
command = command % (delay, filename, sizearg, thumbname)
result = os.system(command)
# add a movie icon to the thumbnail to identify videos
try:
img1 = Image.open(thumbname)
except:
# ffmpeg was unable to save thumbnail
warning('Unable to save thumbnail for', filename)
return
img2 = Image.open(io.BytesIO(base64.b64decode(VIDEO_ICON)))
width, height = img1.size
img1.paste(img2, (6, height - 20 - 6), None)
img1.save(thumbname)
def make_thumbnail_subdir(args, subdir_name, thumb_name, size, items, thumbdir):
# subdir thumbnails are always created as they depend on the content of the
# directory
print('Making thumbnail:', thumb_name)
create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir)
def create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir):
def size_thumbnail(width, height, xmax, ymax):
width2 = xmax
height2 = int(round(xmax * height / width))
if height2 < ymax:
width2 = int(round(ymax * width / height))
height2 = ymax
return width2, height2
thumblist = [os.path.basename(item.thumb) for item in items]
widthnum, heightnum, width, height, offsetx, offsety = mosaic_geometry(size, thumblist)
thumbnum = widthnum * heightnum
img = Image.new('RGB', size, SUBDIR_BACKCOL)
for ind, thumb in enumerate(thumblist[:min(thumbnum, len(thumblist))]):
row = ind // widthnum
col = ind % widthnum
img2 = Image.open(os.path.join(thumbdir, thumb))
w, h = size_thumbnail(*img2.size, width[col], height[row])
cropdim = ((w - width[col]) // 2, (h - height[row]) // 2,
(w - width[col]) // 2 + width[col], (h - height[row]) // 2 + height[row])
img2 = img2.resize((w, h), Image.LANCZOS)
img2 = img2.crop(cropdim)
img.paste(img2, (offsetx[col], offsety[row]))
if os.path.exists(thumb_name):
# test if the generated thumbnail is identical to the one already on disk
imgref = Image.open(thumb_name)
# must save and reload before comparing
byteio = io.BytesIO()
img.save(byteio, "JPEG")
byteio.seek(0)
imgnew = Image.open(byteio)
diff = ImageChops.difference(imgnew, imgref)
if diff.getbbox() is None:
return
img.save(thumb_name)
def mosaic_geometry(size, thumblist):
    """Grid geometry for a subdirectory mosaic thumbnail.

    Returns (widthnum, heightnum, width, height, offsetx, offsety): number of
    columns and rows, pixel width of each column, pixel height of each row,
    and x/y offsets of each cell (1-pixel outer border, 2-pixel gutters).
    """
    count = len(thumblist)
    if count == 1:
        widthnum, heightnum = 1, 1
    elif count <= 3:
        widthnum, heightnum = 1, 2
    elif count <= 8:
        widthnum, heightnum = 2, 2
    else:
        widthnum, heightnum = 3, 3

    def cell_sizes(total, num):
        # num-1 equal cells; the last cell absorbs the rounding remainder
        if num == 1:
            return [total - 2]
        cells = [total // num - 2] * (num - 1)
        cells.append(total - (1 + sum(cells) + 2 * len(cells) + 1))
        return cells

    def offsets(cells):
        # first cell sits after the 1-pixel border; each next after a 2-pixel gutter
        positions = [1]
        for cell in cells[:-1]:
            positions.append(positions[-1] + cell + 2)
        return positions

    width = cell_sizes(size[0], widthnum)
    height = cell_sizes(size[1], heightnum)
    return widthnum, heightnum, width, height, offsets(width), offsets(height)
def list_of_htmlfiles(args, posts):
htmlist = list()
htmlist.append(os.path.join(args.dest, args.rootname))
for post in posts:
htmlist.extend(list_of_htmlfiles_in_items(post.dcim))
return htmlist
def list_of_htmlfiles_in_items(itemlist):
htmlist = list()
for item in itemlist:
if type(item) == PostSubdir:
htmlist.append(item.htmname)
htmlist.extend(list_of_htmlfiles_in_items(item.sublist))
return htmlist
def list_of_thumbnails(posts, diary=False):
thumblist = list()
for post in posts:
thumblist.extend(list_of_thumbnails_in_items(post.medias))
if diary is False:
thumblist.extend(list_of_thumbnails_in_items(post.dcim))
return thumblist
def list_of_thumbnails_in_items(itemlist):
thumblist = list()
for item in itemlist:
if type(item) == PostSubdir:
thumblist.append(os.path.basename(item.thumb))
thumblist.extend(list_of_thumbnails_in_items(item.sublist))
else:
thumblist.append(os.path.basename(item.thumb))
return thumblist
def purge_htmlfiles(args, posts):
"""
Purge root dir from irrelevant html files
"""
htmlist = list_of_htmlfiles(args, posts)
html_to_remove = list()
for fullname in glob.glob(os.path.join(args.root, '*.htm*')):
if fullname not in htmlist:
html_to_remove.append(fullname)
if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:
inpt = 'x'
while inpt not in 'yn':
inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? ').lower()
if inpt == 'n':
return
for name in html_to_remove:
print('Removing html files', name)
os.remove(name)
def purge_thumbnails(args, thumbdir, posts, diary=False):
"""
Purge thumbnail dir from irrelevant thumbnails
"""
thumblist = list_of_thumbnails(posts, diary)
thumbs_to_remove = list()
for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):
if os.path.basename(fullname) not in thumblist:
thumbs_to_remove.append(fullname)
if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:
inpt = 'x'
while inpt not in 'yn':
inpt = input(f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? ').lower()
if inpt == 'n':
return
for name in thumbs_to_remove:
print('Removing thumbnail', name)
os.remove(name)
info_fullname = os.path.splitext(name)[0] + '.info'
if os.path.exists(info_fullname):
os.remove(info_fullname)
# -- List of medias helpers ---------------------------------------------------
def is_media_within_dates(fullname, dates):
if is_media(fullname):
if type(dates) == tuple:
return dates[0] <= date_from_item(fullname) <= dates[1]
else:
return True
else:
return False
def sorted_listdir(filelist):
    """Sort file names case-insensitively, padding the stem with spaces so
    that shorter stems order like in the Windows explorer."""
    if not filelist:
        return filelist
    like_windows_explorer = True
    if not like_windows_explorer:
        return sorted(filelist, key=str.lower)
    stem_width = max(len(os.path.splitext(fn)[0]) for fn in filelist)
    def explorer_key(fn):
        stem, extension = os.path.splitext(fn.lower())
        return stem.ljust(stem_width, ' ') + extension
    return sorted(filelist, key=explorer_key)
def list_of_files(sourcedir, recursive):
"""
Return the list of full paths for files in source directory
"""
result = list()
if recursive is False:
listdir = sorted_listdir(os.listdir(sourcedir))
if '.nomedia' not in listdir:
for basename in listdir:
result.append(os.path.join(sourcedir, basename))
else:
for root, dirs, files in os.walk(sourcedir):
if '.nomedia' not in files:
for basename in sorted_listdir(files):
result.append(os.path.join(root, basename))
return result
def list_of_medias(args, sourcedir, recursive):
"""
Return the list of full paths for pictures and movies in source directory
"""
files = list_of_files(sourcedir, recursive)
return [_ for _ in files if is_media_within_dates(_, args.dates)]
def list_of_medias_ext(args, sourcedir):
"""
Return the list of full paths for pictures and movies in source directory
plus subdirectories containing media
"""
result = list()
listdir = sorted_listdir(os.listdir(sourcedir))
if '.nomedia' not in listdir:
for basename in listdir:
fullname = os.path.join(sourcedir, basename)
if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):
result.append(fullname)
else:
if is_media_within_dates(fullname, args.dates):
result.append(fullname)
return result
def contains_media(args, dirname):
    """Return True when *dirname*, searched recursively, contains at least one
    media file within args.dates; directories holding a .nomedia marker file
    are skipped."""
    for root, dirs, files in os.walk(dirname):
        if '.nomedia' not in files:
            for basename in files:
                if is_media_within_dates(os.path.join(root, basename), args.dates):
                    return True
    else:
        # for/else: reached only when the walk completes without finding media.
        # NOTE(review): indentation reconstructed — confirm the else binds to
        # the outer for (for/else) and not to the .nomedia test.
        return False
def dispatch_post_items(list_of_post_items):
    """Split a post item list into (subdir items, plain media items)."""
    subdirs = []
    medias = []
    for item in list_of_post_items:
        (subdirs if type(item) is PostSubdir else medias).append(item)
    return subdirs, medias
# -- Creation of gallery element ----------------------------------------------
def create_item(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
if os.path.isfile(media_fullname):
if is_image_file(media_fullname):
return create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
else:
return create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
else:
return create_item_subdir(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
def create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
media_basename = os.path.basename(media_fullname)
media_relname = relative_name(media_fullname, sourcedir)
thumb_basename = thumbname(media_relname, key)
thumb_fullname = os.path.join(thumbdir, thumb_basename)
try:
info, infofmt = get_image_info(media_fullname)
infofmt = media_basename + ': ' + infofmt
thumbsize = size_thumbnail(info[2], info[3], thumbmax)
make_thumbnail_image(args, media_fullname, thumb_fullname, thumbsize)
return PostImage(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
thumbsize, infofmt)
except PIL.UnidentifiedImageError:
# corrupted image
warning('Unable to read image', media_fullname)
return None
def create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
media_basename = os.path.basename(media_fullname)
media_relname = relative_name(media_fullname, sourcedir)
thumb_basename = thumbname(media_relname, key)
thumb_fullname = os.path.join(thumbdir, thumb_basename)
info_fullname = os.path.splitext(thumb_fullname)[0] + '.info'
try:
info, infofmt = get_video_info(media_fullname, info_fullname)
infofmt = media_basename + ': ' + infofmt
thumbsize = size_thumbnail(info[2], info[3], thumbmax)
make_thumbnail_video(args, media_fullname, thumb_fullname, thumbsize, duration=info[5])
return PostVideo(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
thumbsize, infofmt)
except CalledProcessError:
# corrupted video
warning('Unable to read video', media_fullname)
return None
def create_item_subdir(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
    """Build the gallery item (PostSubdir) for one subdirectory.

    The subdirectory gets its own html page and a composite thumbnail made
    from the items it contains. Returns None when the subdirectory contains
    no media at all.
    """
    media_basename = os.path.basename(media_fullname)
    media_relname = relative_name(media_fullname, sourcedir)
    thumb_basename = thumbname(media_relname, key)
    thumb_fullname = os.path.join(thumbdir, thumb_basename)
    info, infofmt = None, None
    # 4:3 thumbnail shape for directories
    thumbsize = (thumbmax, int(round(thumbmax / 640 * 480)))
    medias_ext = list_of_medias_ext(args, media_fullname)
    if not medias_ext:
        return None
    item = PostSubdir(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
                      thumbsize, infofmt)
    item.htmname = os.path.join(os.path.dirname(thumbdir), media_relname + args.html_suffix)
    if args.thumbnails.subdir_caption:
        item.caption = media_basename
    else:
        item.caption = ''
    _, posts = make_posts(args, media_fullname)
    item.posts = posts
    # flatten the dcim items of every post of the subdirectory
    items = [item for post in posts for item in post.dcim]
    item.sublist = items
    make_thumbnail_subdir(args, media_fullname, thumb_fullname, thumbsize, items, thumbdir)
    return item
def relative_name(media_fullname, sourcedir):
    """
    Flatten the path of *media_fullname* relative to *sourcedir*.

    Path separators (and '#') are replaced with underscores, e.g.
    /root/sub/deeper2/deepest/OCT_20000112_000004.jpg
    --> deeper2_deepest_OCT_20000112_000004.jpg
    """
    flat = os.path.relpath(media_fullname, sourcedir)
    for char in ('\\', '/', '#'):
        flat = flat.replace(char, '_')
    return flat
# -- Creation of posts --------------------------------------------------------
def make_posts(args, dirname):
    """Build the post list according to the selected organization mode."""
    if args.diary is True:
        # diary mode, optionally merged with a media directory
        if args.sourcedir:
            return make_posts_from_diary_and_dir(args)
        return make_posts_from_diary(args)
    if args.bydate is False:
        return make_posts_from_subdir(args, dirname)
    return make_posts_from_subdir_and_date(args, dirname)
def make_posts_from_diary(args):
    """Parse the diary index.md and complete each post media with thumbnails."""
    md_filename = os.path.join(args.root, 'index.md')
    if os.path.exists(md_filename):
        title, posts = parse_markdown(md_filename)
    else:
        error('File not found', md_filename)
    for post in posts:
        for media in post.medias:
            media_fullname = os.path.join(args.root, media.uri)
            # diary thumbnails are larger (400) than dcim ones (300)
            item = create_item(args, media_fullname, args.root, args.thumbdir, 'post', 400)
            media.thumb = item.thumb
            media.thumbsize = item.thumbsize
            media.descr = item.descr
    return title, posts
def create_items_by_date(args, medias, posts):
    """Group gallery items by date, keeping only the required dates.

    Returns a dict mapping date -> list of items sorted by time of day.
    """
    # list of required dates
    if args.dates == 'diary':
        required_dates = {post.date for post in posts}
    else:
        required_dates = {date_from_item(media) for media in medias}
        if type(args.dates) == tuple:
            # restrict to the requested [date1, date2] interval
            date1, date2 = args.dates
            required_dates = {date for date in required_dates if date1 <= date <= date2}
    bydate = defaultdict(list)
    for media_fullname in medias:
        date = date_from_item(media_fullname)
        if date in required_dates:
            item = create_item(args, media_fullname, args.sourcedir, args.thumbdir, 'dcim', 300)
            if item:
                bydate[date].append(item)
    for date, liste in bydate.items():
        liste.sort(key=lambda item: time_from_item(item.uri))
    return bydate
def make_posts_from_diary_and_dir(args):
    """Merge the diary posts with the medias found in the source directory."""
    title, posts = make_posts_from_diary(args)
    # list of all pictures and movies
    medias = list_of_medias(args, args.sourcedir, args.recursive)
    bydate = create_items_by_date(args, medias, posts)
    # make list of extra dates (not in posts)
    extradates = set(bydate) - {post.date for post in posts}
    # complete posts with extra dates
    for date in extradates:
        post = Post.from_date(date)
        post.extra = True
        bisect.insort(posts, post)
    # several posts can have the same date, only the first one is completed with dcim medias
    for post in posts:
        if post.date in bydate and post.daterank == 1:
            post.dcim = bydate[post.date]
    return title, posts
def make_posts_from_subdir(args, dirname):
    """Build a single post containing all the medias of a directory."""
    # list of pictures and movies plus subdirectories
    if args.bydir is False:
        medias_ext = list_of_medias(args, dirname, args.recursive)
    else:
        medias_ext = list_of_medias_ext(args, dirname)
    #required_dates = get_required_dates(args, medias_ext, posts=None)
    #medias_ext_bis = []
    #for media in medias_ext:
    #    if complies_with_required_dates(media):
    #        medias_ext_bis.append(media)
    # complete posts
    postmedias = list()
    for item in medias_ext:
        postmedia = create_item(args, item, args.sourcedir, args.thumbdir, 'dcim', 300)
        if postmedia is not None:
            postmedias.append(postmedia)
    post = Post(date='00000000', text='', medias=[])
    post.dcim = postmedias
    posts = [post]
    # fall back on the drive name when sourcedir is a drive root
    title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.sourcedir)[0]
    return title, posts
def make_posts_from_subdir_and_date(args, dirname):
    """Build posts organized by date, with an optional leading post of subdirs."""
    # list of all pictures and movies
    if args.bydir is False:
        medias = list_of_medias(args, dirname, args.recursive)
        subdirs = []
    else:
        medias_ext = list_of_medias_ext(args, dirname)
        medias = [_ for _ in medias_ext if is_media(_)]
        subdirs = [_ for _ in medias_ext if not is_media(_)]
    # create list of posts with a single post containing all subdirs
    posts = list()
    items = list()
    for media_fullname in subdirs:
        item = create_item(args, media_fullname, args.sourcedir, args.thumbdir, 'dcim', 300)
        if item:
            items.append(item)
    if items:
        post = Post(date='00000000', text='', medias=[])
        post.dcim = items
        posts.append(post)
    bydate = create_items_by_date(args, medias, posts)
    # add dates
    for date in sorted(bydate):
        post = Post.from_date(date)
        post.dcim = bydate[post.date]
        posts.append(post)
    # fall back on the drive name when sourcedir is a drive root
    title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.sourcedir)[0]
    return title, posts
# -- Creation of html page from directory tree --------------------------------
def create_gallery(args):
    """Generate the gallery html pages, then purge obsolete files."""
    title, posts = make_posts(args, args.sourcedir)
    dest_htmlname = os.path.join(args.dest, args.rootname)
    print_html(args, posts, title, dest_htmlname, 'regular')
    purge_htmlfiles(args, posts)
    if args.diary and not args.sourcedir:
        # pure diary gallery: thumbnails are purged in diary mode
        purge_thumbnails(args, args.thumbdir, posts, diary=True)
    else:
        purge_thumbnails(args, args.thumbdir, posts)
# -- Creation of diary from medias --------------------------------------------
def create_diary(args):
    """Create a diary index.md skeleton with one empty post per media date."""
    # list of all pictures and movies
    medias = list_of_medias(args, args.sourcedir, args.recursive)
    # list of required dates
    if args.dates == 'diary':
        # 'diary' is rejected for --create in setup_part2, hence unreachable here
        assert 0
    else:
        required_dates = {date_from_item(media) for media in medias}
        if type(args.dates) == tuple:
            date1, date2 = args.dates
            required_dates = {date for date in required_dates if date1 <= date <= date2}
    title = args.sourcedir
    posts = list()
    for date in sorted(required_dates):
        posts.append(Post.from_date(date))
    os.makedirs(args.root, exist_ok=True)
    print_markdown(posts, title, os.path.join(args.root, 'index.md'))
# -- Export to blogger---------------------------------------------------------
def online_images_url(args):
    """Fetch the blogger page and extract the urls of online images and videos.

    Returns (online_images, online_videos): online_images maps an image
    basename to its (href, thumb) pair; online_videos is the list of video
    iframes in page order.
    """
    try:
        if args.urlblogger.startswith('http:') or args.urlblogger.startswith('https:'):
            with urlopen(args.urlblogger) as u:
                buffer = u.read()
        else:
            with open(args.urlblogger, 'rb') as f:
                buffer = f.read()
    except Exception:
        # bug fix: was a bare except, which also swallowed SystemExit and
        # KeyboardInterrupt; error() exits, so buffer is always bound below
        error('Unable to read url', args.urlblogger)
    buffer = buffer.decode('utf-8')
    online_images = dict()
    for match in re.finditer('<div class="separator"((?!<div).)*?</div>', buffer, flags=re.DOTALL):
        div_separator = match.group(0)
        div_separator = div_separator.replace(' ', '')
        elem_div = objectify.fromstring(div_separator)
        for elem_a in elem_div.iterchildren(tag='a'):
            href = elem_a.get("href")
            thumb = elem_a.img.get("src")
            online_images[os.path.basename(href)] = (href, thumb)
    # video insertion relies only on video order
    online_videos = list()
    for match in re.finditer('<iframe allowfullscreen="allowfullscreen".*?</iframe>', buffer, flags=re.DOTALL):
        iframe = match.group(0)
        online_videos.append(iframe)
    return online_images, online_videos
def compare_image_buffers(imgbuf1, imgbuf2):
    """
    Return True if images read on file are identical, False otherwise.
    """
    stream1 = io.BytesIO(imgbuf1)
    stream2 = io.BytesIO(imgbuf2)
    with stream1, stream2:
        delta = ImageChops.difference(Image.open(stream1), Image.open(stream2))
        # getbbox() is None when the difference image is entirely black
        return not delta.getbbox()
def check_images(args, posts, online_images):
    """Check that every image of the diary is available online.

    Returns True when all images are online, False otherwise. Videos are
    not checked for the moment.
    """
    result = True
    for post in posts:
        for media in post.medias:
            if type(media) is PostImage:
                if media.basename in online_images:
                    with open(os.path.join(args.root, media.uri), 'rb') as f:
                        imgbuf1 = f.read()
                    try:
                        with urlopen(online_images[media.basename][0]) as u:
                            imgbuf2 = u.read()
                    except FileNotFoundError:
                        print('File not found', online_images[media.basename][0])
                        # bug fix: was a bare "next" expression (a no-op);
                        # skip this media so imgbuf2 is never used undefined
                        continue
                    if compare_image_buffers(imgbuf1, imgbuf2) is False:
                        print('Files are different, upload', media.basename)
                    else:
                        if 1:
                            print('File already online', media.basename)
                else:
                    print('File is absent, upload', media.basename)
                    result = False
            elif type(media) is PostVideo:
                # no check for the moment
                print('Video not checked', media.basename)
            else:
                assert False
    return result
def compose_blogger_html(args, title, posts, imgdata, online_videos):
    """ Compose html with blogger image urls
    """
    for post in posts:
        for media in post.medias:
            if type(media) is PostImage:
                if media.uri not in imgdata:
                    print('Image missing: ', media.uri)
                else:
                    # replace the local uri with the online urls
                    img_url, resized_url = imgdata[media.uri]
                    media.uri = img_url
                    media.resized_url = resized_url
            elif type(media) is PostVideo:
                if not online_videos:
                    print('Video missing: ', media.uri)
                else:
                    # videos are matched by order of appearance, not by name
                    media.iframe = online_videos[0]
                    del online_videos[0]
            else:
                assert False
    return print_html(args, posts, title, '', target='blogger')
def prepare_for_blogger(args):
    """
    Export blogger html to clipboard.
    If --full, export complete html, otherwise export html extract ready to
    paste into blogger edit mode.
    """
    title, posts = parse_markdown(os.path.join(args.root, 'index.md'))
    online_images, online_videos = online_images_url(args)
    if args.check_images and check_images(args, posts, online_images) is False:
        pass
    html = compose_blogger_html(args, title, posts, online_images, online_videos)
    if args.full is False:
        # keep only the body content, drop scripts, and inline the style
        html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)
        html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)
        html = STYLE.replace('%%', '%') + html
    if args.dest:
        with open(args.dest, 'wt', encoding='utf-8') as f:
            f.write(html)
    else:
        clipboard.copy(html)
# -- Other commands -----------------------------------------------------------
def idempotence(args):
    """
    For testing identity between a diary file and the file obtained after reading
    and printing it. See testing.
    """
    title, posts = parse_markdown(os.path.join(args.root, 'index.md'))
    print_markdown(posts, title, os.path.join(args.dest, 'index.md'))
# -- Configuration file ------------------------------------------------------
# The following docstring is used to create the configuration file.
# NOTE: getconfig() must find every entry it reads here (or supply a default).
CONFIG_DEFAULTS = """\
[source]
; source directory
; value: valid path
sourcedir = .
; one web page per directory
; value: true or false
bydir = false
; dispatch medias by dates
; value: true or false
bydate = false
; include text and medias from diary file
; value: true or false
diary = false
; include subdirectories recursively (used when bydir is false)
; value: true or false
recursive = false
; interval of dates to include
; value: source|diary|yyyymmdd-yyyymmdd or empty (= source)
dates =
; github Pages compatibility (.html extension and no dot in directory names)
; value: true or false
github_pages = false
[thumbnails]
; specifies whether or not the gallery displays media description (size, dimension, etc)
; value: true or false
media_description = true
; specifies whether subdir captions are empty or the name of the subdir
; value: true or false
subdir_caption = true
; timestamp of thumbnail in video
; value: number of seconds
thumbdelay = 5
; maximum number of thumbnails to remove without user confirmation
; value: integer
threshold_thumbs = 10
; maximum number of html files to remove without user confirmation
; value: integer
threshold_htmlfiles = 3
[photobox]
; Allows to navigate between first and last images
; value: true or false
loop = false
; Show gallery thumbnails below the presented photo
; value: true or false
thumbs = true
; Should autoplay on first time or not
; value: true or false
autoplay = false
; Autoplay interval (less than 1000 will hide the autoplay button)
; value: milliseconds
time = 3000
; Disable/enable mousewheel image zooming
; value: true or false
zoomable = true
; Allow rotation of the image
; value: true or false
rotatable = true
; Change image using mousewheel left/right
; value: true or false
wheelNextPrev = true
"""
class MyConfigParser (ConfigParser):
    """ConfigParser accepting inline ';' comments, with input checking."""

    def __init__(self):
        ConfigParser.__init__(self, inline_comment_prefixes=(';',))

    def error(self, section, entry):
        error('Missing or incorrect config value:', '[%s]%s' % (section, entry))

    def _checked(self, getter, section, entry, default):
        # Delegate to the parent getter; report any failure through error()
        try:
            if default is None:
                return getter(self, section, entry)
            return getter(self, section, entry, raw=True, vars=None, fallback=default)
        except Exception as exc:
            print(exc)
            self.error(section, entry)

    def getint(self, section, entry, default=None):
        return self._checked(ConfigParser.getint, section, entry, default)

    def getboolean(self, section, entry, default=None):
        return self._checked(ConfigParser.getboolean, section, entry, default)
def configfilename(params):
    """Return the full name of the configuration file in the root directory."""
    return os.path.join(params.root, '.config.ini')
def createconfig(config_filename):
    """Write a brand new configuration file filled with the default settings."""
    with open(config_filename, 'wt') as f:
        # writing the template string in one call
        f.write(CONFIG_DEFAULTS)
def read_config(params):
    """Ensure the configuration file exists (creating or resetting it when
    required), then load it into params. Exits through error() on failure.
    """
    config_filename = configfilename(params)
    try:
        if not os.path.exists(config_filename) or params.resetcfg:
            createconfig(config_filename)
    except Exception:
        # bug fix: was a bare except, which also swallowed SystemExit and
        # KeyboardInterrupt
        error('Error creating configuration file')
    try:
        getconfig(params, config_filename)
    except Exception as e:
        error('Error reading configuration file.', str(e), 'Use --resetcfg')
def getconfig(options, config_filename):
    """Load the config file into options.source/.thumbnails/.photobox."""
    class Section:
        pass
    options.source = Section()
    options.thumbnails = Section()
    options.photobox = Section()
    config = MyConfigParser()
    config.read(config_filename)
    # [source]
    options.source.sourcedir = config.get('source', 'sourcedir')
    options.source.bydir = config.getboolean('source', 'bydir')
    options.source.bydate = config.getboolean('source', 'bydate')
    options.source.diary = config.getboolean('source', 'diary')
    options.source.recursive = config.getboolean('source', 'recursive')
    options.source.dates = config.get('source', 'dates')
    # defaults keep old config files (without these entries) working
    options.source.github_pages = config.getboolean('source', 'github_pages', default=False)
    # [thumbnails]
    options.thumbnails.media_description = config.getboolean('thumbnails', 'media_description')
    options.thumbnails.subdir_caption = config.getboolean('thumbnails', 'subdir_caption')
    options.thumbnails.thumbdelay = config.getint('thumbnails', 'thumbdelay')
    options.thumbnails.threshold_thumbs = config.getint('thumbnails', 'threshold_thumbs')
    options.thumbnails.threshold_htmlfiles = config.getint('thumbnails', 'threshold_htmlfiles', default=3)
    # [photobox]
    options.photobox.loop = config.getboolean('photobox', 'loop')
    options.photobox.thumbs = config.getboolean('photobox', 'thumbs')
    options.photobox.autoplay = config.getboolean('photobox', 'autoplay')
    options.photobox.time = config.getint('photobox', 'time')
    options.photobox.zoomable = config.getboolean('photobox', 'zoomable')
    options.photobox.rotatable = config.getboolean('photobox', 'rotatable')
    options.photobox.wheelNextPrev = config.getboolean('photobox', 'wheelNextPrev')
def setconfig(cfgname, section, key, value):
    """Set one entry of the configuration file and write it back to disk."""
    parser = MyConfigParser()
    parser.read(cfgname)
    parser.set(section, key, value)
    with open(cfgname, 'wt') as configfile:
        parser.write(configfile)
def setconfig_cmd(args):
    """Handle --setcfg: args.setcfg holds (section, key, value)."""
    section, key, value = args.setcfg
    setconfig(configfilename(args), section, key, value)
def update_config(args):
    """Write the creation parameters from the command line back to the config file."""
    # update only entries which can be modified from the command line (source section)
    updates = (
        ('sourcedir', args.sourcedir),
        ('bydir', BOOL[args.bydir]),
        ('bydate', BOOL[args.bydate]),
        ('diary', BOOL[args.diary]),
        ('recursive', BOOL[args.recursive]),
        ('dates', args.dates),
        ('github_pages', BOOL[args.github_pages]),
    )
    # manual update to keep comments
    cfgname = configfilename(args)
    with open(cfgname) as f:
        cfglines = [_.strip() for _ in f.readlines()]
    for key, value in updates:
        # replace the first line starting with the key
        for iline, line in enumerate(cfglines):
            if line.startswith(key):
                cfglines[iline] = f'{key} = {value}'
                break
    with open(cfgname, 'wt') as f:
        for line in cfglines:
            print(line, file=f)
# -- Error handling -----------------------------------------------------------
def warning(*msg):
    """Print a warning message in bright yellow (does not exit)."""
    print(colorama.Fore.YELLOW + colorama.Style.BRIGHT +
          ' '.join(msg),
          colorama.Style.RESET_ALL)
# Every error message error must be declared here to give a return code to the error
# NOTE: the line order defines the process exit codes (see errorcode); do not
# reorder or remove lines, only append.
ERRORS = '''\
File not found
Directory not found
No date in post
Incorrect date value:
Posts are not ordered
Unable to read url
No image source (--sourcedir)
No blogger url (--url)
Missing or incorrect config value:
Error creating configuration file
Error reading configuration file.
Incorrect date format
Incorrect parameters:
'''
def errorcode(msg):
    """Return the exit code for an error message (its 1-based index in ERRORS)."""
    declared = ERRORS.splitlines()
    return declared.index(msg) + 1
def error(*msg):
    """Print an error message in bright red and exit with its return code."""
    print(colorama.Fore.RED + colorama.Style.BRIGHT +
          ' '.join(msg),
          colorama.Style.RESET_ALL)
    # the first part of the message determines the exit code (see ERRORS)
    sys.exit(errorcode(msg[0]))
# -- Main ---------------------------------------------------------------------
# Maps a boolean to its config-file representation: BOOL[False] == 'false'
BOOL = ('false', 'true')
def parse_command_line(argstring):
    """Parse the command line string and normalize the resulting namespace."""
    parser = argparse.ArgumentParser(description=None, usage=USAGE)
    agroup = parser.add_argument_group('Commands')
    xgroup = agroup.add_mutually_exclusive_group()
    xgroup.add_argument('--gallery', help='source in --sourcedir',
                        action='store', metavar='<root-dir>')
    agroup.add_argument('--update', help='updates gallery with parameters in config file',
                        action='store', metavar='<root-dir>')
    xgroup.add_argument('--create', help='create journal from medias in --sourcedir',
                        action='store', metavar='<root-dir>')
    # testing
    xgroup.add_argument('--resetcfg', help='reset config file to defaults',
                        action='store', metavar='<root-dir>')
    xgroup.add_argument('--setcfg', help=argparse.SUPPRESS,
                        action='store', nargs=4, metavar='<root-dir>')
    xgroup.add_argument('--idem', help=argparse.SUPPRESS,
                        action='store', metavar='<root-dir>')
    # blogger
    xgroup.add_argument('--blogger',
                        help='input md, html blogger ready in clipboard',
                        action='store', metavar='<root-dir>')
    agroup = parser.add_argument_group('Parameters')
    agroup.add_argument('--bydir', help='organize gallery by subdirectory',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--bydate', help='organize gallery by date',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--diary', help='organize gallery using markdown file diary',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--recursive', help='--sourcedir scans recursively',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--dates', help='dates interval',
                        action='store', default=None)
    agroup.add_argument('--sourcedir', help='media directory',
                        action='store', default=None)
    agroup.add_argument('--github_pages', help='github Pages compatibility',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--dest', help='output directory',
                        action='store')
    agroup.add_argument('--forcethumb', help='force calculation of thumbnails',
                        action='store_true', default=False)
    agroup.add_argument('--full', help='full html (versus blogger ready html)',
                        action='store_true', default=False)
    agroup.add_argument('--check', dest='check_images', help='check availability of medias on blogger',
                        action='store_true')
    agroup.add_argument('--url', dest='urlblogger', help='blogger post url',
                        action='store')
    if argstring is None:
        print('Type "galerie -h" for help')
        sys.exit(1)
    else:
        args = parser.parse_args(argstring.split())
    if args.update and (args.bydir or args.bydate or args.diary or args.sourcedir or
                        args.recursive or args.dates or args.github_pages):
        error('Incorrect parameters:',
              '--update cannot be used with creation parameters, use explicit command')
    # convert the 'true'/'false' option strings to booleans
    args.bydir = args.bydir == 'true'
    args.bydate = args.bydate == 'true'
    args.diary = args.diary == 'true'
    args.recursive = args.recursive == 'true'
    args.dates = 'source' if (args.dates is None) else args.dates
    args.github_pages = args.github_pages == 'true'
    # args.root is the root directory of whichever command was given
    args.root = (
        args.create or args.gallery or args.update
        or args.blogger or args.idem or args.resetcfg
    )
    if args.setcfg:
        # --setcfg takes <root-dir> <section> <key> <value>
        args.root = args.setcfg[0]
        args.setcfg = args.setcfg[1:]
    return args
def setup_part1(args):
    """
    Made before reading config file (config file located in args.root).
    Check and normalize root path.
    """
    args.rootarg = args.root
    rootext = os.path.splitext(args.rootarg)[1]
    if rootext == '':
        # args.root is a directory: keep it as is
        pass
    else:
        # args.root is an html file name: use its directory as root
        args.root = os.path.dirname(args.root)
    if args.root:
        args.root = os.path.abspath(args.root)
        if not os.path.isdir(args.root):
            if args.gallery:
                os.mkdir(args.root)
            else:
                error('Directory not found', args.root)
def setup_part2(args):
    """
    Made after reading config file.
    Check for ffmpeg in path.
    Create .thumbnails dir if necessary and create .nomedia in it.
    Copy photobox file to destination dir.
    Handle priority between command line and config file.
    """
    if args.update:
        # --update: the config file is the single source of truth
        args.sourcedir = args.source.sourcedir
        args.bydir = args.source.bydir
        args.bydate = args.source.bydate
        args.diary = args.source.diary
        args.recursive = args.source.recursive
        args.dates = args.source.dates
        args.github_pages = args.source.github_pages
    elif args.gallery:
        # --gallery: the command line wins and is saved back to the config file
        args.source.sourcedir = args.sourcedir
        args.source.bydir = args.bydir
        args.source.bydate = args.bydate
        args.source.diary = args.diary
        args.source.recursive = args.recursive
        args.source.dates = args.dates
        args.source.github_pages = args.github_pages
        update_config(args)
    if args.github_pages:
        args.html_suffix = '.html'
    else:
        args.html_suffix = '.htm'
    rootext = os.path.splitext(args.rootarg)[1]
    if rootext:
        args.rootname = os.path.basename(args.rootarg)
    else:
        args.rootname = 'index' + args.html_suffix
    if args.sourcedir:
        args.sourcedir = os.path.abspath(args.sourcedir)
        if os.path.splitdrive(args.sourcedir)[0]:
            # normalize the drive letter to upper case (Windows)
            drive, rest = os.path.splitdrive(args.sourcedir)
            args.sourcedir = drive.upper() + rest
        if not os.path.isdir(args.sourcedir):
            error('Directory not found', args.sourcedir)
    else:
        if args.gallery and args.diary is False and args.update is None:
            error('Directory not found', 'Use --sourcedir')
    if args.dest:
        args.dest = os.path.abspath(args.dest)
    if args.dest is None:
        args.dest = args.root
    if args.blogger and args.urlblogger is None:
        error('No blogger url (--url)')
    if args.gallery or args.update:
        # check for ffmpeg and ffprobe in path
        for exe in ('ffmpeg', 'ffprobe'):
            try:
                check_output([exe, '-version'])
            except FileNotFoundError:
                error('File not found', exe)
        if args.github_pages:
            args.thumbrep = 'thumbnails'
        else:
            args.thumbrep = '.thumbnails'
        args.thumbdir = os.path.join(args.dest, args.thumbrep)
        if not os.path.exists(args.thumbdir):
            os.mkdir(args.thumbdir)
            open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()
        favicondst = os.path.join(args.dest, 'favicon.ico')
        if not os.path.isfile(favicondst):
            faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')
            shutil.copyfile(faviconsrc, favicondst)
        photoboxdir = os.path.join(args.dest, 'photobox')
        if not os.path.exists(photoboxdir):
            photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')
            shutil.copytree(photoboxsrc, photoboxdir)
    if args.dates:
        if not(args.gallery or args.create):
            # silently ignored for the moment, otherwise all other commands will
            # launch a warning or an error on the default --dates value
            pass
        if args.dates == 'source':
            pass
        elif args.dates == 'diary':
            if args.create:
                error('Incorrect date format', args.dates)
        elif re.match(r'\d+-\d+', args.dates):
            date1, date2 = args.dates.split('-')
            if validate_date(date1) and validate_date(date2):
                # store the interval as a (date1, date2) tuple
                args.dates = date1, date2
            else:
                error('Incorrect date format', args.dates)
        else:
            error('Incorrect date format', args.dates)
def main(argstring=None):
    """Program entry point; argstring defaults to the process command line."""
    colorama.init()
    args = parse_command_line(argstring)
    setup_part1(args)
    read_config(args)
    setup_part2(args)
    try:
        # dispatch on the (mutually exclusive) command options
        if args.gallery or args.update:
            create_gallery(args)
        elif args.create:
            create_diary(args)
        elif args.blogger:
            prepare_for_blogger(args)
        elif args.idem:
            idempotence(args)
        elif args.setcfg:
            setconfig_cmd(args)
    except KeyboardInterrupt:
        warning('Interrupted by user.')
if __name__ == '__main__':
    main(' '.join(sys.argv[1:]))
|
class subset:
    """A weighted subset of items, remembering its size and origin index."""

    def __init__(self, weight, itemSet, size, setNum):
        self.weight = weight    # accumulated weight of the combined subsets
        self.itemSet = itemSet  # set of covered items
        self.size = size        # number of covered items
        self.setNum = setNum    # index, or space-separated indices, of origin subsets
def findCover(base, arr):
    """
    Depth-first search for a cover: extend `base` with pairwise-disjoint
    subsets taken from `arr` until all rangeOfVal items are covered.

    Returns the covering `subset` (with accumulated weight and indices)
    or None when no cover can be built from `base`.
    """
    uniq = []  # subsets of arr disjoint from base (candidates for recursion)
    uni = []   # unions of base with each candidate
    if len(base.itemSet) == rangeOfVal:
        # base already covers everything
        return base
    remain = rangeOfVal
    # Search through arr to find all potential subsets
    for i in arr:
        if base.itemSet.isdisjoint(i.itemSet) == True:
            # Unique array
            uniq.append(i)
            remain = remain - len(i.itemSet)
            addedSub = subset(base.weight + i.weight,
                              base.itemSet.union(i.itemSet),
                              base.size + i.size,
                              str(base.setNum) + " " + str(i.setNum))
            # Union array
            uni.append(addedSub)
            print("added:", addedSub.itemSet)
            if addedSub.size == rangeOfVal:
                return addedSub
    print()
    for j in uni:
        if remain == len(base.itemSet):
            # bug fix: the recursive result was discarded, so covers found
            # more than one level deep were never returned to the caller
            cover = findCover(j, uniq)
            if cover is not None:
                return cover
    return None
# fileName="./inputs/input_group115.txt"
fileName="Input_attempt3.txt"
f=open(fileName, "r")
rangeOfVal=int(f.readline()) # n
numOfSub=int(f.readline()) # m
num=0
# 500001 acts as an "infinity" sentinel; presumably weights are <= 500000 -- TODO confirm
minWeight=500001
minCover=[]
subsetList=[]
# Loop to read through file and set up the data structures
# to hold all the values
while True:
    itemSet=f.readline()
    if itemSet == "":
        break
    else:
        weight=int(f.readline())
        arrItems=itemSet.split(" ")
        i=0
        # Convert each item into an int and delete any \n
        for item in arrItems:
            if item != "\n":
                arrItems[i]=int(item)
                i += 1
            else:
                arrItems.remove("\n")
        arrItems.sort()
        s=subset(weight, set(arrItems), len(arrItems), num)
        subsetList.append(s)
        num += 1
# print("---------------------------------------------")
# for s in subsetList:
#     print(s.itemSet)
# print("---------------------------------------------")
# Try every subset as the starting point of a cover search,
# only combining it with the subsets that come after it
covers = []
inc = 1
for base in subsetList:
    # print()
    print("base:", base.setNum)
    o = findCover(base, subsetList[inc:len(subsetList)])
    if o != None:
        print("here!")
        covers.append(o)
        # print(o.setNum)
    inc += 1
# Keep the lightest cover found
for w in covers:
    if w.weight < minWeight:
        minWeight = w.weight
        # if type(s.setNum) == int: continue
        # else: minCover = (s.setNum).split(" ").sort()
        minCover = w.setNum
print(minWeight)
print(minCover)
# for cov in covers:
#     print(cov.itemSet)
# #
|
def primeiras_ocorrencias (str):
    """Return a dict mapping each character to the index of its first occurrence."""
    indices = {}
    for posicao, caractere in enumerate(str):
        # setdefault keeps the first index seen for each character
        indices.setdefault(caractere, posicao)
    return indices
3,380 | 848394e1e23d568f64df8a98527a8e177b937767 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
class LibpopplerConan(ConanFile):
    """Conan recipe for poppler, the xpdf-based PDF rendering library."""
    name = "poppler"
    version = "0.73.0"
    description = "Poppler is a PDF rendering library based on the xpdf-3.0 code base"
    topics = ("conan", "libpoppler", "poppler", "pdf")
    url = "https://github.com/zehome/conan-poppler"
    homepage = "https://poppler.freedesktop.org/"
    author = "Laurent Coustet <ed@zehome.com>"
    license = "GPL-3.0-only"
    generators = "cmake"
    exports_sources = "CMakeLists.txt", "patches/*.diff"
    settings = "os", "compiler", "build_type", "arch"
    # directory the upstream tarball is renamed to in source()
    _source_subfolder = "poppler-src"
    options = {
        "shared": [True, False], "with_lcms": [True, False],
        "with_cpp": [True, False], "with_cairo": [True, False],
        "with_qt": [True, False], "with_splash": [True, False],
        "with_curl": [True, False],
    }
    default_options = (
        "shared=False", "with_qt=False", "with_lcms=False", "with_cpp=False",
        "with_cairo=False", "with_curl=False",
        #LC: Specific
        # "libpng:shared=False",
        # "freetype:with_png=False", "freetype:shared=False",
        # "freetype:with_zlib=False", "freetype:with_bzip2=False",
        # "zlib:shared=False",
        # "openjpeg:shared=False",
        # "cairo:shared=False",
        # "glib:shared=False",
        # "libcurl:shared=False", "OpenSSL:shared=False",
        "qt:opengl=desktop", "qt:qtxmlpatterns=True", "qt:shared=True",
    )
    requires = (
        "zlib/1.2.11@conan/stable",
        "libpng/1.6.36@bincrafters/stable",
        "libjpeg/9c@bincrafters/stable",
        "openjpeg/2.3.0@bincrafters/stable",
        "libtiff/4.0.9@bincrafters/stable",
        "freetype/2.9.1@clarisys/stable",
    )
    def config_options(self):
        """Drop options that do not apply to the current platform."""
        if self.settings.os == "Windows":
            # NOTE(review): the declared option is "with_cairo", not "cairo";
            # removing "cairo" looks wrong -- confirm against conan behavior
            self.options.remove("cairo")
    def configure(self):
        """Add conditional requirements and validate option combinations."""
        if self.options.with_lcms:
            self.requires.add("lcms/2.9@bincrafters/stable")
        if self.options.with_qt:
            self.requires.add("qt/5.12.0@clarisys/stable")
        if self.settings.os != "Windows" and self.options.with_cairo:
            self.requires.add("cairo/1.15.14@bincrafters/stable")
            self.requires.add("glib/2.56.1@bincrafters/stable")
        if self.settings.os == "Windows" and not self.options.with_splash:
            raise ConanInvalidConfiguration("Option with_splash=True is mandatory on windows")
        if self.options.with_curl: # TODO: does not link on windows / shared=False
            self.requires.add("libcurl/7.61.1@bincrafters/stable")
        # if self.settings.os != "Windows":
        #     self.requires.add("fontconfig/2.13.1@clarisys/stable")
    def source(self):
        """Download the upstream tarball and inject the wrapper CMakeLists."""
        source_url = "https://poppler.freedesktop.org/"
        tools.get("{0}/poppler-{1}.tar.xz".format(source_url, self.version))
        extracted_dir = self.name + "-" + self.version
        if os.path.exists(self._source_subfolder):
            shutil.rmtree(self._source_subfolder)
        os.rename(extracted_dir, self._source_subfolder)
        # TODO: Ugly.. May need to be replaced by something
        # better
        os.rename(os.path.join(self._source_subfolder, "CMakeLists.txt"),
                  os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"))
        shutil.copy("CMakeLists.txt",
                    os.path.join(self._source_subfolder, "CMakeLists.txt"))
    def _configure_cmake(self):
        """Translate the conan options into poppler's CMake definitions."""
        cmake = CMake(self)
        cmake.verbose = True
        cmake.definitions["ENABLE_SPLASH"] = self.options.with_splash
        cmake.definitions["ENABLE_ZLIB"] = True
        cmake.definitions["BUILD_QT5_TESTS"] = False
        cmake.definitions["ENABLE_CPP"] = self.options.with_cpp
        cmake.definitions["ENABLE_CMS"] = "lcms2" if self.options.with_lcms else 'none'
        cmake.definitions["ENABLE_LIBCURL"] = self.options.with_curl
        if self.settings.os == "Windows":
            cmake.definitions["LIB_SUFFIX"] = ""
            # no fontconfig on windows
            cmake.definitions["FONT_CONFIGURATION"] = "win32"
        cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
        cmake.configure(source_folder=self._source_subfolder)
        return cmake
    def build(self):
        cmake = self._configure_cmake()
        #shutil.rmtree(os.path.join(self._source_subfolder, 'cmake'))
        cmake.build()
    def package(self):
        """Install through CMake then copy license, headers and libraries."""
        self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        # If the CMakeLists.txt has a proper install method, the steps below may be redundant
        # If so, you can just remove the lines below
        include_folder = os.path.join(self._source_subfolder, "include")
        self.copy(pattern="*", dst="include", src=include_folder)
        self.copy(pattern="*.dll", dst="bin", keep_path=False)
        self.copy(pattern="*.lib", dst="lib", keep_path=False)
        self.copy(pattern="*.a", dst="lib", keep_path=False)
        self.copy(pattern="*.so*", dst="lib", keep_path=False)
        self.copy(pattern="*.dylib", dst="lib", keep_path=False)
    def package_info(self):
        # expose every built library to consumers
        self.cpp_info.libs = tools.collect_libs(self)
|
"""
This module takes care of starting the API Server, Loading the DB and Adding the endpoints
"""
import os
from flask import Flask, request, jsonify, url_for
from flask_migrate import Migrate
from flask_swagger import swagger
from flask_cors import CORS
from flask_jwt_extended import (
    JWTManager, jwt_required, create_access_token, create_refresh_token,
    get_jwt_identity  # fixed: stray '*' after this name was a syntax error
)
from utils import APIException, generate_sitemap
from models import db
from models import User
from passlib.hash import pbkdf2_sha256 as sha256
# Flask application and extensions setup
app = Flask(__name__)
app.url_map.strict_slashes = False
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_CONNECTION_STRING')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['JWT_SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = 3600  # seconds
MIGRATE = Migrate(app, db)
db.init_app(app)
CORS(app)
jwt = JWTManager(app)  # fixed: stray '*' after the call was a syntax error
# Handle/serialize errors like a JSON object
@app.errorhandler(APIException)
def handle_invalid_usage(error):
    # serialize the APIException into a JSON body with its status code
    return jsonify(error.to_dict()), error.status_code
# generate sitemap with all your endpoints
@app.route('/')
def sitemap():
    """Render an index page listing every registered endpoint."""
    html = generate_sitemap(app)
    return html
@app.route('/hello', methods=['POST', 'GET'])
@jwt_required
def handle_hello():
    """JWT-protected probe endpoint: echoes the identity stored in the token."""
    return jsonify({"hello": get_jwt_identity()}), 200
@app.route('/login', methods=['POST'])
def handle_login():
    """Issue access/refresh JWTs for a valid username/password pair.

    BUG FIX: a stray '*' after the route decorator was a syntax error.
    NOTE(review): a bad password returns 404 (kept for client compatibility;
    401 would be the conventional status).
    """
    data = request.json
    user = User.query.filter_by(username=data["username"]).first()
    if user is None:
        return jsonify({
            "error": "el usuario no existe"
        }), 404
    if sha256.verify(data["password"], user.password):
        access = create_access_token(identity=data["username"])
        refresh = create_refresh_token(identity=data["username"])
        return jsonify({
            "token": access,
            "refresh": refresh
        }), 200
    return jsonify({
        "error": "la contraseña no es valida"
    }), 404
@app.route('/register', methods=['POST'])
def handle_register():
    """Create a new user, storing a pbkdf2-sha256 hash of the password.

    BUG FIX: a stray '*' after the route decorator was a syntax error.
    """
    data = request.json
    user = User()
    user.username = data["username"]
    user.mail = data["mail"]
    # Never persist the plain-text password.
    user.password = sha256.hash(data["password"])
    db.session.add(user)
    db.session.commit()
    return jsonify(user.serialize()), 200
# this only runs if `$ python src/main.py` is executed
if __name__ == '__main__':
    # PORT can be overridden by the environment; defaults to 3000.
    PORT = int(os.environ.get('PORT', 3000))
    app.run(host='0.0.0.0', port=PORT, debug=False)
|
3,382 | 07332e2da5458fda2112de2507037a759d3c62db | def main():
    # Prompt for a day code and print the matching weekday name.
    num = int(input('dia: '))
    dia(num)
def dia(a):
    """Print the weekday name for code *a* (1 = Sunday, 2 = Monday)."""
    names = {1: 'Domingo !', 2: 'Segunda !'}
    print(names.get(a, 'valor invalido !'))
main()
|
3,383 | 60c849d213f6266aeb0660fde06254dfa635f10f | import optparse
from camera import apogee_U2000
if __name__ == "__main__":
    # CLI: toggle the Apogee U2000 cooler on/off.
    parser = optparse.OptionParser()
    group1 = optparse.OptionGroup(parser, "General")
    group1.add_option('--s', action='store', default=1, dest='mode', help='set cooler on/off')
    # BUG FIX: the option group was never attached, so '--s' did not exist.
    parser.add_option_group(group1)
    # BUG FIX: parse_args() was called twice; parse once.
    options, args = parser.parse_args()
    # BUG FIX: bool("0") is True, so the cooler could never be switched off.
    # Convert through int so '--s 0' really disables it.
    try:
        mode = bool(int(options.mode))
    except (TypeError, ValueError):
        print("Set must be boolean")
        raise SystemExit(1)  # previously fell through with 'mode' undefined
    c = apogee_U2000(camera_idx=0)
    c.setCooler(mode)
    c.disconnect()
|
3,384 | 68371acc58da6d986d94d746abb4fea541d65fdd | #!/usr/bin/env python
import argparse
import subprocess
def module_exists(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
def quote(items):
    """Wrap every item in single quotes (shell-style)."""
    quoted = []
    for item in items:
        quoted.append("'" + item + "'")
    return quoted
# Import unquote from its Python 3 location when available, else fall back
# to the Python 2 location.
if module_exists('urllib.parse'):
    from urllib.parse import unquote
else:
    from urllib import unquote
# Translate a curl-style invocation into an HTTPie command and run it.
parser = argparse.ArgumentParser()
parser.add_argument("url", help="The url to send the request to.")
parser.add_argument("--data")
parser.add_argument("-H", action="append", dest='headers')
# HTTPie arguments
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--timeout", type=int)
# curlie arguments
parser.add_argument("-q", "--quiet", action="store_true")
# ignored curl arguments
parser.add_argument("--compressed", action="store_true")
args = parser.parse_args()

flags = []
method = "GET"
data = None
if args.data:
    data = quote(unquote(args.data).split("&"))
    method = "POST"
    # BUG FIX: args.headers is None when no -H flag was given; guard before 'in'.
    if args.headers and "Content-Type: application/x-www-form-urlencoded" in args.headers:
        flags.append("-f")
# BUG FIX: quote(None) raised TypeError; default to an empty header list.
headers = quote(args.headers) if args.headers else []

httpieArgs = []
if len(flags) > 0:
    httpieArgs.append(" ".join(flags))
httpieArgs.append(method)
httpieArgs.append("'" + args.url + "'")
if headers and len(headers) > 0:
    httpieArgs.append(" ".join(headers))
if data and len(data) > 0:
    httpieArgs.append(' '.join(data))
if args.verbose:
    httpieArgs.append("--verbose")
if args.timeout is not None:
    # BUG FIX: args.timeout is an int (type=int); str() before concatenating.
    httpieArgs.append("--timeout " + str(args.timeout))

cmd = "http " + " ".join(httpieArgs)
if not args.quiet:
    print("\n" + cmd + "\n")
# NOTE(review): cmd is built from user-supplied input and executed with
# shell=True; consider shlex.quote()-ing the pieces.
subprocess.call(cmd, shell=True)
|
3,385 | 12cd3dbf211b202d25dc6f940156536c9fe3f76f | from aws_cdk import core as cdk
# For consistency with other languages, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import (core, aws_ec2 as ec2, aws_ecs as ecs, aws_ecr as ecr, aws_iam as iam,
aws_ecs_patterns as ecs_patterns)
class kdECSDemo(cdk.Stack):
    """CDK stack: a Fargate task whose app container shares a 'data' volume
    with a FireLens (Fluent Bit) log-router sidecar."""

    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # Create the VPC and the ECS cluster.
        # TODO: even when max_azs is specified, only 2 AZs get deployed.
        vpc = ec2.Vpc(self, "ECSVPC", cidr='10.0.0.0/16')
        cluster = ecs.Cluster(self, "ECSCluster", vpc=vpc)
        # Task definition reusing pre-existing roles (China-partition ARNs).
        task_definition = ecs.FargateTaskDefinition(self, "ECSDemoTaskDefinition",
            task_role=iam.Role.from_role_arn(self, "fargate_task_role", "arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens"),
            execution_role=iam.Role.from_role_arn(self, "fargate_task_execution_role", "arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole")
        )
        # Shared volume where the app writes its log files.
        task_definition.add_volume(name="data")
        # App Container (image pulled from the 'app-file' ECR repository).
        app_container = task_definition.add_container(
            "AppContainer",
            image=ecs.ContainerImage.from_ecr_repository(
                ecr.Repository.from_repository_name(self, id="app-file-image", repository_name="app-file")
            ),
            logging=ecs.FireLensLogDriver()
        )
        app_container.add_mount_points(ecs.MountPoint(
            container_path="/data/logs",
            read_only=False,
            source_volume="data"
        ))
        # app_container.add_port_mappings(ecs.PortMapping(container_port=80))
        # Log Router: Fluent Bit with an extra config baked into the image.
        fluentbit_container = ecs.FirelensLogRouter(self, "fluentbit_container",
            firelens_config=ecs.FirelensConfig(
                type=ecs.FirelensLogRouterType.FLUENTBIT,
                options=ecs.FirelensOptions(
                    config_file_value="/extra.conf"
                )
            ),
            task_definition=task_definition,
            image=ecs.ContainerImage.from_ecr_repository(
                ecr.Repository.from_repository_name(self, id="log-router", repository_name="firelens-file")
            ),
            logging=ecs.AwsLogDriver(stream_prefix="/ecs/firelens-fluentbit-demo/")
        )
        fluentbit_container.add_mount_points(ecs.MountPoint(
            container_path="/data/logs",
            read_only=False,
            source_volume="data"
        ))
        # Service creation (currently disabled).
        # ecs_patterns.ApplicationLoadBalancedFargateService(self, "ServiceWithLogging",
        #     cluster=cluster,
        #     desired_count=1, # Default is 1
        #     task_definition=task_definition,
        #     public_load_balancer=True) # Default is False
|
3,386 | c27ca6a8c38f2b96011e3a09da073ccc0e5a1467 | from django.apps import AppConfig
class Iapp1Config(AppConfig):
    """Django application configuration for the ``iapp1`` app."""
    name = 'iapp1'
|
3,387 | 4b83887e8d8e5c5dc7065354d24044d3c3a48714 | #!/bin/env python
import sys
import os
import collections
import re
import json
import urllib
import urllib.request
import uuid
import time
PROCESSOR_VERSION = "0.1"
def process(trace_dir, out_dir):
    """Parse rotated wiretrace logs from *trace_dir* into per-connection
    session JSON files written to *out_dir* (which must already exist).

    Phases:
      1. order and concatenate the log files (rotating past error.log),
      2. group raw trace text by client ip:port,
      3. split each group into request/response transactions and dump one
         JSON document per session.
    """
    # order files (guard against an empty directory)
    trace_files = sorted(os.listdir(trace_dir))
    if trace_files and trace_files[0] == "error.log":
        # the last traces may sit in an error log that wasn't rotated yet
        print("Rotating to properly order logs.")
        trace_files = collections.deque(trace_files)
        trace_files.rotate(-1)
    # combine all readable lines into one big string
    all_lines = ""
    for file_name in trace_files:
        print("Processing: " + str(file_name))
        with open(os.path.join(trace_dir, file_name), "rb") as f:
            for line in f:
                try:
                    all_lines += line.decode('utf-8')
                except UnicodeDecodeError:
                    print("weird text")
    # fix any solitary \n's (these are at the end of all the bodies)
    full_trace = re.sub(r'(?<!\r)\n', '\r\n\r\n', all_lines)
    # --- step 1: collect raw sessions keyed by client ip:port ---
    print("Collecting raw sessions")
    raw_sessions = dict()
    full_trace_iterator = iter(full_trace.splitlines(True))  # keepends
    for line in full_trace_iterator:
        # TODO IPv6
        # TODO Responses (we get them but do we want to do this a different way)
        send_recv = re.findall(r'(SEND|RECV)', line)
        ipv4_port = re.findall(r'[0-9]+(?:\.[0-9]+){3}:[0-9]+', line)
        if ipv4_port:
            port = re.findall(r':[0-9]+$', ipv4_port[0])
            if port and port[0] in (":443", ":80"):
                continue  # skip the server-connection side for now
        if send_recv and ipv4_port:
            ip_port_key = ipv4_port[0]
            this_trace = line
            # accumulate until the trace terminator (or end of input)
            while True:
                try:
                    next_line = next(full_trace_iterator)
                    this_trace += next_line
                    if re.findall(r'\[End Trace\]', next_line):
                        break
                except StopIteration:
                    break  # reached the end of the file
            if ip_port_key not in raw_sessions:
                raw_sessions[ip_port_key] = this_trace
                print(ip_port_key)
            else:
                raw_sessions[ip_port_key] += this_trace
    # --- step 2: build session JSONs from the raw sessions ---
    print("Constructing session JSONs")
    session_JSONs = dict()
    for session, raw_traces in raw_sessions.items():
        session_JSONs[session] = dict()
        session_JSONs[session]["version"] = PROCESSOR_VERSION
        session_JSONs[session]["encoding"] = "url_encoded"
        # separate header/body text from trace-metadata lines, keeping timestamps
        raw_text = ""
        timestamp_list = list()
        for line in raw_traces.splitlines(True):
            trace_line = re.findall(r'^\d{8}\.\d{2}h\d{2}m\d{2}s', line)
            timestamp = re.findall(r'\[\d{10}\.\d{3}\]', line)
            if timestamp:
                timestamp_list.append(timestamp[0][1:-1])
            if not trace_line:
                raw_text += line
        # session start timestamp (assumes every trace carried one -- TODO confirm)
        session_JSONs[session]["timestamp"] = timestamp_list[0]
        # parse requests/responses; chunks between delimiters are bodies
        count = -1
        delimiter = "\r\n\r\n"
        is_request_chunk = True
        session_JSONs[session]["txns"] = list()
        for chunk in raw_text.split(delimiter):
            request_chunk = re.findall(r'^\S+\s/\S+\sHTTP/\d\.\d\r\n', chunk)
            response_chunk = re.findall(r'^HTTP/\d\.\d\s\d{3}\s[\s\S]+\r\n', chunk)
            if request_chunk:
                count += 1
                # BUG FIX: this flag was misspelled 'is_reqeust_chunk', which
                # created a new variable; after the first response every later
                # request body was appended to the response instead.
                is_request_chunk = True
                chunk += delimiter
                if count <= len(session_JSONs[session]["txns"]):
                    session_JSONs[session]["txns"].append(dict())
                session_JSONs[session]["txns"][count]["request"] = dict()
                session_JSONs[session]["txns"][count]["request"]["timestamp"] = timestamp_list[count - 1]
                session_JSONs[session]["txns"][count]["request"]["headers"] = chunk
                session_JSONs[session]["txns"][count]["uuid"] = uuid.uuid4().hex
            elif response_chunk:
                is_request_chunk = False
                chunk += delimiter
                if count <= len(session_JSONs[session]["txns"]):
                    session_JSONs[session]["txns"].append(dict())
                session_JSONs[session]["txns"][count]["response"] = dict()
                session_JSONs[session]["txns"][count]["response"]["timestamp"] = timestamp_list[count - 1]
                session_JSONs[session]["txns"][count]["response"]["headers"] = chunk
            else:  # body chunk: append to whichever side we last saw
                try:
                    if count == -1:
                        continue  # garbage before the first request
                    chunk = urllib.parse.quote(chunk)
                    target = "request" if is_request_chunk else "response"
                    txn = session_JSONs[session]["txns"][count]
                    if "body" not in txn[target]:
                        txn[target]["body"] = chunk
                    else:
                        txn[target]["body"] += chunk
                except KeyError:
                    # for now we're dropping malformed bodies; revisit when validating
                    continue
        print(len(session_JSONs[session]["txns"]))
        session_JSONs[session]["txns"] = list(filter(bool, session_JSONs[session]["txns"]))
        if len(session_JSONs[session]["txns"]) == 0:
            del session_JSONs[session]
    # --- write out (one JSON per session; count encoding failures) ---
    unicode_errors = 0
    print("Writing sessions to disk")
    for session, data in session_JSONs.items():
        # NOTE(review): the session key contains ':', which is not a valid
        # file-name character on Windows -- confirm target platform.
        out_path = os.path.join(out_dir, 'session_' + str(session)) + '.json'
        out_file = open(out_path, 'w')
        try:
            json.dump(data, out_file)
            out_file.close()
        except Exception:
            unicode_errors += 1
            out_file.close()
            os.remove(out_path)
    print(str(unicode_errors) + " unicode errors")
def main(argv):
    """CLI entry point: validate the arguments, create the output directory,
    then run process() and report the elapsed time."""
    if len(argv) != 3:
        print("Script to preprocess trace logs for client.")
        print("Outputs JSONs to directory 'sessions'")
        print("Usage: python " + str(argv[0]) + " <in directory> <out directory>")
        return
    if not os.path.isdir(argv[1]):
        print(str(argv[1]) + " is not a directory. Aborting.")
        return
    if os.path.exists(argv[2]):
        print(str(argv[2]) + " already exists, choose another output directory!")
        return
    os.makedirs(argv[2])
    started = time.time()
    process(argv[1], argv[2])
    print("time taken:", (time.time() - started))
# Run the CLI when executed directly.
if __name__ == "__main__":
    main(sys.argv)
|
3,388 | 5b3514af839c132fda9a2e6e178ae62f780f291e | from matplotlib import cm
from datascience.visu.util import plt, save_fig, get_figure
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
# Model outputs as (score, label) rows. Negative-class rows are stored as
# (1 - raw_score, 0) so every first column reads as "probability of class 1".
y = np.array([
    [0.8869, 1.],
    [1.-0.578, 0.],
    [0.7959, 1.],
    [0.8618, 1.],
    [1.-0.2278, 0.],
    [0.6607, 1.],
    [0.7006, 1.],
    [1.-0.4859, 0.],
    [0.6935, 1.],
    [0.9048, 1.],
    [0.6681, 1.],
    [0.7585, 1.],
    [1.-0.5063, 0.],
    [1.-0.4516, 0.],
    [1.-0.5158, 0.],
    [1.-0.5873, 0.],
    [1.-0.7682, 0.],
    [0.8620, 1.],
    [1-0.7337, 0.],
    [0.9412, 1.],
    [1.-0.5819, 0.],
    [.2738, 1.],
    [1.-.5136, 0.],
    [.8819, 1.],
    [1.-.4387, 0.],
    [1.-.6257, 0.],
    [.7857, 1.],
    [1.-.3722, 0.],
    [1.-0.8049, 0.],
    [0.7864, 1.],
    [1.-0.2372, 0.],
    [0.7934, 1.],
    [0.9583, 1.],
    [0.9739, 1.],
    [1.-0.3556, 0.],
    [1.-0.2551, 0.],
    [1.-0.4532, 0.],
    [0.4605, 1.],
    [0.7572, 1.],
    [0.9496, 1.],
    [0.8268, 1.],
    [1.-0.4876, 0.],
    [0.8523, 1.],
    [1.-0.2629, 0.],
    [1.-0.9021, 0.],
    [0.6977, 1.],
    [0.9142, 1.],
    [1.-0.8175, 0.],
    [1.-0.4865, 0.],
    [0.9110, 1.],
    [1.-0.2159, 0.],
    [1.-0.6943, 0.],
    [1.-0.2753, 0.],
    [0.8590, 1.],
    [0.8273, 1.],
    [1.-0.5169, 0.],
    [1.-0.7412, 0.]
])
# ROC curve: scores in column 0, ground-truth labels in column 1.
fpr, tpr, thresholds = roc_curve(y[:, 1], y[:, 0], pos_label=1)
ax = plt('roc_curve').gca()
ax.set_xlim([-0.007, 1.0])
ax.set_ylim([0.0, 1.01])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic (AUC: %.3f)' % auc(fpr, tpr))
ax.plot([0, 1], [0, 1], color='red', linestyle='--', label='Random model')
ax.plot(fpr, tpr, color='yellow', label='IArt')
ax.plot([0, 0, 1], [0, 1, 1], color='green', linestyle='--', label='Perfect model')
ax.legend(loc="lower right")

# Confusion matrix at a 0.7 decision threshold, row-normalized.
ax = plt('confusion_matrix').gca()
y_threshold = (y > 0.7).astype(int)
matrix = confusion_matrix(y[:, 1], y_threshold[:, 0])
# BUG FIX: np.float was removed in NumPy 1.24 -> use the builtin float.
# keepdims=True makes each ROW divide by its own total; without it the (2,)
# row-sum vector broadcasts across columns, normalizing the wrong axis.
matrix = matrix / matrix.astype(float).sum(axis=1, keepdims=True)
im = ax.imshow(matrix, cmap=cm.Greys_r, extent=(-3, 3, 3, -3))
ax.axis('off')
get_figure('confusion_matrix').colorbar(im)
save_fig()
|
3,389 | 2465a73d958d88dcd27cfac75a4e7b1fcd6a884e | # -*- coding:utf-8 -*-
import datetime
import json
import os
import urllib
import requests
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import properties
from time import sleep
from appium import webdriver
def logPrint(logstr):
    """Append *logstr*, prefixed with a timestamp, to ./log/<script>-runlog.log
    and echo it to stdout.

    BUG FIX: the original derived the script name with split("/"), which
    leaves the whole path in the file name on Windows (this script otherwise
    uses '\\' paths); os.path handles both separators.
    """
    pyfileName = os.path.splitext(os.path.basename(str(__file__)))[0]
    filepath = os.path.join(".", "log", pyfileName + "-runlog.log")
    now = str(datetime.datetime.now())
    logstr = now + ' ' + logstr
    with open(filepath, 'a', encoding='utf-8') as f:
        print(logstr)
        f.write(logstr + '\t\n')
def isElementExist(driver, xpath):
    """Return True when *xpath* matches an element, False on any lookup error."""
    try:
        driver.find_element_by_xpath(xpath)
        return True
    # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
    except Exception:
        return False
def find_toast(driver, contains_message):
    '''Return True when a toast containing *contains_message* appears within 2s.'''
    locat = ("xpath", '//*[contains(@text,"' + contains_message + '")]')
    try:
        element = WebDriverWait(driver, 2).until(EC.presence_of_element_located(locat))
        return True
    except:
        return False
def restart_app(driver):
    """Broadcast the intent that asks the inbox core to restart the app."""
    driver.execute_script(
        "mobile: shell",
        {'command': 'am broadcast -a',
         'args': ['com.inhand.intent.INBOXCORE_RESTART_APP']},
    )
def wifi_disable(driver):
    """Turn Wi-Fi off via `svc wifi disable` (requires root)."""
    driver.execute_script(
        "mobile: shell",
        {'command': 'su 0', 'args': ['svc wifi disable']},
    )
def wifi_enable(driver):
    """Turn Wi-Fi on via `svc wifi enable` (requires root)."""
    driver.execute_script(
        "mobile: shell",
        {'command': 'su 0', 'args': ['svc wifi enable']},
    )
if __name__ == '__main__':
    # Ensure ./log exists (ignore the error when it already does).
    try:
        logpath = os.getcwd() + "\\log"
        # print(logpath)
        os.mkdir(logpath)
    except:
        pass
    # Remove the previous run log so this run starts fresh.
    pyfileName = str(__file__).split(".py")[0].split("/")[-1]
    logfilepath = ".\\log\\" + pyfileName + '-runlog.log'
    try:
        os.remove(logfilepath)
    except:
        pass
    # --- Fetch an OAuth2 token and the expected goods count from the platform ---
    # NOTE(review): credentials are hard-coded here -- move them to config.
    host = 'http://182.150.21.232:10081'
    requesturl = "/oauth2/access_token"
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
    }
    get_token_value = {
        "client_id": "000017953450251798098136",
        "client_secret": "08E9EC6793345759456CB8BAE52615F3",
        "grant_type": "password",
        "username": "chenzhiz@inhand.com.cn",
        "password": "czz123456",
        "password_type": "1",
        "language": "2"
    }
    data = urllib.parse.urlencode(get_token_value).encode('utf-8')
    url = host + requesturl
    request = urllib.request.Request(url, data, headers)
    token_response = urllib.request.urlopen(request).read().decode('utf-8')
    logPrint(token_response)
    access_token = json.loads(token_response)['access_token']
    requesturl = "/api/goods/list?cursor=0&limit=30&name=&access_token=" + access_token
    url = host + requesturl
    response = requests.get(url=url, headers={'Content-Type': 'application/json'})
    goods_count = json.loads(response.text)['total']
    print(goods_count)
    # --- Case 1: sync goods from the platform; verify progress text and result ---
    driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)
    sleep(0.5)
    wifi_enable(driver)
    sleep(0.5)
    # Clear previously downloaded pictures so a full sync is forced.
    opts1 = {'command': 'rm -rf',
             'args': ['/sdcard/inbox/data/picture']}
    redata = driver.execute_script("mobile: shell", opts1)
    driver.find_element_by_xpath("//android.widget.TextView[@text='货道配置']").click()
    driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
    driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
    # The progress dialog should show the total goods count from the platform.
    try:
        xpath = "//android.widget.TextView[contains(@text,'总商品数 " + str(goods_count) + "')]"
        logPrint(xpath)
        WebDriverWait(driver, 2, 0.5).until(lambda x: x.find_element_by_xpath(xpath))
        progressFlag = True
    except Exception as e:
        print(e)
        progressFlag = False
    if progressFlag:
        logPrint("同步过程:PASS")
    else:
        logPrint("同步过程:FAIL!!")
    # Wait (up to 3 minutes) for the progress spinner to disappear.
    loadmasklocator = ("xpath", "//android.widget.ProgressBar")
    try:
        WebDriverWait(driver, 180).until_not(EC.presence_of_element_located(loadmasklocator))
        completeFlag = True
    except Exception as e:
        completeFlag = False
    if completeFlag:
        logPrint("同步结果出现:PASS")
    else:
        logPrint("同步结果出现:FAIL!!")
    if isElementExist(driver, "//android.widget.TextView[contains(@text,'操作成功')]"):
        logPrint("同步成功:PASS")
    else:
        logPrint("同步成功:FAIL!!")
    driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
    sleep(20)
    # --- Case 2: re-sync without changes; expect the "already latest" result ---
    driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)
    driver.find_element_by_xpath("//android.widget.TextView[@text='货道配置']").click()
    driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
    driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
    try:
        WebDriverWait(driver, 180).until_not(EC.presence_of_element_located(loadmasklocator))
        completeFlag = True
    except Exception as e:
        completeFlag = False
    if completeFlag:
        logPrint("同步结果出现:PASS")
    else:
        logPrint("同步结果出现:FAIL!!")
    if isElementExist(driver, "//android.widget.TextView[contains(@text,'已经是最新配置')]"):
        logPrint("已经是最新配置:PASS")
    else:
        logPrint("已经是最新配置:FAIL!!")
    driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
    # --- Case 3: sync with Wi-Fi off; expect an "operation failed" dialog ---
    wifi_disable(driver)
    driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
    driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
    okdialoglocator = ("xpath", "//android.widget.TextView[contains(@text,'操作失败')]")
    try:
        WebDriverWait(driver, 3).until(EC.presence_of_element_located(okdialoglocator))
        failFlag = True
    except Exception as e:
        failFlag = False
    if failFlag:
        logPrint("断网同步,操作失败:PASS")
    else:
        logPrint("断网同步,操作失败:FAIL!!")
    wifi_enable(driver)
    driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
    # Clear pictures again so the next sync has work to do.
    opts1 = {'command': 'rm -rf',
             'args': ['/sdcard/inbox/data/picture']}
    redata = driver.execute_script("mobile: shell", opts1)
    sleep(10)
    # --- Case 4: start a sync, then drop Wi-Fi mid-sync ---
    driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
    driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
    sleep(5)
    wifi_disable(driver)
    loadmasklocator = ("xpath", "//android.widget.ProgressBar")
    try:
        WebDriverWait(driver, 180).until_not(EC.presence_of_element_located(loadmasklocator))
        completeFlag = True
    except Exception as e:
        completeFlag = False
    if completeFlag:
        logPrint("同步结果出现:PASS")
    else:
        logPrint("同步结果出现:FAIL!!")
    if isElementExist(driver, "//android.widget.TextView[contains(@text,'操作成功')]"):
        logPrint("断网结束同步:PASS")
    else:
        logPrint("断网结束同步:FAIL!!")
    driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
    sleep(12)
    # --- Case 5: attempt a sync while offline; expect a "platform" toast ---
    driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)
    driver.find_element_by_xpath("//android.widget.TextView[@text='货道配置']").click()
    driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
    noNetFlag = find_toast(driver, "平台")
    if noNetFlag:
        logPrint("未与平台建立连接:PASS")
    else:
        logPrint("未与平台建立连接:FAIL!!")
    wifi_enable(driver)
|
3,390 | c7258d77db2fe6e1470c972ddd94b2ed02f48003 | from multiprocessing import Process, Queue
def f(q):
    """Worker: print a marker 100 times, then push one result onto *q*."""
    for _ in range(100):
        print("come on baby")
    q.put([42, None, 'hello'])
if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    # Busy-loop in the parent; at j == 1800 block on q.get() until the
    # subprocess has produced its item.
    for j in range(0, 2000):
        if j == 1800:
            print(q.get())
        print(j)
# Characteristic: the main process and the subprocess run concurrently; at
# j == 1800 the parent waits on q.get() until the subprocess has put its item.
# 0
# 1
# 2
# 3
# 4
# 5
# 6
# 7
# 8
# 9
# 10
# 11
# 12
# 13
# 14
# 15
# 16
# 17
# ...
# ...
# 1276
# 1277
# 1278
# 1279
# 1280
# 1281
# 1282
# 1283
# 1284
# 1285
# 1286
# 1287
# 1288
# 1289
# 1290
# 1291
# 1292
# 1293
# 1294
# 1295
# come on baby
# 1296
# come on baby
# 1297
# come on baby
# 1298
# come on baby
# 1299
# come on baby
# 1300
# come on baby
# 1301
# come on baby
# 1302
# come on baby
# 1303
# 1304
# come on baby
# 1305
# come on baby
# 1306
# come on baby
# 1307
# come on baby
# 1308
# come on baby
# 1309
# come on baby
# 1310
# come on baby
# 1311
# come on baby
# 1312
# come on baby
# 1313
# come on baby
# 1314
# come on baby
# 1315
# come on baby
# 1316
# come on baby
# 1317
# come on baby
# 1318
# come on baby
# 1319
# come on baby
# 1320
# come on baby
# 1321
# come on baby
# 1322
# come on baby
# 1323
# come on baby
# 1324
# come on baby
# 1325
# come on baby
# 1326
# come on baby
# 1327
# come on baby
# 1328
# come on baby
# 1329
# come on baby
# 1330
# come on baby
# 1331
# come on baby
# 1332
# come on baby
# 1333
# come on baby
# 1334
# come on baby
# 1335
# come on baby
# 1336
# come on baby
# 1337
# come on baby
# 1338
# come on baby
# 1339
# come on baby
# 1340
# come on baby
# 1341
# come on baby
# 1342
# come on baby
# 1343
# come on baby
# 1344
# come on baby
# 1345
# come on baby
# 1346
# come on baby
# 1347
# come on baby
# 1348
# come on baby
# 1349
# come on baby
# 1350
# come on baby
# 1351
# come on baby
# 1352
# come on baby
# 1353
# come on baby
# 1354
# come on baby
# 1355
# come on baby
# 1356
# come on baby
# 1357
# come on baby
# 1358
# come on baby
# 1359
# come on baby
# 1360
# come on baby
# 1361
# come on baby
# 1362
# come on baby
# 1363
# come on baby
# 1364
# come on baby
# 1365
# come on baby
# 1366
# come on baby
# 1367
# come on baby
# 1368
# come on baby
# 1369
# come on baby
# 1370
# come on baby
# 1371
# come on baby
# 1372
# come on baby
# 1373
# come on baby
# 1374
# come on baby
# 1375
# come on baby
# 1376
# come on baby
# 1377
# come on baby
# 1378
# come on baby
# 1379
# come on baby
# 1380
# come on baby
# 1381
# come on baby
# 1382
# come on baby
# 1383
# come on baby
# 1384
# come on baby
# 1385
# come on baby
# 1386
# come on baby
# 1387
# come on baby
# 1388
# come on baby
# 1389
# come on baby
# 1390
# come on baby
# 1391
# come on baby
# 1392
# come on baby
# 1393
# come on baby
# 1394
# come on baby
# 1395
# come on baby
# 1396
# 1397
# 1398
# 1399
# 1400
# 1401
# 1402
# 1403
# 1404
# 1405
# ...
# ...
# 1786
# 1787
# 1788
# 1789
# 1790
# 1791
# 1792
# 1793
# 1794
# 1795
# 1796
# 1797
# 1798
# 1799
# [42, None, 'hello']
# 1800
# 1801
# 1802
# 1803
# 1804
# 1805
# 1806
# 1807
# 1808
# 1809
# ...
# ...
# 1989
# 1990
# 1991
# 1992
# 1993
# 1994
# 1995
# 1996
# 1997
# 1998
# 1999
|
3,391 | 5029f3e2000c25d6044f93201c698773e310d452 | ###
# This Python module contains commented out classifiers that I will no longer
# be using
###
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Using Decision trees
# dt = DecisionTreeClassifier(max_depth=None)
# dt.fit(X_train_cv, y_train)
# print("DT Accuracy = " + str(dt.score(X_dev_cv, y_dev)))
# Using AdaBoost (takes too long)
# clf = DecisionTreeClassifier()
# ada = AdaBoostClassifier(clf)
# ada.fit(X_train_cv, y_train)
# print("ADA accuracy = " + str(ada.score(X_dev_cv, y_dev)))
# Using Bagging as a classifier with KNN
# clf = KNeighborsClassifier(n_neighbors=10)
# bag = BaggingClassifier(clf, max_features=0.5, max_samples=0.5)
# bag.fit(X_top10_train, y_top10_train)
# print("Bag accuracy = " + str(bag.score(X_top10_dev, y_top10_dev)))
# Using a random forest classifier
# rforest = RandomForestClassifier(max_depth=10000)
# rforest.fit(X_train_cv, y_train)
# print("Random Forest accuracy = " + str(rforest.score(X_dev_cv, y_dev)))
|
3,392 | d2acc789224d66de36b319ae457165c1438454a3 | from django import template
from django.conf import settings
from django.utils.html import escape
from django.utils.translation import get_language
from cms.models import Page
from cms.conf.global_settings import LANGUAGE_NAME_OVERRIDE
register = template.Library()
# TODO: There's some redundancy here
# TODO: {% cms_title nav %}
class CmsSubpagesNode(template.Node):
    """Store the in-navigation children of a page in context[*varname*]."""
    def __init__(self, nav, varname):
        self.nav = nav
        self.varname = varname

    def render(self, context):
        nav = template.resolve_variable(self.nav, context)
        try:
            # *nav* may be either a Page instance or a primary key.
            if not isinstance(nav, Page):
                page = Page.objects.get(pk=nav)
            else:
                page = nav
        except Page.DoesNotExist:
            context[self.varname] = None
        else:
            pages = Page.objects.in_navigation().filter(parent=page)
            context[self.varname] = pages
        return ''
def cms_subpages(parser, token):
    """{% cms_subpages <page> as <var> %} -- compile-time tag parser."""
    tokens = token.contents.split()
    if len(tokens) != 4:
        raise template.TemplateSyntaxError, "'%s' tag requires three arguments" % tokens[0]
    if tokens[2] != 'as':
        raise template.TemplateSyntaxError, "Second argument to '%s' tag must be 'as'" % tokens[0]
    return CmsSubpagesNode(tokens[1], tokens[3])
cms_subpages = register.tag(cms_subpages)
class CmsNavigationNode(template.Node):
    """Store the in-navigation pages at depth *level* of the current path
    in context[*varname*] (None when the level is out of range)."""
    def __init__(self, level, varname):
        self.level = int(level)
        self.varname = varname

    def render(self, context):
        try:
            # 'path' (the ancestor chain) must be provided by the view context.
            path = template.resolve_variable('path', context)
        except template.VariableDoesNotExist:
            return ''
        if self.level >= 0 and self.level <= len(path):
            pages = Page.objects.in_navigation()
            if self.level == 0:
                # Top level: root pages only.
                pages = pages.filter(parent__isnull=True)
            else:
                pages = pages.filter(parent=path[self.level-1])
            context[self.varname] = pages
        else:
            context[self.varname] = None
        return ''
def cms_navigation_level(parser, token):
    """{% cms_navigation_level <level> as <var> %} -- compile-time tag parser."""
    tokens = token.contents.split()
    if len(tokens) != 4:
        raise template.TemplateSyntaxError, "'%s' tag requires three arguments" % tokens[0]
    if tokens[2] != 'as':
        raise template.TemplateSyntaxError, "Second argument to '%s' tag must be 'as'" % tokens[0]
    return CmsNavigationNode(tokens[1], tokens[3])
cms_navigation_level = register.tag(cms_navigation_level)
class CmsPageContentNode(template.Node):
    """Store a page's content for the current language in context[*varname*]."""
    def __init__(self, item, varname):
        self.item = item
        self.varname = varname

    def render(self, context):
        page = template.resolve_variable(self.item, context)
        context[self.varname] = page.get_content(context['language'])
        return ''
def cms_pagecontent(parser, token):
    """{% cms_pagecontent <page> as <var> %} -- compile-time tag parser."""
    tokens = token.contents.split()
    if len(tokens) != 4:
        raise template.TemplateSyntaxError, "'%s' tag requires three arguments" % tokens[0]
    if tokens[2] != 'as':
        raise template.TemplateSyntaxError, "Second argument to '%s' tag must be 'as'" % tokens[0]
    return CmsPageContentNode(tokens[1], tokens[3])
cms_pagecontent = register.tag(cms_pagecontent)
def cms_breadcrumbs(context, separator=None, style=None):
    """Inclusion tag: render cms/breadcrumbs.html for the current page.

    Implicitly returns None (empty template context) when 'page' or
    'language' is missing from the calling context.
    """
    if 'page' in context and 'language' in context:
        return {
            'page': context['page'],
            'language': context['language'],
            'separator': separator,
            'style': style,
        }
cms_breadcrumbs = register.inclusion_tag('cms/breadcrumbs.html', takes_context=True)(cms_breadcrumbs)
class CmsLanguageLinksNode(template.Node):
    """Render links to the current page in every other configured language."""
    def render(self, context):
        page = context['page']
        # LANGUAGE_NAME_OVERRIDE lets the project display custom language names.
        return ' '.join(['<a href="%s">%s</a>' % (page.get_absolute_url(code), dict(LANGUAGE_NAME_OVERRIDE).get(code, name)) for code, name in context['LANGUAGES'] if code != context['language']])
def cms_language_links(parser, token):
    """{% cms_language_links %} -- takes no arguments."""
    return CmsLanguageLinksNode()
cms_language_links = register.tag(cms_language_links)
class CmsLinkNode(template.Node):
    """Render a link to a page: either just the URL, or a full <a> tag
    (html=True) with 'active'/'active_path' classes, optionally per-language."""
    def __init__(self, page, language=None, html=False):
        self.page = page
        self.language = language
        self.html = html

    def render(self, context):
        page = template.resolve_variable(self.page, context)
        # When a language argument was given, resolve the 'language' context
        # variable; otherwise fall back to the active translation language.
        language = self.language and template.resolve_variable('language', context) or get_language()
        if isinstance(page, int):
            # Primary key given instead of a Page instance.
            try:
                page = Page.objects.get(pk=page)
            except Page.DoesNotExist:
                return self.html and '<a href="#">(none)</a>' or '#'
        link = page.get_absolute_url(language)
        if self.html:
            page_content = page.get_content(language)
            extra_class = ''
            try:
                # Mark links pointing at the current page or one of its ancestors.
                active_page = template.resolve_variable('page', context)
                if active_page == page:
                    extra_class = ' class="active"'
                elif page in active_page.get_path():
                    extra_class = ' class="active_path"'
            except template.VariableDoesNotExist:
                pass
            return '<a%s href="%s">%s</a>' % (extra_class, link, escape(page_content and page_content.title or page.title))
        else:
            return link
def cms_link(parser, token):
    """{% cms_link <page> %} -- render the page URL only."""
    tokens = token.split_contents()
    return CmsLinkNode(tokens[1])
cms_link = register.tag(cms_link)
def cms_html_link(parser, token):
    """{% cms_html_link <page> %} -- render a full <a> tag."""
    tokens = token.split_contents()
    return CmsLinkNode(tokens[1], html=True)
cms_html_link = register.tag(cms_html_link)
def cms_language_link(parser, token):
    """{% cms_language_link <page> <language> %} -- URL in a given language."""
    tokens = token.split_contents()
    return CmsLinkNode(tokens[1], tokens[2])
cms_language_link = register.tag(cms_language_link)
class CmsIsSubpageNode(template.Node):
    """Render the enclosed block only when *sub_page* is *page* itself or
    one of its descendants."""
    def __init__(self, sub_page, page, nodelist):
        self.sub_page = sub_page
        self.page = page
        self.nodelist = nodelist

    def render(self, context):
        sub_page = template.resolve_variable(self.sub_page, context)
        page = template.resolve_variable(self.page, context)
        # Both arguments may be given as primary keys instead of Page objects.
        if isinstance(page, int):
            page = Page.objects.get(pk=page)
        if isinstance(sub_page, int):
            sub_page = Page.objects.get(pk=sub_page)
        # Walk up the parent chain looking for *page*.
        while sub_page:
            if sub_page == page:
                return self.nodelist.render(context)
            sub_page = sub_page.parent
        return ''
def if_cms_is_subpage(parser, token):
    """{% if_cms_is_subpage <sub_page> <page> %} ... {% end_if_cms_is_subpage %}"""
    tokens = token.contents.split()
    if len(tokens) != 3:
        raise template.TemplateSyntaxError, "'%s' tag requires two arguments" % tokens[0]
    nodelist = parser.parse(('end_if_cms_is_subpage',))
    parser.delete_first_token()
    return CmsIsSubpageNode(tokens[1], tokens[2], nodelist)
if_cms_is_subpage = register.tag(if_cms_is_subpage)
def yesno(value):
    """Filter: render an admin-style yes/no/unknown icon ('' means unknown).

    NOTE(review): ``_`` (gettext) is not imported anywhere in this module's
    visible imports, so these alt-text calls would raise NameError at
    runtime -- confirm the missing import.
    """
    yesno_template = '<img src="%scms/img/%s" alt="%s" />'
    if value == '':
        return yesno_template % (settings.MEDIA_URL, 'icon-unknown.gif', _('Unknown'))
    elif value:
        return yesno_template % (settings.MEDIA_URL, 'icon-yes.gif', _('Yes'))
    else:
        return yesno_template % (settings.MEDIA_URL, 'icon-no.gif', _('No'))
cms_yesno = register.filter('cms_yesno', yesno)
def content_title(page, language):
    """Filter: the title of *page*'s content in *language*.

    NOTE(review): assumes get_content() never returns None here -- verify.
    """
    return page.get_content(language).title
get_content_title = register.filter('cms_get_content_title', content_title)
class CmsPaginationNode(template.Node):
    """Render one page of a {% cms_pagination %} block, plus the paging
    context variables (number_of_pages, page_numbers, more_than_one_page)."""
    def __init__(self, nodelist, num_pages):
        self.nodelist = nodelist
        self.num_pages = num_pages

    def render(self, context):
        context['number_of_pages'] = self.num_pages
        context['page_numbers'] = range(1, self.num_pages+1)
        context['more_than_one_page'] = self.num_pages>1
        return self.nodelist.render(context)
def cms_pagination(parser, token):
    """``{% cms_pagination N %}...{% cms_new_page %}...{% cms_end_pagination %}``

    Splits the enclosed content into pages at each ``cms_new_page`` marker and
    renders page *N* (clamped to 1; the last page is shown if *N* is too big).
    """
    tokens = token.contents.split()
    if len(tokens) != 2:
        # Call syntax instead of the Python-2-only ``raise E, msg`` statement,
        # which is a SyntaxError on Python 3.
        raise template.TemplateSyntaxError("'%s' tag requires one argument" % tokens[0])
    page = int(tokens[1])
    if page < 1:
        page = 1
    num_pages = 1
    the_nodelist = None
    while True:
        # Consume template content up to the next page break or the end tag.
        nodelist = parser.parse(('cms_new_page', 'cms_end_pagination'))
        if num_pages == page:
            the_nodelist = nodelist
        token_name = parser.next_token().contents
        if token_name == 'cms_end_pagination':
            if not the_nodelist:
                # Display the last page if the page number is too big
                the_nodelist = nodelist
            break
        num_pages += 1
    return CmsPaginationNode(the_nodelist, num_pages)
cms_pagination = register.tag(cms_pagination)
|
3,393 | 42d9f40dd50056b1c258508a6cb3f9875680276a | """empty message
Revision ID: 42cf7f6532dd
Revises: e6d4ac8564fb
Create Date: 2019-04-01 16:13:37.207305
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '42cf7f6532dd'
down_revision = 'e6d4ac8564fb'
branch_labels = None
depends_on = None
def upgrade():
    """Add the non-nullable ``stakeholder.archived`` flag (server default false)."""
    archived = sa.Column('archived', sa.Boolean(), nullable=False,
                         default=False, server_default="false")
    op.add_column('stakeholder', archived)
def downgrade():
    """Revert the upgrade: drop the ``stakeholder.archived`` column."""
    op.drop_column('stakeholder', 'archived')
|
3,394 | 153c02585e5d536616ec4b69757328803ac2fb71 | #coding=utf-8
'''
Created on 2013-3-28
@author: jemmy
'''
import telnetlib
import getpass
import sys
import os
import time
import xlrd
from pyExcelerator import *
# -- module configuration ----------------------------------------------------
# (a dangling bare "import" statement -- a SyntaxError -- was removed here)
Host = "192.168.0.1"
# NOTE(review): 70001 exceeds the valid TCP port range (max 65535) -- confirm.
Port = "70001"
username = "admin"
password = "admin"
# Timestamped log file name, fixed once at startup (YYYYmmddHHMMSS).
filename = str(time.strftime('%Y%m%d%H%M%S'))
def telnet():
    """Run one scripted telnet session against Host and return its full output.

    Logs in with the module-level credentials, issues a fixed batch of
    diagnostic commands (meminfo, /proc/meminfo, mpstat, date), exits,
    and returns everything the device printed (``read_all()``).
    """
    # product:
    tn = telnetlib.Telnet(Host)
    # NOTE(review): this second connection is created but never used or closed,
    # and Port ("70001") exceeds the valid TCP port range -- presumably only
    # the first connection was intended; confirm before removing.
    telnetlib.Telnet(Host, Port)
    tn.read_until("Login: ")
    tn.write(username + "\n")
    tn.read_until("Password: ")
    tn.write(password + "\n")
    # Diagnostic command batch; output is collected by read_all() below.
    tn.write("meminfo \n")
    tn.write("sh \n")
    tn.write("cat /proc/meminfo \n")
    tn.write("mpstat -P ALL \n")
    tn.write("date \n")
    # Two exits: one for the "sh" sub-shell, one for the device CLI.
    tn.write("exit \n")
    tn.write("exit \n")
    return tn.read_all()
# Warm-up: one telnet session whose output is discarded, then a short pause
# before the main polling loop starts.
telnet()
time.sleep(5)
# -- logging helper ----------------------------------------------------------
def getlog(s):
print "getlog!---------------------------------------"
f = open('/home/' + filename, 'a')
f.write(s)
f.close()
#define
for i in range(1, 10000000):
print i
telnet()
log = str(telnet())
getlog(log)
time.sleep(5) |
3,395 | 2866ecf69969b445fb15740a507ddecb1dd1762d | # C8-06 p.146 Write city_country() function that takes name city and country
# Print city name then the country the city is in. call 3 times with differet pairs.
def city_country(city, country):
    """Print '"City, Country"' (both title-cased) followed by a blank line."""
    formatted = f'"{city.title()}, {country.title()}"'
    print(formatted + "\n")
# Three sample invocations with different city/country pairs.
# NOTE(review): str.title() capitalizes after the apostrophe ("St. John'S").
city_country("St. John's", 'Canada')
# NOTE(review): "Ontario" is a province, not a country -- likely meant 'Canada'.
city_country("ottawa", "Ontario")
city_country('cairo', 'egypt')
3,396 | 464fc2c193769eee86a639f73b933d5413be2b87 | from keyboards import *
from DB import cur, conn
from bot_token import bot
from limit_text import limit_text
def send_answer(question_id, answer_owner, receiver_tel_id, short):
    """Build the display text and inline keyboard for one answer.

    Fetches the answer that *answer_owner* gave to question *question_id*,
    decorates the text according to the author's role (STUDENT / TA / ADMIN),
    its accepted/rated state, and who is receiving it, and returns the pair
    ``(answer_text, keyboard)``. *short* selects the truncated preview over
    the full text. Returns ``(None, empty keyboard)`` when no answer exists.

    NOTE(review): ``cur.execute(...).fetchone()`` chaining is sqlite3-style
    while the ``%s`` placeholders look like psycopg2 -- confirm which driver
    ``cur`` actually is.
    """
    answer = cur.execute('''SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''', (question_id, answer_owner)).fetchone()
    keyboard = telebot.types.InlineKeyboardMarkup()
    if answer is not None:
        # Re-fetch the full row; this shadows the builtin ``id`` and rebinds
        # ``answer`` from the one-column tuple to the text column itself.
        id, question_id, tel_id, answer, accepted_answer, rate_answer, photo, document, document_type, document_size, send_date = cur.execute(
            '''SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''',
            (question_id, answer_owner)).fetchone()
        question_owner = \
            cur.execute('''SELECT tel_id FROM Questions WHERE id = (%s)''', (question_id, )).fetchone()[0]
        # Limiting Long Questions and specifying keyboard accordingly
        # GETTING ADMINS AND TAs
        role = cur.execute('''SELECT role FROM Users WHERE tel_id = (%s)''', (answer_owner, )).fetchone()[0]
        # This flag is used at the bottom for Admin and TAs keyboard setting
        short_message_flag = False
        # Setting keyboard: long answers get a show-more/show-less toggle.
        if limit_text(answer):
            short_message_flag = True
            # SHOWMORE key
            if short:
                answer = limit_text(answer)
                showkey = showmore
            else:
                showkey = showless
            if receiver_tel_id == question_owner:
                if accepted_answer:
                    keyboard.add(showkey)
                else:
                    keyboard.add(showkey, accept_answer, next_page_answer)
            else:
                # FOLLOWERs and Answer Owner only get a show more key
                keyboard.add(showkey)
        else:
            # Short answer: only the question owner can accept / page through.
            if receiver_tel_id == question_owner:
                if not accepted_answer:
                    if question_owner == receiver_tel_id:
                        keyboard.add(accept_answer, next_page_answer)
        # ATTACHMENTs
        if photo is not None:
            keyboard.add(photo_button)
        if document is not None:
            document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'
                                .format(document_type, document_size)), callback_data='document')
            keyboard.add(document_button)
        # SETTING EMOJI BASED ON ACCEPTED OR NOT ACCEPTED ANSWER
        if role in ['STUDENT', 'TA']:
            if accepted_answer:
                answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(question_id) + ' #' + \
                         str(answer_owner) + '\n\n' + answer + emoji.emojize('\n\n:high_voltage: Rated: {0}/5'.format(rate_answer))
            else:
                answer = emoji.emojize(':bright_button: #A_') + str(question_id) + ' #' + str(answer_owner) + '\n\n' + answer
            if role == 'TA':
                answer += emoji.emojize('\n\n:bust_in_silhouette: Sent by ') + role
        ## ADMINs AND TAs answers are indicated with a flag
        elif role in ['ADMIN']:
            # NOTE(review): question_state is fetched but never used below.
            question_state = cur.execute('''SELECT status FROM Questions WHERE id = (%s)''', (question_id,)).fetchone()[0]
            # ADMIN Answers are different: the keyboard is rebuilt from scratch.
            keyboard = telebot.types.InlineKeyboardMarkup()
            if short_message_flag:
                # SHOWMORE key
                if short:
                    showkey = showmore
                else:
                    showkey = showless
                keyboard.add(showkey)
            else:
                keyboard = None
            # ATTACHMENTs
            # NOTE(review): if keyboard is None here and the answer carries a
            # photo or document, keyboard.add(...) raises AttributeError --
            # confirm whether ADMIN answers can have attachments.
            if photo is not None:
                keyboard.add(photo_button)
            if document is not None:
                document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'.format(document_type,
                                    document_size)), callback_data='document')
                keyboard.add(document_button)
            answer = emoji.emojize(':collision: #A_') + str(question_id) + ' #' + str(answer_owner) + '\n\n' \
                     + answer + emoji.emojize('\n\n:bust_in_silhouette: Sent by ') + role
    # Returning Answer and Two Keyboards
    return (answer, keyboard)
3,397 | 66e93295d2797ca9e08100a0a1f28619acb72aa4 | from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.dispatch import Signal
from djangochannelsrestframework.observer.base_observer import BaseObserver
class Observer(BaseObserver):
    """Bridge a Django Signal to channel-layer group messages.

    The connection is made at construction time: every emission of *signal*
    is serialized once and fanned out to all groups yielded by
    ``group_names_for_signal``.
    """

    def __init__(self, func, signal: Signal = None, kwargs=None):
        super().__init__(func)
        if kwargs is None:
            kwargs = {}
        self.signal = signal
        self.signal_kwargs = kwargs
        # NOTE(review): set here but not used in this class; presumably
        # consumed by BaseObserver machinery -- confirm.
        self._serializer = None
        # NOTE(review): if signal is None (the default), this line raises
        # AttributeError -- the argument is effectively required.
        self.signal.connect(self.handle, **self.signal_kwargs)

    def handle(self, signal, *args, **kwargs):
        """Signal receiver: serialize once, then send to every target group."""
        message = self.serialize(signal, *args, **kwargs)
        channel_layer = get_channel_layer()
        for group_name in self.group_names_for_signal(*args, message=message, **kwargs):
            async_to_sync(channel_layer.group_send)(group_name, message)

    def group_names(self, *args, **kwargs):
        """Yield the single group name derived from uuid, func name and signal args."""
        # NOTE(review): Signal.providing_args was removed in Django 3.1, so
        # this raises AttributeError on modern Django -- confirm the
        # supported Django version range.
        yield "{}-{}-signal-{}".format(
            self._uuid,
            self.func.__name__.replace("_", "."),
            ".".join(
                arg.lower().replace("_", ".") for arg in self.signal.providing_args
            ),
        )
|
3,398 | 2345d1f72fb695ccec5af0ed157c0606f197009c | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_configuration(host):
    """The sshd and ssh client configs carry the options this role manages."""
    daemon_cfg = host.file('/etc/ssh/sshd_config')
    for pattern in (r'^PermitRootLogin no$',
                    r'^X11Forwarding no$',
                    r'^UsePAM yes$',
                    r'\sPermitTTY no$'):
        assert daemon_cfg.contains(pattern)
    client_cfg = host.file('/etc/ssh/ssh_config')
    for pattern in (r'^User test$', r'^Host \*$', r'\sPort 23$'):
        assert client_cfg.contains(pattern)
def test_service(host):
    """The ssh service is enabled, running, and listening on port 22."""
    service = host.service('ssh')
    assert service.is_enabled
    assert service.is_running
    assert host.socket('tcp://0.0.0.0:22').is_listening
|
3,399 | c2467e94a2ad474f0413e7ee3863aa134bf9c51f | """
TestRail API Categories
"""
from . import _category
from ._session import Session
class TestRailAPI(Session):
    """Entry point grouping every TestRail API category behind one session.

    Each property constructs a fresh category object bound to this
    authenticated Session; the category issues the actual HTTP requests.
    """

    @property
    def attachments(self) -> _category.Attachments:
        """
        https://www.gurock.com/testrail/docs/api/reference/attachments
        Use the following API methods to upload, retrieve and delete attachments.
        """
        return _category.Attachments(self)

    @property
    def cases(self) -> _category.Cases:
        """
        https://www.gurock.com/testrail/docs/api/reference/cases
        Use the following API methods to request details about test cases and
        to create or modify test cases.
        """
        return _category.Cases(self)

    @property
    def case_fields(self) -> _category.CaseFields:
        """
        https://www.gurock.com/testrail/docs/api/reference/case-fields
        Use the following API methods to request details about custom fields
        for test cases.
        """
        return _category.CaseFields(self)

    @property
    def case_types(self) -> _category.CaseTypes:
        """
        https://www.gurock.com/testrail/docs/api/reference/case-types
        Use the following API methods to request details about case type.
        """
        return _category.CaseTypes(self)

    @property
    def configurations(self) -> _category.Configurations:
        """
        https://www.gurock.com/testrail/docs/api/reference/configurations
        Use the following API methods to request details about configurations and
        to create or modify configurations.
        """
        return _category.Configurations(self)

    @property
    def milestones(self) -> _category.Milestones:
        """
        https://www.gurock.com/testrail/docs/api/reference/milestones
        Use the following API methods to request details about milestones and
        to create or modify milestones.
        """
        return _category.Milestones(self)

    @property
    def plans(self) -> _category.Plans:
        """
        https://www.gurock.com/testrail/docs/api/reference/plans
        Use the following API methods to request details about test plans and
        to create or modify test plans.
        """
        return _category.Plans(self)

    @property
    def priorities(self) -> _category.Priorities:
        """
        https://www.gurock.com/testrail/docs/api/reference/priorities
        Use the following API methods to request details about priorities.
        """
        return _category.Priorities(self)

    @property
    def projects(self) -> _category.Projects:
        """
        https://www.gurock.com/testrail/docs/api/reference/projects
        Use the following API methods to request details about projects and
        to create or modify projects
        """
        return _category.Projects(self)

    @property
    def reports(self) -> _category.Reports:
        """
        https://www.gurock.com/testrail/docs/api/reference/reports
        Use the following methods to get and run reports that have been
        made accessible to the API.
        """
        return _category.Reports(self)

    @property
    def results(self) -> _category.Results:
        """
        https://www.gurock.com/testrail/docs/api/reference/results
        Use the following API methods to request details about test results and
        to add new test results.
        """
        return _category.Results(self)

    @property
    def result_fields(self) -> _category.ResultFields:
        """
        https://www.gurock.com/testrail/docs/api/reference/result-fields
        Use the following API methods to request details about custom fields
        for test results.
        """
        return _category.ResultFields(self)

    @property
    def runs(self) -> _category.Runs:
        """
        https://www.gurock.com/testrail/docs/api/reference/runs
        Use the following API methods to request details about test runs and
        to create or modify test runs.
        """
        return _category.Runs(self)

    @property
    def sections(self) -> _category.Sections:
        """
        https://www.gurock.com/testrail/docs/api/reference/sections
        Use the following API methods to request details about sections and
        to create or modify sections.
        Sections are used to group and organize test cases in test suites.
        """
        return _category.Sections(self)

    @property
    def shared_steps(self) -> _category.SharedSteps:
        """
        https://www.gurock.com/testrail/docs/api/reference/api-shared-steps
        Use the following API methods to request details about shared steps.
        """
        return _category.SharedSteps(self)

    @property
    def statuses(self) -> _category.Statuses:
        """
        https://www.gurock.com/testrail/docs/api/reference/statuses
        Use the following API methods to request details about test statuses.
        """
        return _category.Statuses(self)

    @property
    def suites(self) -> _category.Suites:
        """
        https://www.gurock.com/testrail/docs/api/reference/suites
        Use the following API methods to request details about test suites and
        to create or modify test suites.
        """
        return _category.Suites(self)

    @property
    def templates(self) -> _category.Template:
        """
        https://www.gurock.com/testrail/docs/api/reference/templates
        Use the following API methods to request details about templates
        (field layouts for cases/results)
        """
        return _category.Template(self)

    @property
    def tests(self) -> _category.Tests:
        """
        https://www.gurock.com/testrail/docs/api/reference/tests
        Use the following API methods to request details about tests.
        """
        return _category.Tests(self)

    @property
    def users(self) -> _category.Users:
        """
        https://www.gurock.com/testrail/docs/api/reference/users
        Use the following API methods to request details about users.
        """
        return _category.Users(self)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.