seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
71154449436 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 2 08:45:19 2021
@author: jbclark8
"""
# main program for ICM-DOM-PD for python
# a highly modularized code based on Cerco and Coel 1993 estuarine model
# of Chesapeake Bay
# Modified from Clark et al. 2020 to include more complex
# light reactions and photochemistry
# Version 0.0.1
# JB Clark, Aug 2021
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import Density
import functions
import DefVars as dv
import Light
import Sediment as sed
import Nutrients as nt
import DOM
import POM
import Algae
import Oxygen
import os
import glob
from matplotlib import cm
# Before Beginning, read all Instructions
# Make sure pyICM is in the Desktop, and if not change below
# This software is used blah blah blah licenses
# Find where we are on the computer and make sure it is the pyICM directory
HomeDir = os.path.expanduser('~') # get the home directory
ICMDir = HomeDir + "/Desktop/pyICM"
# Navigate to the home directory
os.chdir(ICMDir)
# # list all of the available input files for the stations
# WhichData = input("Hello!Welcome to ICM." "\n"
# "This current version is for Chesapeake Bay"
# "Station Data is read in and light and algae are dynamically\
# calculated. This is version 0.1"
# "Which location would you like to run for?" "\n"
# "The current options are Chesapeake or Yukon ---> ")
print("Hello! Welcome to ICM." "\n"
"This current version is for Chesapeake Bay" "\n"
"Station data is read variables are dynamically calculated. This is version 0.1")
WhichData = 'Chesapeake'
StatDir = ICMDir+"/Inputs/"+WhichData
StatFiles = sorted(glob.glob(StatDir+"/CBP/*.csv"))
print("These are the following files available")
# Show all available files
ii = 0
for ff in StatFiles:
f1 = ff.split('/')
f1 = f1[len(f1)-1]
f1 = f1.split("_")
print(ii+1, ". ", f1[0])
ii = ii+1
fnum = int(input("Which station file from the list would you like to run?"
"\n'"
"Enter number here ---> "))
WQ_fname = StatFiles[fnum-1]
print("File Selected is --> ", WQ_fname)
# get the station name from the file name, to match the weather data
Fin = WQ_fname.split('/')
Fin = Fin[len(Fin)-1]
Fin = Fin.split('_')
StatName = Fin[0]
# LON=float(input('What is the Longitude --> '))
# LAT=float(input('What is the Latitude --> ' ))
# yy=float(input('What is the year? --> '))
LAT = 38
LON = -76
yy = 2020
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%% Start pyICM %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# %%%%%%%%%%%%%%%%%%%% Read In CSV FIles from Model Input %%%%%%%%%%%%%%%%%%%%
# read in the time series data of water quality inputs
myTS = pd.read_csv(WQ_fname)
# read in spectral information for light attenuation
Light_fname = StatDir + "/CBay_Spectral.csv"
myLT = pd.read_csv(Light_fname)
# read in surface forcing information for light and weather
weather_fname = StatDir+"/Weather/"+StatName+"_Weather.csv"
mySFC = pd.read_csv(weather_fname)
errorcode = dv.InitializeVariables()
if(errorcode == 1):
print('All variables were not defined, there is a problem and ICM is\
stopping')
elif(errorcode == 0):
print('All global variables have been defined')
# Get the spatial dimensions and the Model Time
# calculate the domain volume, specified in DefVars
Volume = functions.CalcVolume(dv.X, dv.Y, dv.Z)
print('Volume =', Volume*1e-9, 'km^3')
# now set up the time vector for integration, using the forcing file time
modtimein = myTS.Time
lasttime = modtimein[len(modtimein)-1]
print('Start and End Days are = ', modtimein[0]/86400, lasttime/86400)
# set the time step here
DT = 3600.
Nsteps = int(lasttime/DT)
# now set up a time array with the appropriate time step
modtime = np.linspace(modtimein[0], lasttime, Nsteps)
current_time = modtime[0]
print('My Time Step is = ', DT, 'Seconds')
#total number of time steps
mylen = len(modtime)
# %%%%%%%%%%%% Important Variables to Potentially Change %%%%%%%%%%%%%%%%%%%%
#
# RIVER FLOW
Q = np.ones(mylen)*500.
# RIVER INPUTS
# Water Temperature
Tin = myTS.WTEMP
# Water Salinity
Sin = myTS.SALINITY
# Nitrogen
RivNH4in = myTS.NH4F
RivNO3in = myTS.NO23F
# Convert upstream chl a concentration into algae 1 and 2 carbon
RivAlgae1in = myTS.CHLA*0.5*dv.cchla1*1e-3
RivAlgae2in = myTS.CHLA*0.5*dv.cchla1*1e-3
# calculate the change in concentration due to river inputs
RiverISSin = myTS.TSS
# SURFACE FORCING
# wind velocity
Uin = mySFC.uwnd[0:365]
Vin = mySFC.vwnd[0:365]
# total downwelling shortwave flux and scale by 0.43 to remove IR
EdIn = mySFC.dswrf*0.43
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# LIGHT PARAMETERS
# wavelengths, nm
WL = myLT.Lambda
# absorption due to water, m^-1
aWater = myLT.aW
# mass specific absorption for each colored DOC, m^2 gC^-1
aC1 = myLT.aCDOC1
aC2 = myLT.aCDOC2
aC3 = myLT.aCDOC3
# take the averge for now
aCDOC = np.average(np.column_stack((aC1, aC2, aC3)), 1)
# mass specific absorption due to chla, m^2 mg chla^-1
aPhi = myLT.aPhi
# mass specific absorption due to particles, m^2 g^-1
aP = myLT.aP
# mass specific backscattering due to particles, m^2 g^-1
bbP = myLT.bbp
# spectral distribution of light, nm^-1
SpecDis = myLT.Spec_dist
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# interpolate all forcing to the model time step
# Interpolate all external forcing series onto the model time grid.
T = np.interp(modtime, modtimein, Tin)
S = np.interp(modtime, modtimein, Sin)
# wind speed magnitude from the u/v components
# NOTE(review): Uin/Vin were sliced to [0:365] above — confirm their length
# matches modtimein, otherwise np.interp raises a shape error
Speed = np.sqrt(Uin ** 2 + Vin ** 2)
Uwind = np.interp(modtime, modtimein, Speed)
RivNH4 = np.interp(modtime, modtimein, RivNH4in)
RivNO3 = np.interp(modtime, modtimein, RivNO3in)
RivAlgae1 = np.interp(modtime, modtimein, RivAlgae1in)
# BUG FIX: this line previously re-assigned RivAlgae1 from the algae-2 input
# series, clobbering the interpolated algae-1 forcing and never defining an
# algae-2 forcing at all.
RivAlgae2 = np.interp(modtime, modtimein, RivAlgae2in)
RiverISS = np.interp(modtime, modtimein, RiverISSin)
# %%%%%%%%%%%%%%%% INITIALIZE ALL WATER QUALITY VARIABLES %%%%%%%%%%%%%%%%%%
# After running the model a few times, can take the last value and input here
# phytoplankton 1 and 2 biomass (g C m^-3)
B1 = np.zeros(mylen)
B1[0] = 0.1
B2 = np.zeros(mylen)
B2[0] = 0.1
# phytoplankton 1 and 2 chla
# convert carbon to chla in micrograms L6-1
chla1 = B1 / dv.cchla1 * 1e3
chla2 = B2 / dv.cchla2 * 1e3
chla = chla1 + chla2
# #ammonium and nitrate concentration (g N m^-3)
NH4 = np.zeros(mylen)
NH4[0] = 0.05
NO3 = np.zeros(mylen)
NO3[0] = 0.1
# dissolved oxygeen
DO2 = np.zeros(mylen)
DO2[0] = 10.0
# temp var for DON remineralization
MNLDON = 0.0
# set up an array for KD, PAR, and NP_Total to collect and pass back to main
# from the light attenuation function
KD = np.zeros((mylen, len(WL)))
PAR = np.zeros(mylen)
KD_PAR = np.zeros(mylen)
NP_total = np.zeros((mylen, len(WL)))
Rrs_out = np.zeros((mylen, len(WL)))
SZA_out = np.zeros(mylen)
EdAvg = np.zeros((mylen, len(WL)))
# now find where WL == 400 and WL == 700 to mark off the PAR
dv.PAR_Id[0] = (np.abs(WL - 400.)).argmin()
dv.PAR_Id[1] = (np.abs(WL - 700.)).argmin()
# #colored and non-colored DOC concentration (g C m^-3)
CDOC = np.ones(mylen)
# NCDOC=np.zeros(mylen)
MNLDOC = 0.0
# #colored and non-colored DON concentration (g N m^-3)
# CDON=np.zeros(mylen)
# NCDON=np.zeros(mylen)
# #Labile and Refractory POC and PON in g C and g N m^-3
LPOC = np.zeros(mylen)
LPOC[0] = 0.5
RPOC = np.zeros(mylen)
RPOC[0] = 1.0
# LPON=np.zeros(mylen)
# RPON=np.zeros(mylen)
# inorganic suspended sediment (g m^-3)
ISS = np.zeros(mylen)
ISS[0] = 10.
# TSS = np.zeros(mylen)
TSS = ISS + (LPOC + RPOC) * 2.5
# Phytoplankton Rate Variables
SZA = np.zeros(mylen)
FI1 = np.zeros(mylen)
FI2 = np.zeros(mylen)
NL1 = np.zeros(mylen)
NL2 = np.zeros(mylen)
NPP1 = np.zeros(mylen)
NPP2 = np.zeros(mylen)
P1 = np.zeros(mylen)
P2 = np.zeros(mylen)
PR1 = np.zeros(mylen)
PR2 = np.zeros(mylen)
PN1 = np.zeros(mylen)
PN2 = np.zeros(mylen)
NT = np.zeros(mylen)
i = 1
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% BEGIN MAIN LOOP %%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Main time-stepping loop: at each step, compute light attenuation, algal
# growth, nutrient and oxygen dynamics, then advance all state variables by DT.
# (Loop body indentation reconstructed; statement order unchanged.)
while current_time < modtime[len(modtime)-1]:
    JDAY = current_time/86400
    MyDay = np.floor(JDAY)
    # total absorption from water, chlorophyll, colored DOC and particles
    aTotal = (aWater + chla[i-1] * aPhi +
              CDOC[i-1] * aCDOC +
              TSS[i-1] * aP)
    # total particulate backscattering
    bTotal = TSS[i-1] * bbP
    # BUG FIX: index the daily shortwave forcing with an int; np.floor returns
    # a float, which modern pandas rejects as a Series label
    Ed = EdIn[int(MyDay)] * SpecDis
    # call the light attenuation functions
    KD[(i-1, )], PAR[i-1], NP_total[(i-1, )], KD_PAR[i-1], \
        EdAvg[i-1], Rrs_out[(i-1, )], SZA[i-1] = \
        Light.Light_Attenuation(WL, Ed,
                                aTotal, bTotal, dv.Z*0.1,
                                JDAY, LAT, LON, yy, T[i-1], S[i-1])
    FRate = functions.CalcFlushingRate(Q[i-1], Volume)
    # calculate the change in concentration due to biogeochemical processes
    # first algal growth and death
    Algae1 = Algae.DTB1(B1[i-1], NH4[i-1], NO3[i-1], T[i-1],
                        PAR[i-1], FRate, RivAlgae1[i-1], Q[i-1], Volume)
    # NOTE(review): algae-2 is forced with RivAlgae1 because RivAlgae2 is never
    # interpolated upstream — confirm whether RivAlgae2 was intended here
    Algae2 = Algae.DTB2(B2[i-1], NH4[i-1], NO3[i-1], T[i-1],
                        PAR[i-1], FRate, RivAlgae1[i-1], Q[i-1], Volume)
    # store the algae rate diagnostics for this step
    FI1[i] = Algae1.FI
    FI2[i] = Algae2.FI
    NL1[i] = Algae1.NL
    NL2[i] = Algae2.NL
    NPP1[i] = Algae1.NPP
    NPP2[i] = Algae2.NPP
    P1[i] = Algae1.P1
    P2[i] = Algae2.P2
    PR1[i] = Algae1.PR1
    PR2[i] = Algae2.PR2
    PN1[i] = Algae1.PN
    PN2[i] = Algae2.PN
    # next inorganic nitrogen: combined algal uptake terms
    NH4A = Algae1.NH4A + Algae2.NH4A
    NO3A = Algae1.NO3A + Algae2.NO3A
    DTNO3 = nt.DTNO3(NH4[i-1], NO3[i-1], T[i-1], DO2[i-1], NO3A,
                     Q[i-1], Volume, FRate, RivNO3[i-1])
    DTNH4 = nt.DTNH4(NH4[i-1], NO3[i-1], T[i-1], DO2[i-1], MNLDON,
                     NH4A, Q[i-1], Volume, FRate, RivNH4[i-1])
    # nitrification
    NT[i] = nt.Nitrification(NH4[i-1], T[i-1], DO2[i-1])
    # inorganic sediment
    DTISS = sed.deltaISS(Q[i-1], Volume, FRate, RiverISS[i-1], ISS[i-1])
    # next calculate dissolved oxygen
    DTDO2 = Oxygen.DOXG(DO2[i-1], T[i], S[i], dv.Z,
                        PN1[i], PN2[i], P1[i], P2[i], PR1[i],
                        PR2[i], B1[i], B2[i],
                        NT[i], Uwind[i], MNLDOC, 0)
    DO2[i] = DO2[i-1] + DTDO2*DT
    # update the concentrations with a forward Euler step
    ISS[i] = ISS[i-1] + DTISS*DT
    B1[i] = B1[i-1] + Algae1.DTB1*DT
    B2[i] = B2[i-1] + Algae2.DTB2*DT
    chla1[i] = B1[i] / dv.cchla1 * 1e3
    chla2[i] = B2[i] / dv.cchla2 * 1e3
    chla[i] = chla1[i] + chla2[i]
    NH4[i] = NH4[i-1] + DTNH4.DTNH4*DT
    NO3[i] = NO3[i-1] + DTNO3.DTNO3*DT
    TSS[i] = ISS[i] + B1[i] + B2[i] + (LPOC[i] + RPOC[i]) * 2.5
    # update the time and step index
    current_time = modtime[i]
    i = i + 1
print('Congratulations!!!','\n'
'Model run is complete, plotting some things now')
# %%%%%%%%%%%%%%%%%%%%%%%%%% END MAIN PROGRAM %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Make a Figures Directory if not Exist
OutDir = ICMDir + "/Outputs"
FDir = OutDir + '/Figures'
if not os.path.isdir(OutDir):
path = os.path.join(ICMDir, 'Outputs')
os.mkdir(path)
if not os.path.isdir(FDir):
path = os.path.join(OutDir, 'Figures')
os.mkdir(path)
# now make some plots of whatever you like and save to the Figures Directory
plt.plot(modtime/86400, ISS)
plt.xlabel('Time')
plt.ylabel('$ISS (g m^{-3})$')
plt.savefig(FDir + '/ISS_ts.png')
plt.show()
# Plot algae group 1 biomass time series
plt.plot(modtime/86400, B1)
plt.xlabel('Time')
plt.ylabel('$Alg 1 (gC m^{-3})$')
# BUG FIX: this figure previously saved to 'Algae2_ts.png', which the next
# (algae-2) plot then overwrote, losing the algae-1 figure entirely.
plt.savefig(FDir + '/Algae1_ts.png')
plt.show()
plt.plot(modtime/86400, B2)
plt.xlabel('Time')
plt.ylabel('$Alg 2 (gC m^{-3})$')
plt.savefig(FDir + '/Algae2_ts.png')
plt.show()
plt.plot(modtime/86400, NH4)
plt.xlabel('Time')
plt.ylabel('$NH4 (gNm^{-3})$')
plt.savefig(FDir + '/NH4_ts.png')
plt.show()
plt.plot(modtime/86400, NO3)
plt.xlabel('Time')
plt.ylabel('$NO3 (gNm^{-3})$')
plt.savefig(FDir + '/NO3_ts.png')
plt.show()
plt.plot(modtime/86400, DO2)
plt.xlabel('Time')
plt.ylabel('$O2 (g m^{-3})$')
plt.savefig(FDir + '/DO2_ts.png')
plt.show()
plt.plot(modtime/86400, NPP1)
plt.xlabel('Time')
plt.ylabel('$Algae 1 NPP (g C m^{-2} d^{-1})$')
plt.savefig(FDir + '/NPP1_ts.png')
plt.show()
plt.plot(modtime/86400, NPP2)
plt.xlabel('Time')
plt.ylabel('$Algae 2 NPP (g C m^{-2} d^{-1})$')
plt.savefig(FDir + '/NPP2_ts.png')
plt.show()
X, Y = np.meshgrid(WL, modtime / 86400)
Z = Rrs_out
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.viridis,
linewidth=0, antialiased=False)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
plt.savefig(FDir + '/Rrs_surface.png')
Z = KD
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.viridis,
linewidth=0, antialiased=False)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
plt.savefig(FDir + '/Kd_surface.png')
print('Model Run is All Done, You Can Find Plots in the Outputs/Figures Directory')
| bclark805/pyICM | code/ICM_main.py | ICM_main.py | py | 12,956 | python | en | code | 0 | github-code | 50 |
19979045520 | """
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# NOTE(review): a hard-coded fallback key is acceptable only because this is
# the local/dev settings module; ensure DJANGO_SECRET_KEY is always set in
# any shared or deployed environment.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='+.[(;=Q07q6tB9UblTzVS{k>59g{t/AhOX?@8O$rq=_H%GxGGT')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', '10.0.0.1', ]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('wagtail.contrib.styleguide', )
ALLOWED_HOSTS = ('127.0.0.1', '10.0.0.133', 'localhost', 'www.dev.bluehut.ca', '192.168.0.106')
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'weekly_emails': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '{}/logs/weekly-email.log'.format(env.str('HOME')),
'formatter': 'verbose',
},
'social': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '{}/logs/social-media-posts.log'.format(env.str('HOME')),
'formatter': 'verbose',
},
'petpoint': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '{}/logs/petpoint-updates.log'.format(env.str('HOME')),
'formatter': 'verbose',
},
'petpoint-errors': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filename': '{}/logs/petpoint-errors.log'.format(env.str('HOME')),
'formatter': 'verbose',
},
},
'loggers': {
'bvspca.newsletter': {
'level': 'ERROR',
'handlers': ['weekly_emails'],
'propagate': False
},
'bvspca.social': {
'level': 'INFO',
'handlers': ['social'],
'propagate': False
},
'bvspca.animals.petpoint': {
'level': 'INFO',
'handlers': ['petpoint'],
'propagate': False
},
'bvspca.animals.petpoint.errors': {
'level': 'ERROR',
'handlers': ['petpoint-errors'],
'propagate': False
},
}
}
| nfletton/bvspca | config/settings/local.py | local.py | py | 4,814 | python | en | code | 8 | github-code | 50 |
22422593023 | import math
import random
class UncorrNoiseConso:
    """Applies uncorrelated multiplicative log-normal noise to a list of loads.

    The instance can be constructed without statistics and initialized later
    via init(); calling it before initialization raises RuntimeError.
    (Indentation reconstructed from a flattened source; logic unchanged.)
    """

    def __init__(self, muconso=None, sigmaConso=None):
        self.e = math.exp(1)
        # still needs init() with the load statistics if either is missing
        self.need_init_ = sigmaConso is None or muconso is None
        self.sigmaConso = sigmaConso
        self.muconso = muconso

    def need_init(self):
        """Return True while init() must still be called before use."""
        return self.need_init_

    def init(self, muconso, sigmaConso):
        """Set the mean consumption and noise spread, marking the object ready."""
        self.muconso = muconso
        self.sigmaConso = sigmaConso
        self.need_init_ = False

    def __call__(self, loads):
        """Compute the 'uncorrelated noise' for a list of loads.

        Disconnected loads (value 0) are first reconnected around the mean,
        then every load is scaled by lognormvariate(1, sigmaConso)/e, whose
        expectation is close to 1.
        """
        if self.need_init_:
            msg = "Trying to use an un initialize uncorrelated noise"
            raise RuntimeError(msg)
        loads = [self.recoeverything(el, self.muconso) for el in loads]
        return [load*random.lognormvariate(mu=1., sigma=self.sigmaConso)/self.e for load in loads]

    def recoeverything(self, el, avg):
        """Reconnect everything.

        If something was disconnected (value exactly 0), its new value is
        drawn around the average value with a wide (sigma=0.5) log-normal.
        """
        res = el
        if res == 0.:
            res = avg*random.lognormvariate(mu=1., sigma=0.5)/self.e
        return res
| BDonnot/data_generation | pgdg/UncorrNoise.py | UncorrNoise.py | py | 1,298 | python | en | code | 0 | github-code | 50 |
# Assignment (translated from Portuguese): write a program that displays the
# dollar conversion of an amount read in Brazilian reais. The program must ask
# for the dollar exchange rate and the amount of reais the user has, and then
# print the equivalent value in US currency.
valor_cotacao = float(input('Digite o valor da cotacao em dolar: '))  # exchange rate (reais per dollar)
valor_real_usuario = float(input('Digite o valor em real para conversao: '))  # amount in reais
valor_convertido = valor_real_usuario / valor_cotacao  # reais -> dollars
print(f'Segue valor de conversao para dolar: U$ {valor_convertido:.2f}')
8338455192 | import lws
import librosa
import random
import numpy as np
import math
def _random_occlusion(mags, min_amount, max_amount):
width, height = mags.shape
def random_from_exp(amount_min, amount_max, exp):
return random.uniform(amount_min, math.sqrt(amount_max))**exp
min_width = int(min_amount * width)
max_width = int(max_amount * width)
min_height = int(min_amount * height)
max_height = int(max_amount * height)
noise_width = np.random.randint(min_width, max_width)
noise_height = np.random.randint(min_height, max_height)
x_start = np.random.randint(width - noise_width)
y_start = int((random_from_exp(0, 10, 2) / 10) * (height - noise_height))
x_end = x_start + noise_width
y_end = y_start + noise_height
shape = mags[x_start:x_end:, y_start:y_end:].shape
noise = np.random.normal(loc=np.mean(mags),
scale=np.std(mags),
size=shape)
mags[x_start:x_end:, y_start:y_end:] = noise
return mags
def _multiplacative_noise(mags, high=1.0):
high = np.random.random_sample() * high
noise = np.random.uniform(high=high, size=mags.shape)
return noise.astype(mags.dtype) * mags
def _additive_noise(mags, high=1.0):
high = np.random.random_sample() * high
noise = np.random.uniform(high=high, size=mags.shape)
return noise.astype(mags.dtype) + mags
def _random_masking(mags, masking_prob):
masking_prob = np.random.random_sample() * masking_prob
mask = np.random.choice([0, 1], size=mags.shape,
p=[masking_prob, 1.0 - masking_prob])
return mask.astype(mags.dtype) * mags
class OnlineAudioAugmentor():
    """Randomly applies spectrogram augmentations to batches of magnitudes.

    Each augmentation is disabled (probability 0) until its configuration
    method is called; augment() then applies each enabled augmentation
    independently per batch element.
    """

    def __init__(self):
        # every augmentation starts disabled
        self.prob_random_occlusion = 0
        self.prob_additive_noise = 0
        self.prob_multiplacative_noise = 0
        self.prob_random_masking = 0

    def random_occlusion(self, prob, min_amount, max_amount):
        """Enable rectangular occlusion with per-example probability `prob`."""
        self.prob_random_occlusion = prob
        self.min_amount_random_occlusion = min_amount
        self.max_amount_random_occlusion = max_amount

    def additive_noise(self, prob, high):
        """Enable additive uniform noise with per-example probability `prob`."""
        self.prob_additive_noise = prob
        self.high_additive_noise = high

    def multiplacative_noise(self, prob, high):
        """Enable multiplicative uniform noise with per-example probability `prob`."""
        self.prob_multiplacative_noise = prob
        self.high_multiplacative_noise = high

    def random_masking(self, prob, masking_prob):
        """Enable random elementwise masking with per-example probability `prob`."""
        self.prob_random_masking = prob
        self.masking_prob_random_masking = masking_prob

    def augment(self, mags):
        """Apply the configured augmentations to a (batch, freq, time) array.

        Raises ValueError when `mags` is not 3-dimensional. Elements are
        modified per example; the (possibly mutated) array is returned.
        """
        batch_size = len(mags)
        if mags.ndim != 3:
            raise ValueError('augment expects a batch of magnitudes with a dimensionality of 3.')
        for idx in range(batch_size):
            # order matters only for reproducibility of the random draws
            if np.random.random_sample() < self.prob_multiplacative_noise:
                mags[idx] = _multiplacative_noise(mags[idx], self.high_multiplacative_noise)
            if np.random.random_sample() < self.prob_random_occlusion:
                mags[idx] = _random_occlusion(mags[idx],
                                              self.min_amount_random_occlusion,
                                              self.max_amount_random_occlusion)
            if np.random.random_sample() < self.prob_additive_noise:
                mags[idx] = _additive_noise(mags[idx], self.high_additive_noise)
            if np.random.random_sample() < self.prob_random_masking:
                mags[idx] = _random_masking(mags[idx], self.masking_prob_random_masking)
        return mags
15328769814 | # -*- coding: utf-8 -*-
"""
Created on Sat May 09 17:04:16 2015
@author: Gonçalo
"""
import pandas as pd
import matplotlib.pyplot as plt
from infotables import names,control,lesion,lesionordermap
from activitytables import posturebias, getballistictrials, info_key
from activitytables import normalize, mediannorm, flipleftwards
from shuttlingplots import averagetimetrajectory, proxylegend
from datapath import jumpers, lesionshamcache, crossings_key
from datapath import crossingactivity_random_key
# Load data
nonjumpers = str.format("subject not in {0}",jumpers)
info = pd.read_hdf(lesionshamcache, info_key)
namemap = lesionordermap(info)
info = info.query(nonjumpers)
cract = pd.read_hdf(lesionshamcache,crossingactivity_random_key)
cr = pd.read_hdf(lesionshamcache,crossings_key).query(nonjumpers)
cract.reset_index('time',inplace=True)
# Select data
bias = 2
group = list(names(info))
random = '(session == 13 and trial > 20) or (14 <= session < 17)'
cr = cr.query(random)
# Per-subject loop: split each subject's random-session crossings by posture
# bias, join to the crossing activity, and plot the four bias/step-state
# average trajectories in 3D. (Loop body indentation reconstructed.)
for name in group:
    selection = str.format("subject in {0}", [name])
    scr = cr.query(selection)
    stablebias, unstablebias = posturebias(scr, n=bias)
    stablebias = getballistictrials(stablebias)
    unstablebias = getballistictrials(unstablebias)
    # skip subjects with no ballistic trials in either condition
    if len(stablebias) == 0 or len(unstablebias) == 0:
        continue
    # Select data
    stablebias = stablebias.rename(columns={'index': 'crossing'})
    unstablebias = unstablebias.rename(columns={'index': 'crossing'})
    stablebias.set_index('crossing', append=True, inplace=True)
    unstablebias.set_index('crossing', append=True, inplace=True)
    scract = cract.join(stablebias, how='inner', rsuffix='R')
    ucract = cract.join(unstablebias, how='inner', rsuffix='R')
    # mirror leftward crossings so all trajectories run the same direction
    scract.xhead = flipleftwards(scract.xhead, scract.side)
    ucract.xhead = flipleftwards(ucract.xhead, ucract.side)
    sb_S = scract.query('stepstate3')
    sb_U = scract.query('not stepstate3')
    ub_S = ucract.query('stepstate3')
    ub_U = ucract.query('not stepstate3')
    # Plot data
    name = namemap[name]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    averagetimetrajectory(sb_S, color='b', ax=ax, alpha=0.5)
    averagetimetrajectory(sb_U, color='cyan', ax=ax, alpha=0.5)
    averagetimetrajectory(ub_S, color='orange', ax=ax, alpha=0.5)
    averagetimetrajectory(ub_U, color='r', ax=ax, alpha=0.5)
    proxylegend(['b', 'cyan', 'orange', 'r'],
                ['stable bias [stable]',
                 'stable bias [unstable]',
                 'unstable bias [stable]',
                 'unstable bias [unstable]'],
                ax=ax, loc='upper left')
    # ax.set_ylim(0,3.5)
    ax.set_title(str.format('{0} (n = {1} trials)', name,
                            len(stablebias) + len(unstablebias)))
    # plt.title(str.format('stable (n = {0} trials)',len(cr.query(stable))))
    plt.show()
# Save plot
| kampff-lab/shuttling-analysis | paper/figures/randombias3dtrajectory.py | randombias3dtrajectory.py | py | 2,935 | python | en | code | 0 | github-code | 50 |
25181212306 | from tkinter import *
from game import game
from tkinter import messagebox
def rules():
    """Open a Tkinter window showing the game rules, with a play button.

    The play button asks for confirmation, closes this window and starts a
    new game. (Function body indentation reconstructed; logic unchanged.)
    """
    root = Tk()  # open main window (here "root" works as an object)
    root.title('Rock Paper Scissor Lizard and Spock')
    root.resizable(False, False)
    root.config(background='black')
    bg = PhotoImage(file='play.png')
    label_instructions = Label(root, text='Rules/Simple way to win the game...\n ->Spock smashes Scissors\n'
                               '->Scissor cuts Paper\n->Paper covers Rock\n->Rock crushes Lizard\n'
                               '->Lizard poisons Spock\n->Scissor decapitates Lizard\n->Rock crushes Scissor\n'
                               '->Spock vaporizes Rock\n->Lizard eats Paper\n->Paper disproves Spock',
                               font=('arial', 20, 'bold'), fg='gold', bg='black')
    label_instructions.grid(row=5, column=0)
    run = True

    def reset():
        # If user want to restart the game they can play again
        # NOTE(review): `run` above is local to rules(), so `global run` sets
        # a module-level name instead of the local one. Harmless today because
        # `run` is never read afterwards — `nonlocal` may have been intended.
        global run
        answer = messagebox.askyesno('ALERT', 'LET\'S START THE GAME')
        if answer is True:
            run = False
            root.destroy()  # destroy() function destroys the current window
            game()  # starts new game

    Button(root, width=150, height=150, image=bg, font=('arial', 20, 'bold'), bg='black',
           borderwidth=0, command=lambda: reset()).grid(row=5, column=6)
    root.mainloop()
# rules()
| MeghanaKallepalli/Rock-Paper-Scissor-Lizard-Spock | Rules.py | Rules.py | py | 1,560 | python | en | code | 0 | github-code | 50 |
32008112238 | import math
from shapely.geometry import Polygon, LineString
coords_tile = dict()
def coord_from_tile(x, y=None, level=14):
    """Return the (lat, lon) of the NW corner of a slippy-map tile.

    `x` may be an integer tile column (with `y` the row), or a single
    "x_y" tile-id string with `y` omitted.
    """
    n = 2 ** level
    if y is None:
        # tile id given as a "x_y" string
        parts = x.split('_')
        x = int(parts[0])
        y = int(parts[1])
    lon = x / n * 360.0 - 180.0
    lat = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * y / n))))
    return lat, lon
def geom_from_tile(x, y):
    """Return [[lonW, latN], [lonE, latS]] for tile (x, y) at the default level."""
    nw = coord_from_tile(x, y)
    se = coord_from_tile(x + 1, y + 1)
    return [[nw[1], nw[0]], [se[1], se[0]]]
def tile_from_coord(lat, lon, output="list"):
    """Return the level-14 tile containing (lat, lon).

    Returns an (x, y) pair when output == "list", otherwise an "x_y" string.
    """
    n = 2 ** 14
    x = math.floor(n * (lon + 180) / 360)
    lat_r = lat * math.pi / 180
    y = math.floor(n * (1 - (math.log(math.tan(lat_r) + 1 / math.cos(lat_r)) / math.pi)) / 2)
    if output == "list":
        return x, y
    return "{}_{}".format(x, y)
def tile_to_line_string(x, y, level=17):
    """Closed LineString (NW->NE->SE->SW->NW) outlining tile (x, y) at `level`."""
    nw_corner = coord_from_tile(x, y, level)
    se_corner = coord_from_tile(x + 1, y + 1, level)
    lonW, lonE = sorted((nw_corner[0 + 1], se_corner[1]))
    latS, latN = sorted((nw_corner[0], se_corner[0]))
    return LineString([(lonW, latN), (lonE, latN), (lonE, latS), (lonW, latS), (lonW, latN)])
def tile_id_to_line_string(tile_id, level=17):
    """LineString outline for a tile given as an 'x_y' id string."""
    x, y = (int(part) for part in tile_id.split("_"))
    return tile_to_line_string(x, y, level)
def tile_to_polygon(x, y, level=17):
    """Polygon covering tile (x, y) at `level`."""
    outline = tile_to_line_string(x, y, level)
    return Polygon(outline)
def tile_id_to_polygon(tile_id, level=17):
    """Polygon covering the tile given as an 'x_y' id string."""
    outline = tile_id_to_line_string(tile_id, level)
    return Polygon(outline)
class Tile(object):
    """A slippy-map tile with its bounding box and covering polygon.

    NOTE(review): the bounds come from geom_from_tile, which uses the
    module-default level 14 while the other helpers default to level 17 —
    confirm which zoom level is intended here.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        corners = geom_from_tile(self.x, self.y)
        lons = [corner[0] for corner in corners]
        lats = [corner[1] for corner in corners]
        self.lonW = min(lons)
        self.lonE = max(lons)
        self.latS = min(lats)
        self.latN = max(lats)
        self.polygon = Polygon([(self.lonW, self.latN), (self.lonW, self.latS),
                                (self.lonE, self.latS), (self.lonE, self.latN)])

    def __repr__(self):
        return "Tile {0.x}_{0.y}".format(self)
def geom_to_tiles(geom):
    """Return (inner, outer) sets of level-14 tile (x, y) pairs for a geometry.

    `outer` holds every tile intersecting `geom`; `inner` holds only the
    tiles fully contained in it.
    """
    tiles_inner = set()
    tiles_outer = set()
    (min_x, min_y, max_x, max_y) = geom.bounds
    x0, y0 = tile_from_coord(min_y, min_x)
    x1, y1 = tile_from_coord(max_y, max_x)
    # normalize the tile ranges so iteration always goes low -> high
    if x1 < x0:
        x0, x1 = x1, x0
    if y1 < y0:
        y0, y1 = y1, y0
    for tx in range(x0, x1 + 1):
        for ty in range(y0, y1 + 1):
            tile = Tile(tx, ty)
            if geom.intersects(tile.polygon):
                tiles_outer.add((tx, ty))
            if geom.contains(tile.polygon):
                tiles_inner.add((tx, ty))
    return tiles_inner, tiles_outer
| BenoitBouillard/computeFillRatio | common/tile.py | tile.py | py | 2,953 | python | en | code | 0 | github-code | 50 |
38554558108 | import time
import math
from datetime import timedelta, tzinfo
class UTC(tzinfo):
    """Concrete tzinfo for UTC: zero offset, no daylight saving."""

    def utcoffset(self, dt):
        """UTC is always at zero offset from UTC."""
        return timedelta(0)

    def tzname(self, dt):
        """Timezone display name."""
        return "UTC"

    def dst(self, dt):
        """No daylight-saving adjustment in UTC."""
        return timedelta(0)
# timeframe to str map (double: str)
# Mapping from timeframe duration in seconds to its short label
# (0 means tick data).
TIMEFRAME_TO_STR_MAP = {
    0: 't',
    1: '1s',
    10: '10s',
    30: '30s',
    60: '1m',
    2*60: '2m',
    3*60: '3m',
    5*60: '5m',
    10*60: '10m',
    15*60: '15m',
    30*60: '30m',
    45*60: '45m',
    60*60: '1h',
    2*60*60: '2h',
    3*60*60: '3h',
    4*60*60: '4h',
    6*60*60: '6h',
    8*60*60: '8h',
    24*60*60: '1d',
    2*24*60*60: '2d',
    3*24*60*60: '3d',
    7*24*60*60: '1w',
    30*24*60*60: '1M'
}

# Reverse mapping: short label back to duration in seconds.
TIMEFRAME_FROM_STR_MAP = {label: seconds for seconds, label in TIMEFRAME_TO_STR_MAP.items()}


def timeframe_to_str(timeframe):
    """Short label for a timeframe in seconds, or '' when unknown."""
    try:
        return TIMEFRAME_TO_STR_MAP[timeframe]
    except KeyError:
        return ""


def timeframe_from_str(timeframe):
    """Duration in seconds for a timeframe label, or 0.0 when unknown."""
    try:
        return TIMEFRAME_FROM_STR_MAP[timeframe]
    except KeyError:
        return 0.0
def matching_symbols_set(configured_symbols, available_symbols):
    """
    Expand a configured symbols list against the broker's available symbols.

    Special '*' symbol means every symbol.
    Starting with '!' means except this symbol.
    Starting with '*' means every symbol ending with the given suffix.

    @param configured_symbols Iterable of configured names ('*', '*SUFFIX', '!NAME' or plain names).
    @param available_symbols List containing any supported markets symbol of the broker. Used when a wildchar is defined.
    @return set of symbol names to watch
    """
    if '*' in configured_symbols:
        # all instruments...
        # BUG FIX: previously referenced the undefined name `availables`,
        # which raised NameError whenever '*' was configured.
        watched_symbols = set(available_symbols)
        # ...except the '!'-prefixed exclusions
        for configured_symbol in configured_symbols:
            if configured_symbol.startswith('!'):
                # discard (not remove) so excluding an absent symbol is a no-op
                watched_symbols.discard(configured_symbol[1:])
    else:
        watched_symbols = set()
        for configured_symbol in configured_symbols:
            if configured_symbol.startswith('*'):
                # all symbols ending with this suffix...
                suffix = configured_symbol[1:]
                for symbol in available_symbols:
                    # ...except explicit '!' exclusions
                    if symbol.endswith(suffix) and ('!' + symbol) not in configured_symbols:
                        watched_symbols.add(symbol)
            elif not configured_symbol.startswith('!'):
                # plain symbol name, not an exclusion
                watched_symbols.add(configured_symbol)
    return watched_symbols
def fix_thread_set_name():
    """Monkey-patch threading.Thread so OS-level thread names match Python thread names.

    Requires the third-party `prctl` module; when it is missing, a warning is
    printed and threads keep their default OS names.
    """
    try:
        import threading
        import prctl

        def set_thread_name(name):
            prctl.set_name(name)

        def _thread_name_hack(self):
            set_thread_name(self.name)
            threading.Thread._bootstrap_original(self)

        # keep the original bootstrap and wrap it with the naming hook
        threading.Thread._bootstrap_original = threading.Thread._bootstrap
        threading.Thread._bootstrap = _thread_name_hack
    except ImportError:
        print('WARN: prctl module is not installed. You will not be able to see thread names')

        def set_thread_name(name):
            pass
def truncate(number, digits) -> float:
    """Truncate `number` toward zero to `digits` decimal places."""
    scale = 10.0 ** digits
    return math.trunc(number * scale) / scale
def decimal_place(value):
    """Number of decimal places needed for `value` (e.g. 0.01 -> 2)."""
    magnitude = math.floor(math.log10(value))
    return -int(magnitude)
def basetime(tf, timestamp):
    """
    Round `timestamp` (seconds) down to the start of its timeframe `tf`.

    Intraday timeframes use integer truncation; the weekly timeframe returns
    the preceding Monday 00:00 UTC and the monthly timeframe the first day of
    the month at 00:00 UTC. Unsupported timeframes return 0.
    """
    # BUG FIX: this function used `datetime.utcfromtimestamp` but the module
    # only imports `timedelta` and `tzinfo` from the datetime package, so the
    # weekly and monthly branches raised NameError. Import locally and work
    # with an aware UTC datetime (equivalent to the naive-UTC + UTC() dance).
    from datetime import datetime, timezone
    if tf < 7*24*60*60:
        # simplest: truncate to a multiple of tf
        return int(timestamp / tf) * tf
    elif tf == 7*24*60*60:
        # must find the UTC first day of week (Monday 00:00)
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        dt = dt.replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=dt.weekday())
        return dt.timestamp()
    elif tf == 30*24*60*60:
        # replace by first day of month at 00h00 UTC
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
        dt = dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        return dt.timestamp()
    return 0
| simhaonline/siis-rev | connectors/common/utils.py | utils.py | py | 3,814 | python | en | code | 2 | github-code | 50 |
32759579838 | import datetime
import io
import json
import logging
import os
import pydoc
import sys
import time
from types import SimpleNamespace
from typing import Any
import jsonargparse
import xarray as xr
LOGGER = logging.getLogger("pplbench")
def load_class_or_exit(class_name: str) -> Any:
    """
    Resolve `class_name` to a class object, exiting the process on failure.

    :param class_name: The class to be loaded.
    :returns: a class object
    """
    located = pydoc.locate(class_name)
    if located is not None:
        LOGGER.debug(f"loaded class `{located}`")
        return located
    LOGGER.error(f"class `{class_name}` not found. Exiting!`")
    sys.exit(1)
class SimpleNamespaceEncoder(json.JSONEncoder):
    """JSON encoder for config objects: SimpleNamespace and jsonargparse.Path."""

    def default(self, o):
        """Encode SimpleNamespace as its attribute dict and jsonargparse.Path as {}.

        Parameter renamed from ``object`` (which shadowed the builtin) to the
        stdlib-conventional ``o``; json internals call this positionally.
        """
        if isinstance(o, SimpleNamespace):
            return o.__dict__
        elif isinstance(o, jsonargparse.Path):
            return {}
        else:
            # call base class implementation which takes care of
            # raising exceptions for unsupported types
            return json.JSONEncoder.default(self, o)
def create_output_dir(config: SimpleNamespace) -> str:
    """
    Create an output directory for storing the benchmark results and write the config file to it.

    Also redirects the process-level stdout/stderr file descriptors into log
    files under the output dir while keeping Python's own streams usable.

    :param config: the experiment configuration
    :returns: a directory name for storing outputs
    """
    root_dir = getattr(config, "output_root_dir", os.path.join(".", "outputs"))
    if not os.path.isdir(root_dir):
        # BUGFIX: create the configured root directory itself; previously this
        # always created "./outputs" even when output_root_dir pointed elsewhere.
        os.mkdir(root_dir)
    timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
        "%Y-%m-%d_%H:%M:%S"
    )
    output_dir = create_subdir(root_dir, timestamp)
    # dump the config file in the output directory
    with open(os.path.join(output_dir, "config.json"), "w") as fp:
        fp.write(convert_to_readable_string(SimpleNamespaceEncoder().encode(config)))
    # redirect C stdout and stderr to files to avoid cluttering user's display
    # but keep Python stdout and stderr intact
    for sname in ["stdout", "stderr"]:
        py_stream = getattr(sys, sname)
        # save the current stream's file descriptor
        saved_fd = os.dup(py_stream.fileno())
        # redirect the current stream's file descriptor to a log file
        log_fd = os.open(
            os.path.join(output_dir, f"{sname}.txt"), os.O_WRONLY | os.O_CREAT
        )
        os.dup2(log_fd, py_stream.fileno())
        # now restore the Python stream to the saved file descriptor
        setattr(sys, sname, io.TextIOWrapper(os.fdopen(saved_fd, "wb")))
    return output_dir
def create_subdir(output_dir: str, name: str) -> str:
    """
    Create a subdirectory and return its name.

    :param output_dir: directory to make a subdir under
    :param name: subdir name
    :returns: name of new subdirectory
    """
    new_dir = os.path.join(output_dir, name)
    os.mkdir(new_dir)
    LOGGER.debug(f"Created subdir {new_dir}")
    return new_dir
def save_dataset(output_dir: str, name_prefix: str, ds: xr.Dataset) -> None:
    """
    Saves the dataset in both NetCDF binary format as well as a readable CSV.

    The NetCDF file is written to `output_dir/name_prefix.nc`; one CSV per
    data variable goes to `output_dir/name_prefix/varname.csv`.

    :param output_dir: directory to save dataset
    :param name_prefix: prefix to be added before file names
    :param ds: dataset to be saved
    """
    ds.to_netcdf(os.path.join(output_dir, name_prefix + ".nc"))
    csv_dir = create_subdir(output_dir, name_prefix)
    for varname in ds.data_vars.keys():
        frame = getattr(ds, str(varname)).to_dataframe()
        frame.to_csv(os.path.join(csv_dir, f"{varname}.csv"))
    LOGGER.info(f"saved {name_prefix}")
def convert_to_readable_string(input: str):
    """Pretty-print a compact JSON-like string with 4-space indentation.

    All spaces in *input* are discarded first (note: this also strips spaces
    inside quoted values), then braces/brackets open and close indent levels,
    commas break lines, and each ':' gets a trailing space. A final newline
    is appended.
    """
    compact = "".join(ch for ch in input if ch != " ")
    indent = "    "
    depth = 0
    pieces = []
    for ch in compact:
        if ch in "}]":
            depth -= 1
            pieces.append("\n" + indent * depth + ch)
        elif ch in "{[":
            depth += 1
            pieces.append(ch + "\n" + indent * depth)
        elif ch == ",":
            pieces.append(ch + "\n" + indent * depth)
        elif ch == ":":
            pieces.append(ch + " ")
        else:
            pieces.append(ch)
    pieces.append("\n")
    return "".join(pieces)
| facebookresearch/pplbench | pplbench/lib/utils.py | utils.py | py | 4,740 | python | en | code | 92 | github-code | 50 |
3210785300 | import unittest
class Node(object):
    """Doubly linked list node: `val` payload, `next`/`prev` neighbors and an
    optional `child` pointer (for multilevel lists)."""
    def __init__(self, val=None, next=None, prev=None, child=None):
        self.val = val
        self.next = next
        self.prev = prev
        self.child = child
class LinkedList(object):
    """Doubly linked list supporting get/add/delete by index (LeetCode 707 style)."""

    def __init__(self, head=None):
        self.head = head
        self.tail = head
        self.size = 0
        if head is not None:
            # BUGFIX: a list built around an existing head node contains one
            # element; this used to be `self.size += 0`, leaving size at 0 and
            # making get(0) on a one-node list return -1.
            self.size += 1

    def get(self, index):
        """
        Return the value at `index`, or -1 if out of range.
        :type index: int
        :rtype: int
        """
        if index < 0 or index >= self.size:
            return -1
        cur = self.head
        for _ in range(index):
            cur = cur.next
        return cur.val

    def addAtHead(self, val):
        """
        Prepend a node holding `val`.
        :type val: int
        :rtype: None
        """
        n = Node(val)
        if not self.head:
            self.head = self.tail = n
        else:
            self.head.prev = n
            n.next = self.head
            self.head = n
        self.size += 1
        return

    def addAtTail(self, val):
        """
        Append a node holding `val`.
        :type val: int
        :rtype: None
        """
        n = Node(val)
        if not self.head:
            self.head = self.tail = n
        else:
            n.prev = self.tail
            self.tail.next = n
            self.tail = n
        self.size += 1

    def addAtIndex(self, index, val):
        """
        Insert `val` before the `index`-th node; no-op when index is invalid.
        :type index: int
        :type val: int
        :rtype: None
        """
        if index < 0 or index > self.size:
            return
        if index == 0:
            self.addAtHead(val)
        elif index == self.size:
            self.addAtTail(val)
        else:
            cur = self.head
            for _ in range(index):
                cur = cur.next
            n = Node(val)
            n.prev = cur.prev
            n.next = cur
            cur.prev.next = n
            cur.prev = n
            self.size += 1
        return

    def deleteAtIndex(self, index):
        """
        Remove the `index`-th node; no-op when index is invalid.
        :type index: int
        :rtype: None
        """
        if index < 0 or index >= self.size:
            return
        if self.size == 1:
            self.head = self.tail = None
        elif index == 0:
            self.head.next.prev, self.head = None, self.head.next
        elif index == self.size - 1:
            self.tail.prev.next, self.tail = None, self.tail.prev
        else:
            cur = self.head
            for _ in range(index):
                cur = cur.next
            cur.next.prev, cur.prev.next = cur.prev, cur.next
        self.size -= 1
        return

    def build_dll(self, input_list):
        """Append every element of `input_list` to the list."""
        for i in input_list:
            self.addAtTail(i)
        return
class TestDLL(unittest.TestCase):
    """Exercises the basic linked-list operations end to end."""

    def test_dll(self):
        dll = LinkedList()
        dll.addAtHead(1)
        dll.addAtTail(3)
        dll.addAtIndex(1, 2)
        self.assertEqual(dll.get(1), 2)
        dll.deleteAtIndex(1)
        self.assertEqual(dll.get(1), 3)
| EugeneStill/PythonCodeChallenges | helpers/doubly_linked_list.py | doubly_linked_list.py | py | 2,990 | python | en | code | 0 | github-code | 50 |
12974027203 |
# Token type names for the ITRANS-transliteration lexer: nasal/aspirate marks,
# short (hrasva) and long (deergha) vowels, and consonants (hal).
ANUSVAARA = 'ANUSVAARA'
VISARGA = 'VISARGA'
ANUNAASIKA = 'ANUNAASIKA'
ACH_HRASVA = 'ACH_HRASVA'
ACH_DEERGHA = 'ACH_DEERGHA'
HAL = 'HAL'
# Grammar sketch (implemented by Lexer/Parser below):
# akshara_suffix = ANUSVAARA | VISARGA
# deergha_an = (ACH_DEERGHA ANUNAASIKA) | (ACH_DEERGHA)
# deergha = (deergha_an) | deergha_an
# hrasva_an = (ACH_HRASVA ANUNAASIKA) | (ACH_HRASVA)
# hrasva = (hrasva_an) | hrasva_an
EOF = 'EOF'
# ITRANS character sequences accepted for each token type.
TOKEN_CHARS = {
    'ANUSVAARA': ['M'],
    'VISARGA': ['H'],
    'ANUNAASIKA': ['.N'],
    'ACH_HRASVA': 'a|i|u|RRi|LLi'.split('|'),
    'ACH_DEERGHA': 'A|I|U|RRI|LLI|E|ai|O|au'.split('|'),
    'HAL': 'k|kh|g|gh|~N|c|ch|j|jh|~n|T|Th|D|Dh|N|t|th|d|dh|n|p|ph|b|bh|m|y|r|l|v|sh|Sh|s|h'.split('|'),
}
# Iteration order used by the lexer when classifying a matched sequence.
AKSHARA = (ANUNAASIKA, ANUSVAARA, VISARGA, ACH_HRASVA, ACH_DEERGHA, HAL)
# Synthetic token types produced by Lexer.get_next_token when a consonant is
# merged with its following vowel into a full syllable (akshara).
HAL_DEERGHA = 'HAL_DEERGHA'
HAL_HRASVA = 'HAL_HRASVA'
HRASVA_AKSHARA = 'HRASVA_AKSHARA'
DEERGHA_AKSHARA = 'DEERGHA_AKSHARA'
#
# DEERGHA_GURU = (DEERGHA_AKSHARA | (DEERGHA_AKSHARA HAL) | (DEERGHA_AKSHARA akshara_suffix)) [GURU]
# HRASVA_GURU = ((HRASVA_AKSHARA HAL) | (HRASVA_AKSHARA akshara_suffix)) [GURU]
# LAGHU = HRASVA_AKSHARA
# HRASVA_GANA = HRASVA_GURU [GURU] | LAGHU [LAGHU]
# GANA = DEERGHA_GURU [GURU] | HRASVA_GANA [LAGHU]
# LINE = GANA*
# YA = LAGHU GURU GURU
# RA = GURU LAGHU GURU
# TA = GURU GURU LAGHU
# BHA = GURU LAGHU LAGHU
# JA = LAGHU GURU LAGHU
# SA = LAGHU LAGHU GURU
# MA = GURU GURU GURU
# NA = LAGHU LAGHU LAGHU
class Token(object):
    """A lexed token: its type, matched text and position in the input."""

    def __init__(self, type, value, pos):
        self.type = type
        self.value = value
        self.position = pos

    def __str__(self):
        """Readable form, e.g. Token(HAL, 'k', 0)."""
        return f'Token({self.type}, {self.value!r}, {self.position})'

    def __repr__(self):
        return self.__str__()
class LexException(Exception):
    """Raised by Lexer when the input contains an unrecognized character.

    args are (message, position, current_char)."""
    pass
class Lexer(object):
    """Tokenizer for Sanskrit text in ITRANS transliteration.

    `_internal_next_token` matches the longest ITRANS sequence (3, then 2,
    then 1 characters); `get_next_token` additionally merges a consonant
    (HAL) with its following vowel into a HRASVA_AKSHARA / DEERGHA_AKSHARA
    syllable token, buffering one token of lookahead in `pending_token`.
    """
    def __init__(self, text):
        self.text = text
        self.pos = 0
        self.current_char = self.text[self.pos]
        # one-token lookahead buffer used when merging HAL + vowel
        self.pending_token = None
    def error(self):
        raise LexException('Error lexing input "%s" at position %s'%(self.current_char, self.pos), self.pos, self.current_char)
    def advance(self, num=1):
        """Advance the `pos` pointer and set the `current_char` variable."""
        self.pos += num
        if self.pos > len(self.text) - 1:
            self.current_char = None  # Indicates end of input
        else:
            self.current_char = self.text[self.pos]
    def skip_whitespace(self):
        while self.current_char is not None and self.current_char.isspace():
            self.advance()
    def _internal_next_token(self):
        """Return the next raw token, longest ITRANS sequence first."""
        text = self.text
        while self.current_char is not None:
            if self.current_char.isspace():
                self.skip_whitespace()
                continue
            val = None
            # 3-character sequences: RRi/RRI, LLi/LLI
            if len(text) >= self.pos+3:
                if self.current_char == 'R' and text[self.pos+1:self.pos+3] in ('Ri', 'RI'):
                    # RRi, RRI
                    val = text[self.pos:self.pos+3]
                elif self.current_char == 'L' and text[self.pos+1:self.pos+3] in ('Li', 'LI'):
                    # LLi, LLI
                    val = text[self.pos:self.pos+3]
            # 2-character sequences: diphthongs, aspirated consonants, .N, ~N/~n
            if (val is None) and (len(text) >= self.pos+2):
                if self.current_char == 'a' and text[self.pos+1] in ('iu'):
                    # ai, au  (membership test against the string 'iu')
                    val = text[self.pos:self.pos+2]
                elif self.current_char in ('kgcjTDtdpbsS') and text[self.pos+1] == 'h':
                    # check if followed by h (aspirate: kh, gh, ch, ...)
                    val = text[self.pos:self.pos+2]
                elif self.current_char == '.' and text[self.pos+1] == 'N':
                    # .N
                    val = text[self.pos:self.pos+2]
                elif self.current_char == '~' and text[self.pos+1] in 'nN':
                    # ~N, ~n
                    val = text[self.pos:self.pos+2]
            if val is None:
                val = self.current_char
            # classify the matched sequence against the token tables
            for t in AKSHARA:
                if val in TOKEN_CHARS[t]:
                    p = self.pos
                    self.advance(len(val))
                    return Token(t, val, p)
            self.error()
        return Token(EOF, None, self.pos)
    def get_next_token(self):
        """Return the next token, merging consonant+vowel into syllables."""
        if self.pending_token is not None:
            tok = self.pending_token
            self.pending_token = None
        else:
            tok = self._internal_next_token()
        if tok.type == HAL:
            tok2 = self._internal_next_token()
            if tok2.type == ACH_HRASVA:
                return Token(HRASVA_AKSHARA, tok.value+tok2.value, tok.position)
            elif tok2.type == ACH_DEERGHA:
                return Token(DEERGHA_AKSHARA, tok.value+tok2.value, tok.position)
            # remember tok2 for the next call (one-token pushback)
            self.pending_token = tok2
            return tok
        elif tok.type == ACH_HRASVA:
            return Token(HRASVA_AKSHARA, tok.value, tok.position)
        elif tok.type == ACH_DEERGHA:
            return Token(DEERGHA_AKSHARA, tok.value, tok.position)
        return tok
class ParseException(Exception):
    """Raised by Parser on an unexpected token; args are (message, token)."""
    pass
class Parser(object):
    """Recursive-descent parser classifying syllables into laghu/guru ganas.

    Consumes Token objects from a Lexer. `nodeVal` accumulates the matched
    text for the gana currently being built; each gana method returns a
    string like 'GURU(...)' or 'LAGHU(...)' and resets `nodeVal`.
    """
    def __init__(self, lexer):
        self.lexer = lexer
        self.current_token = self.lexer.get_next_token()
        self.nodeVal = ''
    def error(self):
        raise ParseException('Error parsing token %s'%self.current_token, self.current_token)
    def eat(self, token_type):
        # consume the current token if it matches, else fail
        if self.current_token.type == token_type:
            self.current_token = self.lexer.get_next_token()
        else:
            self.error()
    def anunaasika(self):
        tok = self.current_token
        self.eat(ANUNAASIKA)
        self.nodeVal += tok.value
    def deergha_akshara(self):
        tok = self.current_token
        self.eat(DEERGHA_AKSHARA)
        self.nodeVal += tok.value
    def hrasva_akshara(self):
        tok = self.current_token
        self.eat(HRASVA_AKSHARA)
        self.nodeVal += tok.value
    def akshara_suffix(self):
        """akshara_suffix = ANUSVAARA | VISARGA"""
        tok = self.current_token
        if self.current_token.type == ANUSVAARA:
            self.eat(ANUSVAARA)
            self.nodeVal += tok.value
        elif self.current_token.type == VISARGA:
            self.eat(VISARGA)
            self.nodeVal += tok.value
        else:
            self.error()
    def suffix_hal(self):
        # a trailing consonant closing the syllable
        tok = self.current_token
        self.eat(HAL)
        self.nodeVal += tok.value
    def deergha_guru(self):
        # long vowel syllable, optionally nasalized/suffixed: always GURU
        self.deergha_akshara()
        if self.current_token.type == ANUNAASIKA:
            self.anunaasika()
        if self.current_token.type in (ANUSVAARA, VISARGA):
            self.akshara_suffix()
        elif self.current_token.type == HAL:
            self.suffix_hal()
        out = ('GURU(%s)'%(self.nodeVal))
        self.nodeVal = ''
        return out
    def hrasva_guru(self):
        # short vowel syllable made heavy by a suffix/consonant: GURU
        self.hrasva_akshara()
        if self.current_token.type == ANUNAASIKA:
            self.anunaasika()
        if self.current_token.type in (ANUSVAARA, VISARGA):
            self.akshara_suffix()
        elif self.current_token.type == HAL:
            self.suffix_hal()
        out = ('GURU(%s)'%(self.nodeVal))
        self.nodeVal = ''
        return out
    def hrasva_gana(self):
        # short vowel syllable: GURU when closed by suffix/consonant, else LAGHU
        self.hrasva_akshara()
        if self.current_token.type == ANUNAASIKA:
            self.anunaasika()
        out = None
        if self.current_token.type in (ANUSVAARA, VISARGA):
            self.akshara_suffix()
            out = 'GURU(%s)'
        elif self.current_token.type == HAL:
            self.suffix_hal()
            out = 'GURU(%s)'
        else:
            out = 'LAGHU(%s)'
        if not out:
            self.error()
        ret = out%self.nodeVal
        self.nodeVal = ''
        return ret
    def prefixHal(self):
        # leading consonant(s) of a conjunct, consumed before the vowel
        tok = self.current_token
        self.eat(HAL)
        self.nodeVal += tok.value
    def detectGana(self):
        """
        Use for detecting
        :return:
        """
        while self.current_token.type == HAL:
            self.prefixHal()
        if self.current_token.type == DEERGHA_AKSHARA:
            return self.deergha_guru()
        elif self.current_token.type == HRASVA_AKSHARA:
            return self.hrasva_gana()
        self.error()
    def guru(self):
        """
        Use only for verifying
        :return:
        """
        while self.current_token.type == HAL:
            self.prefixHal()
        if self.current_token.type == DEERGHA_AKSHARA:
            return self.deergha_guru()
        elif self.current_token.type == HRASVA_AKSHARA:
            return self.hrasva_guru()
        self.error()
    def laghu(self):
        """
        Use only for verifying
        :return:
        """
        while self.current_token.type == HAL:
            self.prefixHal()
        self.hrasva_akshara()
        if self.current_token.type == HAL:
            self.error()
        ret = 'LAGHU(%s)'%self.nodeVal
        self.nodeVal = ''
        return ret
    def parse(self, pattern=None):
        """Detect ganas, or verify against *pattern* ('L'/'G' string) if given."""
        ganas = []
        gana_methods = {
            'L':self.laghu, 'G':self.guru
        }
        if pattern:
            pattern = pattern.upper()
            for g in pattern:
                ganas.append(gana_methods[g]())
            # pattern must consume the whole input
            if self.current_token.type != EOF:
                self.error()
        else:
            while self.current_token.type != EOF:
                ganas.append(self.detectGana())
        return ganas
def testLexer(text):
    """Tokenize *text* and print every token, including the final EOF token."""
    lexer = Lexer(text)
    token = lexer.get_next_token()
    while token.type != 'EOF':
        print(token)
        token = lexer.get_next_token()
    print(token)
def detectGanas(text, pattern=None):
    """
    Detect the ganas in `text` if no `pattern` is specified.
    Check if the `text` matches the gana pattern specified in `pattern`.
    `text` -- a string with sanskrit text in ITRANS transliteration format.
    `pattern` -- a string the characters 'L' and 'G'.
    Eg:- 'GGLGGLLGLGGL' is the pattern for indra vajra meter
    """
    parser = Parser(Lexer(text))
    try:
        ganas = parser.parse(pattern=pattern)
    except LexException as le:
        msg, pos, char = le.args
        print(text)
        print(' '*(pos-1), '^')
        print(msg)
    except ParseException as pe:
        msg, token = pe.args
        print(text)
        print(' '*(token.position-1), '^')
        print(msg)
    else:
        print(ganas)
        if pattern:
            print('Matched')
def main():
    """Interactive driver: detect gana patterns or verify verses against one."""
    for banner_line in ('===============================',
                       'SANSKRIT CHANDAS GANA DETECTION',
                       '==============================='):
        print(banner_line)
    print('\nThe program runs in two modes:')
    print('1. Print gana pattern of verses')
    print('2. Check if a verse matches a given gana pattern')
    mode = int(input('Enter choice>'))
    pattern = None
    if mode == 2:
        # Keep prompting until a string made only of L and G is entered.
        while not pattern:
            print('A gana pattern is specified as a string of L and G characters')
            print('L for Laghu(short); G for Guru(long)')
            print('''Eg:- 'GGLGGLLGLGG' is the pattern for the indra vajra meter''')
            pattern = input('Enter pattern to detect (Eg. LGGLLGLG)>').upper()
            if set(pattern) != {'G', 'L'}:
                pattern = None
                print('Invalid spec for gana pattern')
    while True:
        text = input('Enter sanskrit text in ITRANS format (q to exit)>\n')
        if text in 'qQ':
            break
        detectGanas(text, pattern=pattern)
# Run the interactive driver when executed as a script.
if __name__ == '__main__':
    main()
# print('OmaruNAcala')
# testLexer('OmaruNAcala')
# print('Om na mO bha ga va tE shrI ra ma NA ya')
# testLexer('OmnamObhagavatEshrIramaNAyabhOH')
#
# # testLexer('syAdindravajrAyadidaujagaugaH')
#
# # testLexer('yaMvaidikAmantradRRishaHpurANAH')
#
# # testLexer('upEndravajrAjatajAstatOgau')
#
# # testLexer('yaMvaidikAmantradRRishaHpurANAH')
#
# # testLexer('indraMyamaMmAtarishvAnamAhuH')
#
# # testLexer('vEdAntinOnirvacanIyamEkaM')
#
# # testLexer('yaMbrahmashabdEnavinirdishanti')
#
# # testLexer('shaivAyamIshaMshiva ityavOcan')
#
# # testLexer('yaMvaiShNavAviShNuritistuvanti')
#
#
# # testLexer('OmbhUrbhuvassuvaHtatsaviturvarENyambhargOdEvasyadhImahIdhiyOyOnaHpracOdayAt')
# testLexer('yaMvaidikAmantradRRishaHpurANAH')
# detectGanas('yaMvaidikAmantradRRishaHpurANAHa', 'GGLGGLLGLGGL')
# testLexer('indraMyamaMmAtarishvAnamAhuH')
# detectGanas('indraMyamaMmAtarishvAnamAhuH', 'GGLGGLLGLGG')
| ramprax/sanskrit-chandas-ganas | ganas.py | ganas.py | py | 13,329 | python | en | code | 1 | github-code | 50 |
26397167556 | class IterMain(object):
    def __iter__(self):
        """Return the shared module-level Iterator instance K.

        NOTE(review): K is stateful and shared, so iterating a second
        IterMain continues (or finds exhausted) the same counter — confirm
        this is the intended demo behavior.
        """
        print('return an iterator')
        # global instA
        return K
class Iterator(object):
    """Counts upward from its start value; raises StopIteration past 10."""

    def __init__(self, i):
        self.num = i

    def __next__(self):
        self.num += 1
        if self.num > 10:
            raise StopIteration()
        return self.num
# Shared iterator instance handed out by IterMain.__iter__.
K = Iterator(0)
# The for statement calls iter(IterMain()) once, then __next__ on K until
# StopIteration: prints 1 through 10.
for instB in IterMain():
    print(instB)
| steve3ussr/PyCharmProject | RunnobBasic/iter_class_test.py | iter_class_test.py | py | 419 | python | en | code | 0 | github-code | 50 |
32008128378 | import os
from pathlib import Path
import json
from common.config import load_users, GEN_PUBLIC_PATH, load_config, GEN_ZONES, GEN_USERS, GEN_RESULTS, GEN_PATH, \
PUBLIC_PATH, GEN_COMMUNITY
from common.statshunters import tiles_from_activities
from common.zones import load_zones_outer
from common.fileutils import FileCheck
from common.squares import get_max_square, compute_max_cluster, compute_clusters
from common.kmlutils import shapely_to_geojson
from shapely.ops import unary_union
from shapely import geometry
from common.tile import Tile
import geojson
from common.tile import coord_from_tile
import re
import argparse
from nominatim_api import nominatim_get_description
# --- Command line, configuration and zone-name lookup setup ---
parser = argparse.ArgumentParser(description='Generate clusters')
parser.add_argument('-u', '--user', dest="user", default=None, help="for a specific user")
parser.add_argument('-f', '--force', dest="force", action="store_true", help="Force regeneration")
args = parser.parse_args()
user = vars(args)['user']
force = vars(args)['force']
users = load_users()
if user:
    # restrict processing to the single requested user
    users = filter(lambda u: u['name'] == user, users)
config = load_config()
outer_zones = load_zones_outer()
with open(os.path.join(GEN_PATH, 'zones_desc.json'), 'r') as hr:
    zones_desc = json.load(hr)
# Build zone-code -> {code, name, country} lookup, first from configured
# zone files (country inferred by regex), then from the descriptions file.
zones_name = {}
for zone, zone_file in config['zones'].items():
    country = None
    for c, r in config['countries'].items():
        if re.match(r, zone):
            country = c
            break
    zones_name[zone] = {
        'code': zone,
        'name': zone_file.split('/')[-1].split('.')[0],
        'country': country
    }
for country, cdesc in zones_desc.items():
    for zone, zdesc in cdesc['zones'].items():
        zones_name[zdesc['id']] = {
            'code': zdesc['id'],
            'name': zone,
            'country': country
        }
result_dict = {}
# Per-metric aggregators applied to each user's French zone results ("BBI").
bbi_config = {
    "count": lambda r: len(r),
    "eddington": lambda r: eddigton(r, lambda x: x['visited']),
    "eddington10": lambda r: eddigton(r, lambda x: x['visited'] / 10),
    "squares": lambda r: sum([z['square'] for z in r.values()]),
    "eddingtonSquare": lambda r: eddigton(r, lambda x: x['square'])
}
bbi_results = []
def gen_geojson(output_file, explored_tiles=None, zone_tiles=None, limits_file=None, max_square=None, cluster=None, with_toponym=False):
    """Write a geojson FeatureCollection describing tile exploration.

    Features emitted (each tagged with a "kind" property): the zone limit
    polygon (from *limits_file*), unvisited tiles (*zone_tiles* minus
    *explored_tiles*), visited tiles, clusters/sub-clusters, and the max
    square. With *with_toponym*, cluster/square centroids are reverse
    geocoded via Nominatim.
    """
    sc = []
    if limits_file:
        with open(limits_file, 'r') as fP:
            limit = geojson.load(fP)
        sc.append(geojson.Feature(geometry=limit, properties={"kind": "zone_limit"}))
    if zone_tiles:
        non_explored_tiles_zone = zone_tiles - explored_tiles
        non_explored_geojson = shapely_to_geojson(geometry.MultiPolygon([Tile(*t).polygon for t in non_explored_tiles_zone]))
        sc.append(geojson.Feature(geometry=non_explored_geojson,
                                  properties={"kind": "unvisited", "size": len(non_explored_tiles_zone) }))
    geometry_tiles = unary_union([Tile(*t).polygon for t in explored_tiles])
    explored_geojson = shapely_to_geojson(geometry_tiles)
    sc.append(geojson.Feature(geometry=explored_geojson,
                              properties={"kind": "visited",
                                          "size": len(explored_tiles) }))
    if cluster:
        # normalize a single cluster to a list; first entry is the main cluster
        if isinstance(cluster, set):
            cluster = [cluster]
        for i in range(len(cluster)):
            g = unary_union([Tile(*t).polygon for t in cluster[i]])
            if with_toponym:
                center = g.centroid
                name = nominatim_get_description(center.y, center.x)
            else:
                name = None
            sc.append(geojson.Feature(geometry=shapely_to_geojson(g),
                                      properties={"kind": "cluster" if i == 0 else "sub-cluster",
                                                  "size": len(cluster[i]),
                                                  "name": name
                                                  }))
    if max_square:
        # max_square is (x, y, side) in zoom-14 tile coordinates
        ms1 = coord_from_tile(max_square[0], max_square[1], 14)
        ms2 = coord_from_tile(max_square[0] + max_square[2], max_square[1] + max_square[2], 14)
        g = geometry.Polygon([[ms1[1], ms1[0]], [ms1[1], ms2[0]], [ms2[1], ms2[0]],[ms2[1], ms1[0]], [ms1[1], ms1[0]]])
        if with_toponym:
            center = g.centroid
            name = nominatim_get_description(center.y, center.x)
        else:
            name = None
        sc.append(geojson.Feature( geometry=shapely_to_geojson(g),
                                   properties={"kind": "max_square",
                                               "size": max_square[2],
                                               "name": name
                                               }))
    geojson_collection = geojson.FeatureCollection(sc)
    with FileCheck(output_file) as h:
        h.write(geojson.dumps(geojson_collection))
def eddigton(data, value):
    """Eddington number of *data*: the largest integer E such that at least
    E entries have value(entry) >= E.

    :param data: mapping whose values are inspected.
    :param value: callable extracting a number from each entry value.
    :returns: the Eddington number (0 for empty data).
    """
    # Sort extracted values descending; E is the largest 1-based rank i with
    # vals[i-1] >= i (h-index identity). O(n log n) instead of the original
    # O(n * E) repeated rescan.
    vals = sorted((value(v) for v in data.values()), reverse=True)
    eddington = 0
    for rank, val in enumerate(vals, start=1):
        if val >= rank:
            eddington = rank
        else:
            break
    return eddington
def generate_user(user):
    """Compute one user's tile statistics and write their geojson/json files.

    Skips recomputation when the visited-tile count is unchanged from the
    previously saved result (unless --force).

    :param user: dict with at least 'name' and 'url' (statshunters share URL).
    :returns: (user_result dict, set of explored tiles)
    """
    user_json_filename = os.path.join(GEN_USERS, user['name'], user['name'] + ".json")
    Path(os.path.join(GEN_USERS, user['name'])).mkdir(exist_ok=True, parents=True)
    url_uid = user['url'].split('/')[-1]
    explored_tiles = tiles_from_activities(url_uid, filter_fct=lambda act: 'Virtual' not in act['type'])
    try:
        with open(user_json_filename, 'r') as hr:
            previous_result = json.load(hr)
    except (OSError, ValueError):
        # First run (no file) or corrupt JSON: recompute from scratch.
        # BUGFIX: was a bare `except:` which also swallowed KeyboardInterrupt.
        previous_result = None
    if (not force) and previous_result:
        if previous_result.get("visited", 0) == len(explored_tiles):
            # no change
            print("No change for "+user['name'])
            return previous_result, explored_tiles
    print("Treat "+user['name'])
    max_square = get_max_square(explored_tiles)
    clusters = compute_clusters(explored_tiles)[0:10]
    geojson_filename = os.path.join(GEN_USERS, user['name'], user['name'] + ".geojson")
    gen_geojson(geojson_filename, explored_tiles=explored_tiles, limits_file=None, max_square=max_square, cluster=clusters, with_toponym=True)
    user_result = {
        'user': user['name'],
        'visited': len(explored_tiles),
        'square': max_square[2],
        'cluster': len(clusters[0]),
        'geojson': os.path.relpath(geojson_filename, PUBLIC_PATH),
        'zones': {}
    }
    for zone in outer_zones:
        zone_tiles = outer_zones[zone]
        explored_tiles_zone = zone_tiles & explored_tiles
        if not explored_tiles_zone:
            continue
        if not force:
            try:
                if previous_result['zones'][zone]['visited'] == len(explored_tiles_zone):
                    # no change
                    print("No change for " + user['name'] + " / " + zone)
                    user_result['zones'][zone] = previous_result['zones'][zone]
                    continue
            except (KeyError, TypeError):
                # previous_result is None or has no entry for this zone
                # BUGFIX: was a bare `except: pass`.
                pass
        zone_max_square = get_max_square(explored_tiles_zone)
        path = config['zones'][zone].replace("%GEN_ZONES%", GEN_ZONES).replace('.kml', '.geojson')
        geojson_filename = os.path.join(GEN_USERS, user['name'], user['name'] + '_' + zone + ".geojson")
        gen_geojson(geojson_filename, explored_tiles=explored_tiles_zone, zone_tiles=outer_zones[zone],
                    limits_file=path, max_square=zone_max_square)
        user_result['zones'][zone] = {
            'zone': zones_name[zone],
            'user': user['name'],
            'visited': len(explored_tiles_zone),
            'total': len(zone_tiles),
            'ratio': round(100.0 * len(explored_tiles_zone) / len(zone_tiles), 2),
            'square': zone_max_square[2],
            'geojson': os.path.relpath(geojson_filename, PUBLIC_PATH)
        }
    # BBI: aggregate metrics over numerically-coded (French department) zones
    fr_results = {k: v for k, v in user_result['zones'].items() if re.match("[0-9].*", k)}
    user_result['bbi'] = {k: v(fr_results) for k, v in bbi_config.items()}
    with FileCheck(user_json_filename) as h:
        h.write(geojson.dumps(user_result, indent=2))
    return user_result, explored_tiles
# --- Aggregate all users into community-wide tiles and BBI rankings ---
geoms_users = []
community_tiles = set()
for user in users:
    result_dict[user['name']], user_tiles = generate_user(user)
    ur = {'name': user['name'], 'rank': 1}
    ur.update(result_dict[user['name']]['bbi'])
    bbi_results.append(ur)
    community_tiles |= user_tiles
    # geom_z = unary_union([Tile(*t).polygon for t in user_tiles])
    # geoms_users.append(geom_z)
# geom_z = unary_union(geoms_users)
max_square = get_max_square(community_tiles)
clusters = compute_clusters(community_tiles)[0:10]
# NOTE(review): this assignment is unused (and reads the last loop user);
# the community geojson below uses an explicit path — confirm dead code.
geojson_filename = os.path.join(GEN_USERS, user['name'], user['name'] + ".geojson")
gen_geojson(os.path.join(GEN_RESULTS, "kikourou_tiles.geojson"),
            explored_tiles=community_tiles,
            max_square=max_square,
            cluster=clusters, with_toponym=True)
# with open(os.path.join(GEN_RESULTS, "kikourou_tiles.geojson"), "w") as h:
#     h.write(geojson.dumps(shapely_to_geojson(geom_z)))
# Per-metric rankings; ties share the same rank via list.index on the
# descending value list. 'rank' accumulates across all metrics.
fields_results = {}
for f in bbi_config.keys():
    fields_results[f] = sorted([u[f] for u in bbi_results], reverse=True)
    for user in bbi_results:
        rank = fields_results[f].index(user[f])
        user["rank_"+f] = rank + 1
        user["rank"] += rank
with FileCheck(os.path.join(GEN_PUBLIC_PATH, "users.json")) as hF:
    hF.write(json.dumps(result_dict, indent=2))
with FileCheck(os.path.join(GEN_PUBLIC_PATH, "bbi.json")) as hF:
    hF.write(json.dumps(bbi_results, indent=2))
# USERS BY ZONES: per-zone list of user results, sorted by visited count desc
zones_users_results = { k:[] for k in outer_zones.keys()}
for user in result_dict.values():
    for zone in user['zones'].values():
        zones_users_results[zone['zone']['code']].append(zone)
for zone in zones_users_results.values():
    zone.sort(key=lambda z:z['visited'], reverse=True)
# COMMUNITY ZONES: per country, community-wide coverage of each zone
community_zones = {}
for country in config['countries']:
    community_zones[country] = {'name': country, 'zones': {}}
    c_outer_zones = load_zones_outer(re_filter=config['countries'][country])
    all_tiles_country = set()
    for zt in c_outer_zones.values():
        all_tiles_country |= zt
    community_tiles_country = community_tiles & all_tiles_country
    # NOTE(review): zone_results is built but never used afterwards — confirm
    zone_results = []
    for zone in c_outer_zones:
        community_tiles_zone = community_tiles_country & c_outer_zones[zone]
        zone_results.append([zone, len(community_tiles_zone), len(c_outer_zones[zone]),
                             len(community_tiles_zone) / len(c_outer_zones[zone]) * 100])
        geojson_filename = config['zones'][zone].replace("%GEN_ZONES%", GEN_COMMUNITY).replace('.kml', '_community.geojson')
        limits = config['zones'][zone].replace("%GEN_ZONES%", GEN_ZONES).replace('.kml', '.geojson')
        gen_geojson(geojson_filename, explored_tiles=community_tiles_zone, zone_tiles=c_outer_zones[zone], limits_file=limits)
        community_zones[country]['zones'][zone] = {
            'zone': zones_name[zone],
            'visited': len(community_tiles_zone),
            'size': len(c_outer_zones[zone]),
            'users': zones_users_results.get(zone, []),
            'geojson': os.path.relpath(geojson_filename, PUBLIC_PATH)
        }
    community_zones[country]['all'] = {
        'visited': len(community_tiles_country),
        'size': len(all_tiles_country)
    }
with FileCheck(os.path.join(GEN_PUBLIC_PATH, "community_zones.json")) as hF:
    hF.write(json.dumps(community_zones, indent=2))
with FileCheck(os.path.join(GEN_PUBLIC_PATH, "zones_users.json")) as hF:
    hF.write(json.dumps(zones_users_results, indent=2))
| BenoitBouillard/computeFillRatio | data_json_gen.py | data_json_gen.py | py | 11,667 | python | en | code | 0 | github-code | 50 |
21019828079 | import collections
import six
from sqlian import Parsable, Sql, is_single_row
from sqlian.utils import (
is_flat_tuple, is_flat_two_tuple,
is_non_string_sequence, is_partial_of,
)
from .compositions import Assign, Join, List, Ordering
from .expressions import (
Condition, Identifier, Value,
get_condition_classes,
And, Equal, In,
)
__all__ = [
'Clause', 'IdentifierClause',
'Select', 'From', 'Where', 'On', 'Using',
'GroupBy', 'OrderBy', 'Limit', 'Offset',
'InsertInto', 'Columns', 'Values',
'Update', 'Set',
'DeleteFrom',
]
class Clause(Parsable):
    """Base class for SQL clauses (SELECT, FROM, WHERE, ...).

    A clause renders as its ``sql_name`` keyword followed by its
    comma-joined children.
    """
    def __init__(self, *children):
        super(Clause, self).__init__()
        self.children = list(children)
    def __repr__(self):
        return '{}({})'.format(
            type(self).__name__,
            ', '.join(repr(c) for c in self.children),
        )
    def __sql__(self, engine):
        # Render "<KEYWORD> child1, child2, ..."; clauses with an empty
        # sql_name (e.g. Columns) render just the joined children.
        if not self.children:
            return self.sql_name
        arg_sql = Sql(', ').join(engine.as_value(c) for c in self.children)
        if not self.sql_name:
            return arg_sql
        return Sql('{} {}').format(Sql(self.sql_name), arg_sql)
    @classmethod
    def parse(cls, value, engine):
        # This is a rare case we extend parse(). Clauses contribute to the
        # output SQL, and therefore we need to make sure inner values are
        # wrapped in a Clause so the result SQL contains correct keywords.
        parsed = super(Clause, cls).parse(value, engine)
        if isinstance(parsed, Clause):
            return parsed
        return cls(parsed)
class IdentifierClause(Clause):
    """Clause whose native argument is parsed as a single identifier."""
    @classmethod
    def parse_native(cls, value, engine):
        return cls(Identifier.parse(value, engine))
class Select(Clause):
    """SELECT clause: accepts a name, an (expr, alias) 2-tuple, a sequence
    of columns, or a pre-built expression object."""
    sql_name = 'SELECT'
    @classmethod
    def parse_native(cls, value, engine):
        # Special case: 2-string-tuple is AS instead of a sequence of columns.
        if is_flat_two_tuple(value):
            return cls(Identifier.parse(value, engine))
        if is_non_string_sequence(value):
            return cls(*(Identifier.parse(v, engine) for v in value))
        if not isinstance(value, six.string_types):
            # non-string, non-sequence: pass through unchanged
            return cls(value)
        return cls(Identifier.parse(value, engine))
def parse_from_argument(value, engine):
    """Parse one FROM item; a (name, callable, ...) tuple applies each
    callable (typically a Join partial) to the parsed identifier in turn."""
    if is_flat_tuple(value) and all(callable(v) for v in value[1:]):
        item = Identifier.parse(value[0], engine)
        for v in value[1:]:
            item = v(item)
        return item
    return Identifier.parse(value, engine)
class From(Clause):
    """FROM clause: single from-item, join tuple, or sequence of items."""
    sql_name = 'FROM'
    @classmethod
    def parse_native(cls, value, engine):
        # Special case: (Any, Join, ...) tuple is JOIN, not from-item sequence.
        if (is_flat_tuple(value) and
                all(is_partial_of(v, Join) for v in value[1:])):
            return cls(parse_from_argument(value, engine))
        if is_non_string_sequence(value):
            return cls(*(parse_from_argument(v, engine) for v in value))
        return cls(parse_from_argument(value, engine))
def parse_pair_as_condition(pair, engine, rho_klass):
    """Turn one (key, value) pair into a Condition.

    The operator is chosen, in priority order: an explicit (key, op) tuple,
    an operator suffix embedded in the key string (e.g. "age >="), or
    auto-detection from the parsed right-hand side (List -> IN, else =).
    """
    key, value = pair
    condition_classes = get_condition_classes()
    # Explicit tuple operator.
    if is_flat_two_tuple(key):
        key, klass = key
        if not isinstance(klass, Condition):
            try:
                klass = condition_classes[str(klass).upper()]
            except KeyError:
                raise ValueError('invalid operator {!r}'.format(klass))
        return klass(
            Identifier.parse(key, engine),
            rho_klass.parse(value, engine),
        )
    # Parse in-key operator.
    for op, klass in condition_classes.items():
        if key.upper().endswith(' {}'.format(op)):
            return klass(
                Identifier.parse(key[:-(len(op) + 1)], engine),
                rho_klass.parse(value, engine),
            )
    # Auto-detect operator based on right-hand value.
    parsed = rho_klass.parse(value, engine)
    if isinstance(parsed, List):
        klass = In
    else:
        klass = Equal
    return klass(Identifier.parse(key, engine), parsed)
def parse_as_condition(value, engine, rho_klass=Value):
    """Parse a mapping or sequence of (key, value) pairs into a Condition,
    AND-ing multiple pairs together. Non-mapping, non-sequence values pass
    through unchanged.
    """
    if isinstance(value, collections.Mapping):
        # NOTE(review): dict items() views are not Sequence instances; the
        # checks below rely on is_single_row handling them — confirm.
        value = value.items()
    elif not isinstance(value, collections.Sequence):
        return value
    if is_single_row(value) and len(value) == 2:
        return parse_pair_as_condition(value, engine, rho_klass=rho_klass)
    return And(*(
        parse_pair_as_condition((key, value), engine, rho_klass=rho_klass)
        for key, value in value
    ))
class Where(Clause):
    """WHERE clause; its native argument is parsed as a condition."""
    sql_name = 'WHERE'
    @classmethod
    def parse_native(cls, value, engine):
        return cls(parse_as_condition(value, engine))
class GroupBy(IdentifierClause):
    """GROUP BY clause over a single identifier."""
    sql_name = 'GROUP BY'
class OrderBy(Clause):
    """ORDER BY clause: (column, ordering) tuple, an in-key ordering suffix
    (e.g. "name DESC"), or a bare column name."""
    sql_name = 'ORDER BY'
    @classmethod
    def parse_native(cls, value, engine):
        # Parse explicit operator tuple.
        if is_flat_two_tuple(value):
            return cls(Ordering(Identifier.parse(value[0], engine), value[1]))
        # Parse in-key ordering.
        for ordering in Ordering.allowed_orderings:
            if value.upper().endswith(' {}'.format(ordering)):
                return cls(Ordering(
                    Identifier.parse(value[:-(len(ordering) + 1)], engine),
                    ordering,
                ))
        # Treat this like a ref name.
        return cls(Identifier.parse(value, engine))
class Limit(Clause):
    """LIMIT clause."""
    sql_name = 'LIMIT'
class Offset(Clause):
    """OFFSET clause."""
    sql_name = 'OFFSET'
class InsertInto(IdentifierClause):
    """INSERT INTO clause naming the target table."""
    sql_name = 'INSERT INTO'
class Columns(Clause):
    """Keyword-less column list of an INSERT, rendered as "(c1, c2, ...)"."""
    sql_name = ''
    @classmethod
    def parse_native(cls, value, engine):
        return cls(List(*(Identifier.parse(n, engine) for n in value)))
class Values(Clause):
    """VALUES clause: one List per row; a single flat row is wrapped."""
    sql_name = 'VALUES'
    @classmethod
    def parse_native(cls, value, engine):
        if not value:
            # empty input still renders as "VALUES ()"
            return cls(List())
        if is_single_row(value):
            value = [value]
        return cls(*(List(*row) for row in value))
class Update(IdentifierClause):
    """UPDATE clause naming the target table."""
    sql_name = 'UPDATE'
class Set(Clause):
    """SET clause of an UPDATE statement.

    Accepts a mapping, a sequence of (column, value) pairs, a single
    flat pair, or an already-built expression.
    """
    sql_name = 'SET'
    @classmethod
    def parse_native(cls, value, engine):
        # BUG FIX: collections.Mapping / collections.Sequence were
        # removed in Python 3.10 — use the collections.abc ABCs.
        from collections.abc import Mapping, Sequence
        if isinstance(value, Mapping):
            value = value.items()
        elif not isinstance(value, Sequence):
            return cls(value)
        if is_single_row(value) and len(value) == 2:
            k, v = value
            return cls(Assign(Identifier.parse(k, engine), v))
        return cls(*(
            Assign(Identifier.parse(k, engine), v)
            for k, v in value
        ))
class DeleteFrom(IdentifierClause):
    """DELETE FROM clause; the operand is the target table identifier."""
    sql_name = 'DELETE FROM'
class On(Clause):
    """ON clause of a JOIN.

    Unlike WHERE, the right-hand side of each pair is parsed as an
    Identifier (a column reference), not as a literal value.
    """
    sql_name = 'ON'
    @classmethod
    def parse_native(cls, value, engine):
        return cls(parse_as_condition(value, engine, rho_klass=Identifier))
class Using(Clause):
    """USING clause of a JOIN; wraps one or more column identifiers."""
    sql_name = 'USING'
    @classmethod
    def parse_native(cls, value, engine):
        # Accept a bare column name as well as a sequence of names.
        if not is_non_string_sequence(value):
            value = [value]
        return cls(List(*(Identifier.parse(v, engine) for v in value)))
| uranusjr/sqlian | sqlian/standard/clauses.py | clauses.py | py | 7,189 | python | en | code | 0 | github-code | 50 |
22354097666 | import sys
import csv
from collections import OrderedDict
from time import sleep
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtGui import QIcon
from mainwindow import Ui_MainWindow
emit_inc = 1000
class CSVEditor(Ui_MainWindow):
    """Main-window controller: open a CSV, pick/reorder columns, and save
    a trimmed copy using background QThreads for row counting/writing."""

    def __init__(self, window):
        super().__init__()
        self.setupUi(window)
        window.show()
        self.statusbar.hide()
        # Disabled until a file is opened.
        self.page.setEnabled(False)
        window.setWindowIcon(QIcon('ui/icon.png'))
        self.actionQuit.triggered.connect(QtWidgets.qApp.quit)
        self.actionOpen_csv.triggered.connect(self.open_file)
        self.actionSave_csv.triggered.connect(self.save_file)
        self.actionAbout.triggered.connect(self.about_stack)
        self.doneButton.clicked.connect(self.home_stack)
        self.addButton.clicked.connect(self.add_fields)
        self.removeButton.clicked.connect(self.remove_fields)
        self.read_thread = None
        self.write_thread = None
        self.old_file = None
        self.old_file_name = None

    @staticmethod
    def error_message(exception,
                      text="An error occurred when processing your request",
                      title="Error"):
        """Show a modal message box with the exception as detail text."""
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Information)
        msg.setText(text)
        msg.setWindowTitle(title)
        msg.setDetailedText(str(exception))
        msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msg.exec_()

    def wait(self):
        """Block until any in-flight background thread has finished.

        BUG FIX: this used to busy-poll isFinished() with sleep(1) on
        the GUI thread; QThread.wait() blocks until run() returns.
        """
        if self.write_thread:
            self.write_thread.wait()
            self.write_thread.quit()
        if self.read_thread:
            self.read_thread.wait()
            self.read_thread.quit()

    def open_file(self):
        """Prompt for a CSV file and load its header/row count."""
        file_type = "csv"
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        file_name, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Open", directory="",
            filter="{} Files (*.{});;All Files (*)".format(
                file_type.upper(), file_type),
            options=options)
        if file_name:
            self.old_file_name = file_name
            self.page.setEnabled(True)
            self.page.setToolTip("")
            self.fileNameLabel.setText("File: " + file_name)
            self.read_original_file()

    def save_file(self):
        """Prompt for an output path and write the selected columns."""
        file_type = "csv"
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        file_name, _ = QtWidgets.QFileDialog.getSaveFileName(
            caption="Save", directory="",
            filter="{} Files (*.{});;All Files (*)".format(
                file_type.upper(), file_type),
            options=options)
        if file_name:
            # Append ".csv" only when the name has no extension at all.
            # NOTE(review): the second clause is redundant given the
            # first — confirm whether "or" was intended.
            if '.' not in file_name and file_name[-4:] != '.' + file_type:
                file_name += '.' + file_type
            self.write_file(file_name)

    def write_file(self, new_file_name):
        """Start a WriteFile thread copying the chosen columns/rows."""
        field_names = []
        for index in range(self.newList.count()):
            field_names.append(self.newList.item(index).text())
        self.wait()
        self.write_thread = WriteFile(self.old_file_name, new_file_name,
                                      field_names, self.newRowCount.value())
        self.newRowCount.setValue(0)
        self.write_thread.new_row_set.connect(self.new_row_set)
        self.write_thread.exception.connect(self.error_message)
        self.write_thread.start()

    def home_stack(self):
        """Switch the stacked widget to the editor page."""
        self.stackedWidget.setCurrentIndex(0)

    def about_stack(self):
        """Switch the stacked widget to the about page."""
        self.stackedWidget.setCurrentIndex(1)

    def read_original_file(self):
        """Load the header of the opened CSV and start counting rows."""
        try:
            with open(self.old_file_name, "r", encoding="utf-8", newline='') as f:
                reader = csv.DictReader(f)
                self.originalRowCount.setValue(0)
                self.originalList.clear()
                self.newList.clear()
                self.newRowCount.setValue(0)
                self.originalList.addItems(reader.fieldnames)
            self.wait()
            self.read_thread = UpdateRowCount(self.old_file_name)
            self.read_thread.original_row_set.connect(self.original_row_set)
            self.read_thread.update_new_row.connect(self.update_new_row)
            self.read_thread.exception.connect(self.error_message)
            self.read_thread.start()
        except Exception as e:
            self.error_message(e)

    def add_fields(self):
        """Copy the selected source columns into the output list (no dups)."""
        current_items = []
        for i in range(self.newList.count()):
            current_items.append(self.newList.item(i).text())
        for item in self.originalList.selectedItems():
            if item.text() not in current_items:
                self.newList.addItem(item.text())
        self.originalList.clearSelection()

    def remove_fields(self):
        """Drop the selected columns from the output list."""
        for row in self.newList.selectedItems():
            self.newList.takeItem(self.newList.row(row))

    def original_row_set(self, value):
        # Slot: progress update from UpdateRowCount.
        self.originalRowCount.setValue(value)

    def new_row_set(self, value):
        # Slot: progress update from WriteFile.
        self.newRowCount.setValue(value)

    def update_new_row(self):
        # Slot: default the output row count to the full input count.
        self.newRowCount.setValue(self.originalRowCount.value())
class UpdateRowCount(QThread):
    """Background worker that counts the data rows of a CSV file,
    emitting progress every ``emit_inc`` rows."""

    original_row_set = pyqtSignal(int)
    update_new_row = pyqtSignal()
    exception = pyqtSignal(Exception)

    def __init__(self, file_name):
        super().__init__()
        self.file_name = file_name

    def run(self):
        rows_seen = 0
        try:
            with open(self.file_name, 'r', encoding='utf-8', newline='') as handle:
                csv_rows = csv.reader(handle)
                next(csv_rows)  # skip the header row
                for rows_seen, _ in enumerate(csv_rows, start=1):
                    if rows_seen % emit_inc == 0:
                        self.original_row_set.emit(rows_seen)
                # Final (exact) count, then sync the "new file" spinner.
                self.original_row_set.emit(rows_seen)
                self.update_new_row.emit()
        except Exception as exc:
            # Surface any I/O / parse error to the GUI thread.
            self.exception.emit(exc)
class WriteFile(QThread):
    """Background worker that copies up to ``count`` rows of the chosen
    ``fields`` from the source CSV into a new CSV file."""
    new_row_set = pyqtSignal(int)
    exception = pyqtSignal(Exception)
    def __init__(self, old_file, new_file, fields, count):
        super().__init__()
        self.old_file_name = old_file
        self.new_file_name = new_file
        self.fields = fields
        self.count = count
    def run(self):
        written_count = 0
        try:
            # Refuse in-place overwrite: reading and writing the same
            # path simultaneously would corrupt the source.
            if self.new_file_name == self.old_file_name:
                raise FileExistsError("Can't overwrite files. Give the new "
                                      "file a different name")
            with open(self.new_file_name, 'w', encoding='utf-8', newline='') as new_file:
                writer = csv.DictWriter(new_file, fieldnames=self.fields)
                writer.writeheader()
                with open(self.old_file_name, 'r', encoding='utf-8') as \
                        old_file:
                    reader = csv.DictReader(old_file)
                    for row in reader:
                        # Stop once the requested number of rows is copied.
                        if written_count == self.count:
                            break
                        # Project the row onto the selected fields, in order.
                        new_row = OrderedDict()
                        for field in self.fields:
                            new_row[field] = row[field]
                        writer.writerow(new_row)
                        written_count += 1
                        # Periodic progress update to the GUI.
                        if written_count % emit_inc == 0:
                            self.new_row_set.emit(written_count)
                    self.new_row_set.emit(written_count)
        except Exception as e:
            # Surface any error (missing field, I/O, ...) to the GUI thread.
            self.exception.emit(e)
if __name__ == "__main__":
    # Build the Qt application, wire the UI into a main window, and
    # enter the event loop.
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = CSVEditor(main_window)
    sys.exit(app.exec_())
| ScriptSmith/csveditor | csveditor.py | csveditor.py | py | 7,838 | python | en | code | 5 | github-code | 50 |
3032408596 | import libomni as robot #Library tha handles all the serial commands to arduino AtMega
import time
import serial
import math
import numpy as np
robot.enablePID(1)
count = 0
#ultrasonic setup
# us: latest raw readings of the six ultrasonic sensors; d: working copy.
us = [0,0,0,0,0,0]
d = [0,0,0,0,0,0]
#odemetry setup
# Last/current encoder tick counts for the three wheels (module globals
# mutated by odemetryCalc).
oldEncoder0 = 0
oldEncoder1 = 0
oldEncoder2 = 0
newEncoder0 = 0
newEncoder1 = 0
newEncoder2 = 0
#odemetry position
# Estimated world pose of the robot.
current_x = 0
current_y = 0
current_theta = 0
#vector
# v: accumulated obstacle vector (homogeneous); flag[i]: 1 when sensor i
# currently sees an obstacle.
v = [0,0,0,1]
flag =[0,0,0,0,0,0]
##################################################### Obstacle Position
def _obstacle_in_robot_frame(d, t_sensor_to_robot):
    """Map a range reading d (along the sensor's +x axis) into the robot
    frame via the sensor's homogeneous 4x4 mounting transform.

    Replaces six copy-pasted implementations; the original functions
    also computed the world-frame point but discarded it, so that dead
    computation has been removed.
    """
    p_sensor = np.array([d, 0, 0, 1]).reshape(4, 1)
    return np.dot(t_sensor_to_robot, p_sensor)

def us0(d0, xc, yc, thetac):
    """Obstacle seen by ultrasonic sensor 0, as a 4x1 point in the ROBOT
    frame.  xc/yc/thetac (world pose) are kept for interface
    compatibility; as in the original code the returned point is in the
    robot frame, so they do not affect the result."""
    t = np.array([-1, 0, 0, -0.33,
                  0, -1, 0, 0,
                  0, 0, 1, 0,
                  0, 0, 0, 1]).reshape(4, 4)
    return _obstacle_in_robot_frame(d0, t)

def us1(d1, xc, yc, thetac):
    """Obstacle seen by sensor 1, robot frame (see us0 for the contract)."""
    t = np.array([0, -1, 0, 0,
                  1, 0, 0, 0.33,
                  0, 0, 1, 0,
                  0, 0, 0, 1]).reshape(4, 4)
    return _obstacle_in_robot_frame(d1, t)

def us2(d2, xc, yc, thetac):
    """Obstacle seen by sensor 2, robot frame (see us0 for the contract)."""
    t = np.array([np.sqrt(3)/2, -1/2, 0, 0.3556*(np.sqrt(3)/2),
                  1/2, np.sqrt(3)/2, 0, 0.3556/2,
                  0, 0, 1, 0,
                  0, 0, 0, 1]).reshape(4, 4)
    return _obstacle_in_robot_frame(d2, t)

def us3(d3, xc, yc, thetac):
    """Obstacle seen by sensor 3, robot frame (see us0 for the contract)."""
    t = np.array([1, 0, 0, 0.33,
                  0, 1, 0, 0,
                  0, 0, 1, 0,
                  0, 0, 0, 1]).reshape(4, 4)
    return _obstacle_in_robot_frame(d3, t)

def us4(d4, xc, yc, thetac):
    """Obstacle seen by sensor 4, robot frame (see us0 for the contract)."""
    t = np.array([np.sqrt(3)/2, 1/2, 0, 0.3556*(np.sqrt(3)/2),
                  -1/2, np.sqrt(3)/2, 0, -0.3556/2,
                  0, 0, 1, 0,
                  0, 0, 0, 1]).reshape(4, 4)
    return _obstacle_in_robot_frame(d4, t)

def us5(d5, xc, yc, thetac):
    """Obstacle seen by sensor 5, robot frame (see us0 for the contract)."""
    t = np.array([0, 1, 0, 0,
                  -1, 0, 0, -0.33,
                  0, 0, 1, 0,
                  0, 0, 0, 1]).reshape(4, 4)
    return _obstacle_in_robot_frame(d5, t)
###################################################### Odemetry
def odemetryCalc(xk,yk,thetak,l=0.19, N=2249, r=0.03):
    """Dead-reckoning pose update for the 3-wheel omni base.

    xk, yk, thetak: previous world pose.
    l/N/r: presumably wheel-to-centre distance (m), encoder ticks per
    wheel revolution, and wheel radius (m) — TODO confirm against the
    platform spec.
    Returns the new pose as a 3x1 array.
    Side effect: updates the module-level oldEncoder0/1/2 counters.
    """
    global oldEncoder0
    global oldEncoder1
    global oldEncoder2
    newEncoder0 = robot.encoder(0)
    newEncoder1 = robot.encoder(1)
    newEncoder2 = robot.encoder(2)
    deltaEncoder0 = newEncoder0 - oldEncoder0
    deltaEncoder1 = newEncoder1 - oldEncoder1
    deltaEncoder2 = newEncoder2 - oldEncoder2
    # Tick deltas -> wheel travel distances since the last call.
    D0=(deltaEncoder0/N)*((2*np.pi*r))
    D1=(deltaEncoder1/N)*((2*np.pi*r))
    D2=(deltaEncoder2/N)*((2*np.pi*r))
    # Forward-kinematics matrix of the omni base (body frame), then a
    # rotation into the world frame.
    kinematic_mat = np.array([1/np.sqrt(3),0,-1/np.sqrt(3),-1/3,2/3,-1/3,-1/(3*l),-1/(3*l),-1/(3*l)]).reshape(3,3)
    rotation_mat= np.array([np.cos(thetak),-np.sin(thetak),0,np.sin(thetak),np.cos(thetak),0,0,0,1]).reshape(3,3)
    # Wheel travel distances, ordered (D1, D0, D2) to match the
    # kinematic matrix's column order.
    distance_mat = np.array([D1,D0,D2])[:,None]
    oldPos_mat = np.array([xk,yk,thetak])[:,None]
    # np.dot explanation https://stackoverflow.com/questions/21562986/numpy-matrix-vector-multiplication
    kinxrot = np.dot(rotation_mat,kinematic_mat)
    newPos_mat = oldPos_mat + np.dot(kinxrot,distance_mat)
    # Persist the encoder readings for the next call.
    oldEncoder0 = newEncoder0
    oldEncoder1 = newEncoder1
    oldEncoder2 = newEncoder2
    return newPos_mat
try:
    # Main control loop: read sensors, sum active obstacle vectors in
    # the robot frame, and drive away from the resultant.
    while True:
        xc = current_x
        yc = current_y
        thetac = current_theta
        # NOTE(review): the returned pose is never written back into
        # current_x/current_y/current_theta, so the world pose stays at
        # the origin — confirm whether that is intentional.
        pose = odemetryCalc(xc, yc, thetac)
        for x in range(6):
            us[x] = robot.ultrasonic(x)
        d = [us[0], us[1], us[2], us[3], us[4], us[5]]
        # BUG FIX: the flag update used to sit outside the sensor loop,
        # so only flag[5] (the leftover loop index) was ever refreshed.
        for x in range(6):
            flag[x] = 1 if d[x] != 0 else 0
        # BUG FIX: the original tested `d == 0`, comparing a list with
        # an int (always False); spin in place only when no sensor fires.
        if not any(d):
            robot.motorVelocity(0, 50, -50)
        else:
            # Obstacle position per sensor, in the robot frame.
            vs0 = us0(d[0], xc, yc, thetac)
            vs1 = us1(d[1], xc, yc, thetac)
            vs2 = us2(d[2], xc, yc, thetac)
            vs3 = us3(d[3], xc, yc, thetac)
            vs4 = us4(d[4], xc, yc, thetac)
            vs5 = us5(d[5], xc, yc, thetac)
            # Sum of the active obstacle vectors; move in the opposite
            # direction.
            v = (flag[0] * vs0 + flag[1] * vs1 + flag[2] * vs2
                 + flag[3] * vs3 + flag[4] * vs4 + flag[5] * vs5)
            xd = -v.item(0)
            yd = -v.item(1)
            robot.move(xd, yd, 0)
## Ctrl + c to stop robot
except KeyboardInterrupt:
    # Close serial connection and halt the motors.
    robot.stop()
    print('\n Stop!!! See you again!')
| huantianh/OMRE-SIUE | OMRE_Python/g2g&OA/obstacleAvoidance/obstacleAvoidance.py | obstacleAvoidance.py | py | 5,919 | python | en | code | 2 | github-code | 50 |
16806034536 | import logging
logger = logging.getLogger(__name__)
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin
import cudf
# Default list of the Amex categorical feature columns.
cat_features = ["B_30","B_38","D_114","D_116","D_117","D_120","D_126","D_63","D_64","D_66","D_68"]
class CuDFTransforms(TransformerMixin):
    """Sklearn-style transformer aggregating per-customer features on
    GPU with cuDF: mean/std/min/max/last for numeric columns and
    count/last/nunique for categorical columns."""
    def __init__(self, cat_features):
        # Columns treated as categorical; every other column except
        # 'customer_ID' and 'S_2' is treated as numeric.
        self.cat_features = cat_features
    def fit(self, df):
        # Stateless transformer — nothing to learn.
        return self
    def transform(self, df):
        cat_features = self.cat_features
        all_cols = [c for c in list(df.columns) if c not in ['customer_ID','S_2']]
        num_features = [col for col in all_cols if col not in cat_features]
        # Per-customer aggregates of the numeric columns; flatten the
        # resulting MultiIndex into names like "B_1_mean".
        test_num_agg = df.groupby("customer_ID")[num_features].agg(['mean', 'std', 'min', 'max', 'last'])
        test_num_agg.columns = ['_'.join(x) for x in test_num_agg.columns]
        # Per-customer aggregates of the categorical columns.
        test_cat_agg = df.groupby("customer_ID")[cat_features].agg(['count', 'last', 'nunique'])
        test_cat_agg.columns = ['_'.join(x) for x in test_cat_agg.columns]
        # One row per customer combining both aggregate frames.
        df = cudf.concat([test_num_agg, test_cat_agg], axis=1)
        del test_num_agg, test_cat_agg
        logger.info(f"shape after feature engineering: {df.shape}" )
        return df
class AmexPreProcessPipeline(Pipeline):
    """Preprocessing pipeline: cuDF feature aggregation followed by a
    pass-through placeholder final step."""
    def __init__(self):
        # Uses the module-level default categorical column list.
        self.cat_features = cat_features
        steps = [
            ('cudf_transforms', CuDFTransforms(cat_features)),
            ('passthrough', None)
        ]
        super().__init__(steps)
22147381814 | import pandas as pd
df = pd.read_csv('data2.csv')
# Replace every empty (NaN) cell with the value 130, in place.
df.fillna(130, inplace = True)
print(df.to_string())
#Notice in the result: empty cells got the value 130 (in row 18, 22 and 28).
# Replace Empty Values
# Another way of dealing with empty cells is to insert a new value instead.
# This way you do not have to delete entire rows just because of some empty cells.
# The fillna() method allows us to replace empty cells with a value:
# Example
# Replace NULL values with the number 130:
16635289362 | from torch.utils.data import Dataset
import torch
import pandas as pd
class make_dataset(Dataset):
    """Torch ``Dataset`` over a tabular text-classification file.

    The backing file may be csv, tsv or xlsx.  ``run_type`` selects
    whether labels are exposed ("train") or only inputs ("test").
    """

    def __init__(self, data_path, tokenizer, run_type):
        # Pick the pandas loader from the file extension.
        if data_path.endswith("xlsx"):
            self._data = pd.read_excel(data_path)
        elif data_path.endswith("tsv"):
            self._data = pd.read_csv(data_path, sep='\t')
        elif data_path.endswith("csv"):
            self._data = pd.read_csv(data_path)
        self.tokenizer = tokenizer
        self.run_type = run_type

    def __len__(self):
        return len(self._data)

    def __getitem__(self, index):
        record = self._data.iloc[index]
        if self.run_type == "train":
            return {
                "input_ids": self.tokenizer.encode_as_ids(record["txt"]),
                "labels": record["label"],
            }
        elif self.run_type == "test":
            return {"input_ids": self.tokenizer.encode_as_ids(record["txt"])}

    def _padding(self, sequence, value, max_len):
        # Right-pad `sequence` with `value` up to `max_len` tokens.
        tail = [value] * (max_len - len(sequence))
        return sequence + tail

    def collate_fn(self, batch):
        """Pad a list of samples to a common length and stack as tensors."""
        widest = max(len(sample["input_ids"]) for sample in batch)
        if self.run_type == "train":
            padded = [
                self._padding(sample["input_ids"], self.tokenizer.pad_id(), widest)
                for sample in batch
            ]
            return {
                "input_ids": torch.tensor(padded),
                "labels": torch.tensor([sample["labels"] for sample in batch]),
            }
        elif self.run_type == "test":
            padded = [
                self._padding(sample["input_ids"], self.tokenizer.pad_id(), widest)
                for sample in batch
            ]
            return {"input_ids": torch.tensor(padded)}

    def make_result(self, preds, output_dir):
        # Write predictions next to the original texts as an Excel file.
        report = pd.DataFrame()
        report["txt"] = self._data["txt"]
        report["label"] = preds
        report.to_excel(f"{output_dir}new_model.xlsx", index=False)
| sondonghup/TextClassification | dataset.py | dataset.py | py | 2,130 | python | en | code | 0 | github-code | 50 |
41236633369 | import torch
import torchvision.ops.misc as misc
from torchvision.models import resnet18, resnet50, resnet101
from torchvision.models.resnet import ResNet18_Weights, ResNet50_Weights, ResNet101_Weights
from torchvision.models._utils import IntermediateLayerGetter
from utils import is_main_process
class ResNetMultiScale(torch.nn.Module):
    """Base class: a ResNet backbone returning the layer2/3/4 feature
    maps (strides 8/16/32); the stem and layer1 are frozen."""
    def __init__(self):
        super().__init__()
        backbone, self.num_channels = self.get_backbone()
        # Freeze every parameter outside layers 2-4.
        for name, parameter in backbone.named_parameters():
            if 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
                parameter.requires_grad_(False)
        self.num_outputs = 3
        return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
        self.strides = [8, 16, 32]
        self.intermediate_getter = IntermediateLayerGetter(backbone, return_layers=return_layers)
    def get_backbone(self):
        """Return (backbone module, per-scale channel counts); must be
        implemented by subclasses."""
        raise NotImplementedError('This method should be implemented by subclasses.')
    def forward(self, tensor):
        # Multi-scale features ordered [stride 8, stride 16, stride 32].
        out = self.intermediate_getter(tensor)
        return [out['0'], out['1'], out['2']]
class ResNet18MultiScale(ResNetMultiScale):
    """ResNet-18 backbone; per-scale feature channels [128, 256, 512].

    The redundant ``__init__`` that only forwarded to ``super()`` was
    removed — the inherited constructor is sufficient.
    """

    def get_backbone(self):
        # Frozen-BN ResNet-18; pretrained weights are fetched on the
        # main process only (presumably to avoid concurrent downloads
        # across distributed ranks — confirm).
        return resnet18(
            replace_stride_with_dilation=[False, False, False],
            weights=ResNet18_Weights.DEFAULT if is_main_process() else None,
            norm_layer=misc.FrozenBatchNorm2d
        ), [128, 256, 512]
class ResNet50MultiScale(ResNetMultiScale):
    """ResNet-50 backbone; per-scale feature channels [512, 1024, 2048].

    The redundant ``__init__`` that only forwarded to ``super()`` was
    removed — the inherited constructor is sufficient.
    """

    def get_backbone(self):
        # Frozen-BN ResNet-50; pretrained weights on the main process
        # only.  NOTE(review): this variant pins IMAGENET1K_V1 while the
        # 18/101 variants use DEFAULT — confirm whether that is intended.
        return resnet50(
            replace_stride_with_dilation=[False, False, False],
            weights=ResNet50_Weights.IMAGENET1K_V1 if is_main_process() else None,
            norm_layer=misc.FrozenBatchNorm2d
        ), [512, 1024, 2048]
class ResNet101MultiScale(ResNetMultiScale):
    """ResNet-101 backbone; per-scale feature channels [512, 1024, 2048].

    The redundant ``__init__`` that only forwarded to ``super()`` was
    removed — the inherited constructor is sufficient.
    """

    def get_backbone(self):
        # Frozen-BN ResNet-101; pretrained weights on the main process only.
        return resnet101(
            replace_stride_with_dilation=[False, False, False],
            weights=ResNet101_Weights.DEFAULT if is_main_process() else None,
            norm_layer=misc.FrozenBatchNorm2d
        ), [512, 1024, 2048]
| JeremyZhao1998/MRT-release | models/backbones.py | backbones.py | py | 2,297 | python | en | code | 5 | github-code | 50 |
72854789916 | import os
import time
import datetime
import torch
import argparse
from src.data import *
import warnings
import torch.distributed as dist
from src.defined_external_iterator import ExternalInputIterator
from src.defined_external_source import ExternalSourcePipeline
from src.COCOIterator import DALICOCOIterator
# from src.loss_function import Loss
from src.model import model, Loss
from src.utils_time import *
try:
from apex.parallel.LARC import LARC
from apex import amp
from apex.fp16_utils import *
except ImportError:
raise ImportError("Please install APEX from https://github.com/nvidia/apex")
if __name__ == "__main__":
    # Training entry point: parse options, build the model/optimizer
    # (optionally in fp16 via apex), build the DALI loader, and train.
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=4000, help="number of epochs")
    parser.add_argument("--start_epochs", type=int, default=0, help="number of epochs")
    parser.add_argument("--batch_size", type=int, default=256, help="size of each image batch")
    # prepare for dali module
    parser.add_argument('--backbone', type=str, default='resnet50',
                        choices=['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'])
    parser.add_argument('--data', '-d', type=str, default=None,
                        help='path to test and training data files')
    parser.add_argument('--gpus', type=int, default=4,
                        help='number of the gpus')
    parser.add_argument('--data_pipeline', type=str, default='no_dali', choices=['dali', 'no_dali'],
                        help='data preprocessing pipline to use')
    parser.add_argument('--fp16-mode', type=str, default='off', choices=['off', 'static', 'amp'],
                        help='Half precission mode to use')
    # BUG FIX: the options below were read from `opt` further down but
    # were never declared, which raised AttributeError at runtime.
    parser.add_argument('--lr', type=float, default=0.01,
                        help='initial learning rate for SGD')
    parser.add_argument('--n_cpu', type=int, default=4,
                        help='number of worker threads per DALI pipeline')
    parser.add_argument('--img_path', type=str, default=None,
                        help='path to the training images (DALI pipeline)')
    parser.add_argument('--annotation_path', type=str, default=None,
                        help='path to the COCO annotation file (DALI pipeline)')
    opt = parser.parse_args()
    # Derive the fp16 flags from the selected mode.
    if opt.fp16_mode != 'off':
        opt.fp16 = True
        opt.amp = (opt.fp16_mode == 'amp')
    else:
        opt.fp16 = False
        opt.amp = False
    if opt.amp:
        amp_handle = amp.init(enabled=opt.fp16)
    # NOTE: this rebinding shadows the imported `model` factory.
    model = model(opt)
    optimizer = torch.optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9)
    model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3])
    if opt.fp16:
        print("INFO: Use Fp16")
        if opt.amp:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
            # optimizer = amp_handle.wrap_optimizer(optimizer)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=128.)
    # Prepare dataset
    print("INFO: Prepare Datasets")
    # NOTE(review): with --data_pipeline=no_dali (the default) the
    # fallback loader below is commented out, so train_loader is
    # undefined — confirm the intended non-DALI path.
    if opt.data_pipeline=='dali':
        eii = ExternalInputIterator(opt.batch_size, opt.img_path, opt.annotation_path)
        train_pipe = [ExternalSourcePipeline(opt.batch_size, num_threads=opt.n_cpu,
                                             device_id=device_id, eii=eii) for device_id in range(opt.gpus)]
        train_loader = DALICOCOIterator(train_pipe, len(eii.img_files))
    # else:
    #     # Get dataloader
    #     dataset = *****
    #     train_loader = torch.utils.data.DataLoader(
    #         dataset,
    #         batch_size=opt.batch_size,
    #         shuffle=True,
    #         num_workers=opt.n_cpu,
    #         pin_memory=True,
    #         collate_fn=dataset.collate_fn
    #     )
    for epoch in range(opt.start_epochs, opt.epochs):
        start = time.time()
        model.train()
        for batch_i, datas in enumerate(train_loader):
            # Each GPU contributes one (images, boxes, labels) triple.
            for data in datas:
                imgs = data[0][0].cuda()
                targets = data[1][0].cuda()
                label_id = data[2][0].cuda()
                targets = torch.cat([label_id, targets], dim=1)
                _, outputs = model(imgs)
                loss = Loss(outputs, targets)
                optimizer.zero_grad()
                if opt.fp16:
                    if opt.amp:
                        # Scale the loss so fp16 gradients do not underflow.
                        with amp.scale_loss(loss, optimizer) as scale_loss:
                            scale_loss.backward()
                    else:
                        loss.backward()
                else:
                    loss.backward()
                optimizer.step()
            progress_bar(batch_i, int(train_loader._size / (opt.gpus * train_loader.batch_size)))
        train_loader.reset()
        print("Epoch time: {}".format(time.time()-start))
| cs-heibao/DALI-examples | dali_demo/train_multigpu.py | train_multigpu.py | py | 4,427 | python | en | code | 1 | github-code | 50 |
25331559022 | 'Chapter 6 Data Encoding and Processing'
import csv
"""
The main focus of this chapter is using Python to process data presented in different kinds of common encodings, such as CSV files, JSON, XML, and binary packed records. Unlike the chapter on data structures, this chapter is not focused on specific algorithms, but instead on the problem of getting data in and out of a program.
"""
'6.1 Reading and Writing CSV Data'
file = 'test.csv'
# Basic csv.reader usage: the first row is the header, the rest are data rows.
with open(file) as f:
    f_csv = csv.reader(f)
    headers = next(f_csv)
    print("header",headers)
    for row in f_csv:
        print(row)
# access some certain fields, you will use index, such as
# row[0] (Stock Symbol) row[4] (Change)
# use Chapter 1 -- namedtuple to make it convenient.
# use namedtuple instead of indexing
from collections import namedtuple
header=['sym','bol']
testNamedTuple=namedtuple('test', header)
mtest = testNamedTuple("AAA", "BBB")
print(mtest) # test(sym='AAA', bol='BBB')
# Wrap each csv row in a namedtuple built from the header row.
with open(file) as f:
    f_csv = csv.reader(f)
    headings = next(f_csv)
    Row = namedtuple('Row', headings)
    print(Row)
    for r in f_csv:
        row = Row(*r)
        print("Symbol", row.Symbol, "\t", "Price",row.Price)
# This would allow you to use the column headers such as row.Symbol and row.Change instead of indices.
# Another reader method : DictReader
# In this version, you would access the elements of each row using the row headers. For example, row['Symbol'] or row['Change'].
print("use DictReader instance")
with open(file) as f:
    f_csv = csv.DictReader(f)
    for row in f_csv:
        print("\tSymbol:", row["Symbol"], "\t\tPrice:", row["Price"])
import csv
from pprint import pprint
from collections import OrderedDict
with open(file) as f:
    f_csv = csv.DictReader(f)
    for row in f_csv:
        pprint(row)
# You will find the order is changed (not the header order)
# Keep the specified order? think about OrderedDict
##### Csv Writer
headers = ['Symbol','Price','Date','Time','Change','Volume']
rows = [('AA', 39.48, '6/11/2007', '9:36am', -0.18, 181800),
        ('AIG', 71.38, '6/11/2007', '9:36am', -0.15, 195500),
        ('AXP', 62.58, '6/11/2007', '9:36am', -0.46, 935000),
        # write a new stock of my own
        ('HANG',9999,'03/01/2013','1:51pm','+14',100000)
        ]
# write mode1: normal writer
with open('mystocks_normal_way.csv','w') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)
    # Each row is a tuple, so writerows can write the whole row-list
    f_csv.writerows(rows)
print('write done!')
# write mode2 : DictWriter
#headers is same as preceding.
# Differently row list is a list of dicts.
rows = [{'Symbol':'AA', 'Price':39.48, 'Date':'6/11/2007',
         'Time':'9:36am', 'Change':-0.18, 'Volume':181800},
        {'Symbol':'AIG', 'Price': 71.38, 'Date':'6/11/2007',
         'Time':'9:36am', 'Change':-0.15, 'Volume': 195500},
        {'Symbol':'AXP', 'Price': 62.58, 'Date':'6/11/2007',
         'Time':'9:36am', 'Change':-0.46, 'Volume': 935000},
        ]
with open('mystocks_dict_way.csv', 'w') as f:
    f_csv = csv.DictWriter(f, headers)
    f_csv.writeheader()
    # write the list of dicts
    f_csv.writerows(rows)
# about TSV ( tab - sepate)
# Example of reading tab-separated values
# with open('stock.tsv') as f:
#     f_tsv = csv.reader(f, delimiter='\t')
#     for row in f_tsv:
#         # Process row
#         ...
"Want set specified type of data"
# Convert each column to a declared type while reading.
col_types = [str, float, str, str, float, int]
with open('test.csv', 'r') as f:
    f_csv = csv.reader(f)
    headers= next(f_csv)
    for row in f_csv:
        # Apply conversions to the row items
        row = tuple(convert(value) for convert, value in zip(col_types, row))
        print(row)
8087459285 |
"""
File: gen_wav.py
Date: 2017/03/24 12:36:27
Brief: 通过麦克风录音 生成 wav文件
"""
import machine
import array
import wave
from ulab import numpy as np
import struct
class GenAudio(object):
    """Record a voice snippet from an ADC microphone (ESP32/MicroPython)
    and save it as a mono 16-bit WAV file."""

    def __init__(self):
        self.num_samples = 1000    # samples per processed block
        self.sampling_rate = 2000  # sampling rate written to the WAV header
        self.level = 0             # amplitude threshold for "voice" samples
        self.count_num = 0         # blocks are kept when more than this many samples exceed level
        self.save_length = 8       # minimum recording length: save_length * num_samples samples
        self.time_count = 20       # maximum recording time budget (loop iterations)
        self.voice_string = []     # captured sample blocks, filled by read_audio()

    # Save the captured samples to a WAV file.
    def save_wav(self, filename):
        """Write voice_string to `filename` as mono 16-bit little-endian PCM."""
        wf = wave.open(filename, 'wb')
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(self.sampling_rate)
        for i in self.voice_string:
            wf.writeframes(struct.pack('<h',i))
        wf.close()

    def read_audio(self):
        """Capture audio from the ADC until a voiced segment ends or the
        time budget runs out.  Returns True when a segment was stored in
        self.voice_string, False otherwise."""
        micpin = machine.Pin(33,machine.Pin.IN)
        mic = machine.ADC(micpin)
        mic.atten(machine.ADC.ATTN_11DB)
        save_count = 0
        save_buffer = []
        time_count = self.time_count
        while True:
            time_count -= 1
            # Read a representative sample for this block.
            string_audio_data = mic.read() * 1024
            newarray = array.array("H",[])
            for i in range(5000):
                newarray.append(mic.read()*1024)
            # Convert the captured samples into an array for thresholding.
            print("fine")
            audio_data = np.array(newarray)
            # Count samples above the threshold.
            large_sample_count = np.sum(audio_data > self.level)
            # BUG FIX: this was a Python-2-style statement
            # `print(x), "...", y` which printed only the max and built
            # a discarded tuple.
            print(np.max(audio_data), "large_sample_count=>", large_sample_count)
            # Loud block: (re)arm the minimum-length countdown.
            if large_sample_count > self.count_num:
                save_count = self.save_length
            else:
                save_count -= 1
            if save_count < 0:
                save_count = 0
            if save_count > 0:
                save_buffer.append(string_audio_data)
            else:
                # Silence after speech: flush the buffered segment.
                if len(save_buffer) > 0:
                    self.voice_string = save_buffer
                    save_buffer = []
                    print("Recode a piece of voice successfully!")
                    return True
            if time_count == 0:
                if len(save_buffer) > 0:
                    self.voice_string = save_buffer
                    save_buffer = []
                    print("Recode a piece of voice successfully!")
                    return True
                else:
                    return False
        # NOTE: an unreachable `return True` that followed the infinite
        # loop was removed.
if __name__ == "__main__":
    # Record one utterance from the microphone and save it as a WAV file.
    r = GenAudio()
    r.read_audio()
    r.save_wav("./test.wav")
11891810694 |
# this code is for half screen http://www.trex-game.skipser.com/ game
import numpy as np
import cv2
from mss import mss
from PIL import Image # grab screen
from pyautogui import press, keyDown,keyUp, hotkey # for keyboard
import time
########### change the distance between dino and points########
dist_x=25
##########################################
count =0
# Probe pixels sampled each frame (numpy image indexing is [row, col],
# i.e. inverted y,x): two "lower" points, one "upper" point, and a
# background reference point.
uy,ux=283,200+dist_x # PIXELS(inverted y,x) NOT COORDINATES of the point
ly,lx=307,200+dist_x
ly1,lx1=317,194+dist_x
bgy,bgx=50,50 # background
#######################
# mouse callback function for testing
def find_value(event,x,y,flags,param):
    # On double-click, print the clicked position and its pixel value
    # from the module-level `img` (the latest screen grab).
    if event == cv2.EVENT_LBUTTONDBLCLK:
        print(x,y)
        px=img[y,x]
        print(px)
#####################
def draw():
    """Overlay the probe points and a label on the module-level `img`."""
    cv2.circle(img, (lx,ly), 2, (255, 0, 0), -1) #(216,156)lower point COORDINATES
    cv2.circle(img, (lx1,ly1), 2, (255, 0, 0), -1) #(216,156)lower point COORDINATES
    cv2.circle(img, (ux,uy), 2, (255, 0, 0), -1)#(216,140)lower point COORDINATES
    cv2.circle(img, (bgx,bgy), 2, (255, 0, 0), -1) # backgrund check coordinates
    cv2.putText(img, 'Background', (305,50), cv2.FONT_HERSHEY_SIMPLEX,
                1, (0, 0, 255), 1, cv2.LINE_AA)
###################
# Screen region to capture each frame.
bounding_box = {"top": 100, "left": 0, "width": 830, "height": 500}
sct = mss()
x=input("Enter any key to start")
while True:
    # Grab the screen and probe a few pixels: if the background pixel
    # matches the game's light theme and any probe point differs from
    # the background colour, an obstacle is present -> jump.
    sct_img = sct.grab(bounding_box)
    img=np.array(sct_img)
    #print(img[166,516],type(img[166,516]))
    px_d=list(img[ly,lx]) # lower pixel
    px_d1=list(img[ly1,lx1])
    px_u=list(img[uy,ux]) # upper pixel
    px_bg=list(img[bgy,bgx])
    if px_bg == [247,247,247,255]: # http://www.trex-game.skipser.com/
        if (px_d != [247,247,247,255]) or (px_d1 != [247,247,247,255]) or (px_u != [247,247,247,255]): #[255,255,255,255] for black obtracles
            #press('space') # loooong jump
            press('space')
            time.sleep(0.08)
            press('down')
            #count=count+1
            #print("jump ", count)
    draw()
    cv2.imshow('screen',img )
    cv2.setMouseCallback('screen',find_value)
    # Quit on 'q'.
    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        cv2.destroyAllWindows()
        break
| palashbhusari/dinosaur_game_opencv | half_screen.py | half_screen.py | py | 2,197 | python | en | code | 0 | github-code | 50 |
40158252720 | import FWCore.ParameterSet.Config as cms
process = cms.Process('Test')
# Generate empty events; no input data is read.
process.source = cms.Source('EmptySource')
# Producer that fails on purpose; it runs unscheduled from the end
# path's Task, so the failure surfaces when the output module runs.
process.failing = cms.EDProducer('FailingProducer')
process.i = cms.EDProducer('IntProducer',
                           ivalue = cms.int32(10) )
process.out = cms.OutputModule('PoolOutputModule',
                               fileName = cms.untracked.string('unscheduled_fail_on_output.root'))
process.t = cms.Task(process.failing)
process.o = cms.EndPath(process.out, process.t)
process.p = cms.Path(process.i)
# Process 10 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)
| cms-sw/cmssw | FWCore/Integration/python/test/unscheduled_fail_on_output_cfg.py | unscheduled_fail_on_output_cfg.py | py | 623 | python | en | code | 985 | github-code | 50 |
20471493060 | ## IMPORT BASIC LIBS FOR SCRIPT
import scipy as sc
from scipy import io
from scipy import linalg
import bstates as bs
import pickle
import numpy as np
import feather
import h5py
import pandas as pd
from sklearn.decomposition import PCA
import os
from sys import argv
## TREATING ARGV VARS
nPats = int(argv[1]) # NUMBER OF SUBJECTS
pipe = argv[2] # PIPELINE
# NOTE(review): 'surr' (surrogate flag?) is parsed but never used below.
surr = int(argv[3])
## COMMAND TO ALLOW THE USE OF HDF5 IN NETWORK (HPC) STORAGE SYSTEMS
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
################################################################
##################### LOAD SYNC MATRICES #######################
################################################################
# Load the first subject only to discover the per-subject array shape.
with open('../data/bsPat_1.pickle', 'rb') as f:
    Phases, syncConnAux, leidaArrayAux = pickle.load(f)
leidaArray = np.zeros([leidaArrayAux.shape[0],
                       leidaArrayAux.shape[1],
                       nPats])
# NOTE(review): 'idx' only mirrors the loop counter i+1 for progress printing.
idx = 0
print("Loading pickle files - It's a bit of a pickle!")
for i in range(nPats):
    with open('../data/bsPat_' + str(i + 1) + '.pickle', 'rb') as f:
        Phases, syncConnAux, leidaArrayAux = pickle.load(f)
    # Keep only the first slice of each subject's array.
    leidaArrayAux = leidaArrayAux[:, :, 0]
    leidaArray[:, :, i] = leidaArrayAux
    idx = idx + 1
    print(idx)
################################################################
##################### MERGE SYNC MATRICES ######################
################################################################
# Stack all subjects vertically into one (nPats*T, features) matrix.
for i in range(leidaArray.shape[2]):
    if (i == 0):
        ensembleArray = leidaArray[:, :, i]
    else:
        ensembleArray = np.vstack((ensembleArray, leidaArray[:, :, i]))
    print(i)
del leidaArray
print("ensembleArray obtained (signal).")
## CREATING HDF5 FILE FOR ENSEMBLE ARRAY
h5f = h5py.File('../data/' + pipe + '.hdf5', 'w')
## SAVING ENSEMBLE ARRAY ON HDF5 FILE
h5f.create_dataset('ensembleArray', data = ensembleArray)
## CLEANING MEMORY TO ALLOW FURTHER OPERATIONS
del ensembleArray
print("Created HDF5")
print("ensembleArray saved")
print("Load dataset + clinical data")
## LOADING MATLAB FILE AND BOLD SIGNAL + CLINICAL DETAILS
mat = sc.io.loadmat('../data/2020_07_MASTER_connectomes90_All_select.mat',
                    squeeze_me = True)
################################################################
#################### LOADING CLINICAL DATA #####################
################################################################
sub = mat['sub'] # SUBJECTS
ses = mat['ses'] # SCAN SESSION
tt = 2280 # <- NEEDS AUTOMATION - DURATION OF SIGNAL PER PATIENT
ttime = np.arange(tt)
print("Clinical data loaded")
################################################################
##################### CREATE TAG VECTORS #######################
################################################################
print("Replicating clinical data")
## TRANFERING DATA TO VARIABLE THAT COMPOSE FINAL DATAFRAME
# Repeat each per-subject tag tt times so tags align row-wise with the
# stacked time series.
patTags = np.repeat(sub, tt).transpose()
ses = np.repeat(ses, tt).transpose()
ttime = np.tile(ttime, nPats).transpose()
## CLEANING MEMORY TO ALLOW FURTHER OPERATIONS
del mat
################################################################
##################### SAVING CLINICAL DATA #####################
################################################################
print("Saving data to HDF5 file")
dt = h5py.string_dtype(encoding = 'utf-8')
h5f.create_dataset('patTags', data = patTags, dtype = dt)
h5f.create_dataset('ses', data = ses, dtype = dt)
h5f.create_dataset('ttime', data = ttime)
## CLEANING MEMORY TO ALLOW FURTHER OPERATIONS
del patTags, ses
h5f.close()
print("Read HDF5")
# Re-open read-only and run a 50-component PCA on the stacked signal.
h5f = h5py.File('../data/' + pipe + '.hdf5', 'r')
data = h5f['ensembleArray']
#data_fit = h5f['ensembleArray_fit']
print("Read HDF5 finished")
pca = PCA(n_components=50)
pca.fit(data)
varExp = pca.explained_variance_ratio_
with open('../data/varExp_' + pipe + '.pickle', 'wb') as f:
    pickle.dump(varExp, f)
data_pca = pca.transform(data)
with open('../data/pca_out_' + pipe + '.pickle', 'wb') as f:
    pickle.dump(data_pca, f)
print("PCA finished")
h5f.close()
print("Read HDF5")
h5f = h5py.File('../data/' + pipe + '.hdf5', 'r')
print("Load clinical data")
patTags = h5f['patTags']
ses = h5f['ses']
ttime = h5f['ttime']
data = h5f['ensembleArray']
print("Creating dataframe")
# Final tidy frame: time index, first three principal components, tags.
df = pd.DataFrame({'ttime': np.array(ttime),
                   'pc1': np.array(data_pca[:, 0]),
                   'pc2': np.array(data_pca[:, 1]),
                   'pc3': np.array(data_pca[:, 2]),
                   'patTags': np.array(patTags),
                   'ses': np.array(ses)})
print("Exporting dataframe")
df.to_csv('../data/df_' + pipe + '.csv')
h5f.close()
print("Routine finished")
| CoDe-Neuro/neonatal_dfc | src/run_pca.py | run_pca.py | py | 4,714 | python | en | code | 1 | github-code | 50 |
# Point at which the curl is evaluated.
x=2
y=2
z=2
# Vector field components as string expressions in x, y, z:
# F = (P, Q, R) = (i, j, k) = (y*z**2, x*y, y*z).
i="y*z**2"
j="x*y"
k="y*z"
def Ry(k,x,y,z,h):
    """Forward-difference partial derivative of expression *k* w.r.t. y
    at the point (x, y, z), using step size h.

    k is a string in the single-letter variables x, y, z evaluated via eval.
    FIX: substituted values are wrapped in parentheses so negative numbers
    keep correct precedence — e.g. "y**2" with y=-2 must become "(-2)**2"
    (== 4), not "-2**2" (== -4) as the original produced.
    """
    ry=k.replace("x","(" + str(x) + ")")
    ry=ry.replace("z","(" + str(z) + ")")
    s1=ry.replace("y","(" + str(y) + ")")
    s2=ry.replace("y","(" + str(y+h) + ")")
    return (eval(s2)-eval(s1))/h
def Qz(j,x,y,z,h):
    """Forward-difference partial derivative of expression *j* w.r.t. z
    at the point (x, y, z), using step size h.

    FIX: substituted values are parenthesized so that negative values keep
    correct eval precedence (e.g. "(-3)**2" instead of "-3**2").
    """
    qz=j.replace("x","(" + str(x) + ")")
    qz=qz.replace("y","(" + str(y) + ")")
    s1=qz.replace("z","(" + str(z) + ")")
    s2=qz.replace("z","(" + str(z+h) + ")")
    return (eval(s2)-eval(s1))/h
def Pz(i,x,y,z,h):
    """Forward-difference partial derivative of expression *i* w.r.t. z
    at the point (x, y, z), using step size h.

    FIX: substituted values are parenthesized so that negative values keep
    correct eval precedence (e.g. "(-3)**2" instead of "-3**2").
    """
    pz=i.replace("x","(" + str(x) + ")")
    pz=pz.replace("y","(" + str(y) + ")")
    s1=pz.replace("z","(" + str(z) + ")")
    s2=pz.replace("z","(" + str(z+h) + ")")
    return (eval(s2)-eval(s1))/h
def Rx(k,x,y,z,h):
    """Forward-difference partial derivative of expression *k* w.r.t. x
    at the point (x, y, z), using step size h.

    FIX: substituted values are parenthesized so that negative values keep
    correct eval precedence — "x**2" at x=-2 becomes "(-2)**2" == 4, not
    "-2**2" == -4 as the original produced.
    """
    rx=k.replace("y","(" + str(y) + ")")
    rx=rx.replace("z","(" + str(z) + ")")
    s1=rx.replace("x","(" + str(x) + ")")
    s2=rx.replace("x","(" + str(x+h) + ")")
    return (eval(s2)-eval(s1))/h
def Qx(j,x,y,z,h):
    """Forward-difference partial derivative of expression *j* w.r.t. x
    at the point (x, y, z), using step size h.

    FIX: substituted values are parenthesized so that negative values keep
    correct eval precedence (e.g. "(-1)**2" instead of "-1**2").
    """
    qx=j.replace("y","(" + str(y) + ")")
    qx=qx.replace("z","(" + str(z) + ")")
    s1=qx.replace("x","(" + str(x) + ")")
    s2=qx.replace("x","(" + str(x+h) + ")")
    return (eval(s2)-eval(s1))/h
def Py(i,x,y,z,h):
    """Forward-difference partial derivative of expression *i* w.r.t. y
    at the point (x, y, z), using step size h.

    FIX: substituted values are parenthesized so that negative values keep
    correct eval precedence (e.g. "(-3)**2" instead of "-3**2").
    """
    py=i.replace("x","(" + str(x) + ")")
    py=py.replace("z","(" + str(z) + ")")
    s1=py.replace("y","(" + str(y) + ")")
    s2=py.replace("y","(" + str(y+h) + ")")
    return (eval(s2)-eval(s1))/h
# Finite-difference step size.
h=1/10000
# Curl components of F = (P, Q, R) = (i, j, k):
#   a = dR/dy - dQ/dz,  b = dP/dz - dR/dx,  c = dQ/dx - dP/dy
a=Ry(k,x,y,z,h)-Qz(j,x,y,z,h)
b=Pz(i,x,y,z,h)-Rx(k,x,y,z,h)
c=Qx(j,x,y,z,h)-Py(i,x,y,z,h)
print(round(a,5),"i, ",round(b,5),"j, ",round(c,5),"k, ")
| BryceP-44/ncalc | curl.py | curl.py | py | 1,296 | python | en | code | 0 | github-code | 50 |
23338484338 |
class Solution(object):
    """LeetCode 1431 'Kids With the Greatest Number of Candies'."""

    @staticmethod
    def kidsWithCandies(candies, extraCandies):
        """Overwrite candies[i] with True/False: True when giving kid i all
        the extra candies reaches at least the current maximum.

        Mutates *candies* in place (the print below relies on that) and also
        returns it. The original definition had neither ``self`` nor a
        decorator, so the instance-method form was not callable as written.
        """
        maxCandies = max(candies)
        for i in range(len(candies)):
            if candies[i] + extraCandies >= maxCandies:
                candies[i] = True
            else:
                candies[i] = False
        return candies


candies = [4, 2, 1, 1, 2]
# BUG FIX: the method only exists inside the Solution namespace, so the
# original bare call `kidsWithCandies(candies, 1)` raised NameError.
result = Solution.kidsWithCandies(candies, 1)
print('final result in array is:',candies) | lovepreetmultani/DS_Algo_Coding | Python/Arrays/max-candies.py | max-candies.py | py | 423 | python | en | code | 0 | github-code | 50 |
42145716064 | #!/usr/bin/python
# initializing string
test_str = "Gfg, is best : for ! Geeks ;"

# printing original string
print("The original string is : " + test_str)

# initializing punctuations string
punc = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''

# Removing punctuation with str.translate: one C-level pass over the string
# instead of an O(n) str.replace call per punctuation character found.
test_str = test_str.translate(str.maketrans('', '', punc))

# printing result
print("The string after punctuation filter : " + test_str)
| vishhaldawane/python | punctuation.py | punctuation.py | py | 459 | python | en | code | 0 | github-code | 50 |
22975654443 | import sys
def format_alias(shell, alias, run):
    """Return the alias-definition line for *shell*, or None when the shell
    is not supported (zsh/bash/sh and fish are)."""
    if shell == "zsh" or shell == "bash" or shell == "sh":
        return "alias " + alias + "=\"" + run + "\""
    elif shell == "fish":
        return "alias " + alias + " \"" + run + "\""
    return None


def main():
    """Parse argv and print the alias line, a usage message, or the
    unsupported-shell notice."""
    try:
        shell = sys.argv[1]
        alias = sys.argv[2]
        run = sys.argv[3]
    except IndexError:  # FIX: was a bare except, which also swallowed SystemExit etc.
        print("Usage: \n\tmkalias <shell> <alias> <command>")
        sys.exit()
    # Strip quotes so the command may be passed quoted on the command line.
    run = run.replace('"', "")
    run = run.replace("'", "")
    line = format_alias(shell, alias, run)
    if line is not None:
        print(line)
    else:
        print("Sorry you're shell isn't supported. Please submit a issue on Github and I will try to include it.")


if __name__ == "__main__":
    # Guarding the entry point keeps imports side-effect free.
    main()
| XiKuuKy/mkalias | mkalias.py | mkalias.py | py | 524 | python | en | code | 2 | github-code | 50 |
# A small program illustrating edge detection using the Canny algorithm
# from the OpenCV library.
# On a grayscale image I, Canny works as follows:
# 1) Compute the gradient magnitude over the image I
# 2) Keep all local maxima of the gradient over I
# 3) Apply a double threshold to the retained points. Let ||g_ij|| be the
#    gradient magnitude of pixel (i,j):
#    - if ||g_ij|| > t1 then (i,j) is a "strong" edge
#    - if t1 > ||g_ij|| > t2 then (i,j) is a "weak" edge
#    - if t2 > ||g_ij|| then (i,j) is not an edge
# 4) Keep only the strong edges and the weak edges connected to a strong edge

# Import the OpenCV library
import cv2

# Open a new window
cv2.namedWindow("Canny")

# Capture the webcam output
vc = cv2.VideoCapture(0)

# Check that the webcam is actually transmitting images
if vc.isOpened():
    rval, frame = vc.read()
else:
    rval = False

# Gradient-magnitude thresholds for edge detection.
# Pixels take integer values between 0 (black) and 255 (white).
# The lower the chosen thresholds, the more edges are detected.
threshold1 = 150
threshold2 = 125

# Run continuously while frames keep arriving
while rval:
    # Convert the current frame to grayscale
    black_and_white = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect edges with Canny
    edges = cv2.Canny(black_and_white, threshold1, threshold2)
    # Display the result in the "Canny" window
    cv2.imshow("Canny", edges)
    # Capture the next webcam frame
    rval, frame = vc.read()
    # If the user presses key 27 (ESC), leave the loop
    key = cv2.waitKey(20)
    if key == 27:
        break

# Destroy the window. BUG FIX: the original destroyed a window named
# "preview" (left over from the classic webcam-preview snippet), which was
# never created here — the window opened above is "Canny".
cv2.destroyWindow("Canny")
| Molugan/AI_summer_school | Demo_1/demo_canny.py | demo_canny.py | py | 1,868 | python | fr | code | 0 | github-code | 50 |
22453374695 | import re
from abc import ABC
import spacy
from bs4 import BeautifulSoup
from spacy.tokens import Span,Token
from utils import switcher_color_entities,switcher_color_semantics
class WordProcessing():
    """Base class: holds a BeautifulSoup document and a spaCy pipeline and
    registers the ``wikipedia_url`` extensions used by the subclasses."""

    def __init__(self,soup, languaje,regex):
        """
        :param soup: BeautifulSoup document to annotate in place
        :param languaje: 'en' or 'es'; anything else falls back to English
        :param regex: tag-name pattern later passed to soup.find_all()
        """
        if languaje == "en":
            self.nlp = spacy.load("en_core_web_sm")
        elif languaje == "es":
            self.nlp = spacy.load("es_core_news_sm")
        else:
            self.nlp = spacy.load("en_core_web_sm")
        self.soup = soup
        self.regex = regex
        # NOTE(review): set_extension raises if the extension is already
        # registered, so constructing a second instance will fail — consider
        # force=True or registering once at module import time.
        Token.set_extension("wikipedia_url",getter=get_wikipedia_url_semantics)
        Span.set_extension("wikipedia_url", getter=get_wikipedia_url_entities)

    def wordProcessing(self):
        # Subclasses override this with the actual annotation pass.
        pass

    def create_tag(self,name_tag, dic, text):
        """Create a new soup tag *name_tag* with attributes *dic* and *text*."""
        new_tag = self.soup.new_tag(name_tag)
        new_tag.attrs = dic
        new_tag.string = text
        return new_tag
def get_wikipedia_url_entities(span):
    """Getter for the Span ``wikipedia_url`` extension.

    For entity spans of the selected types, return a Wikipedia search URL
    built from the span text (spaces become underscores); otherwise None.
    """
    if span.label_ not in ("PERSON", "ORG", "GPE", "LOCATION"):
        return None
    return "https://en.wikipedia.org/w/index.php?search=" + span.text.replace(" ", "_")
def get_wikipedia_url_semantics(token):
    """Getter for the Token ``wikipedia_url`` extension: a Wikipedia search
    URL built from the token's text."""
    search_base = "https://en.wikipedia.org/w/index.php?search="
    return search_base + token.text
class EntitiesProcessing(WordProcessing):
    """Wrap named entities (PERSON/ORG/GPE/...) found in matching tags in
    colored <mark> elements carrying a Wikipedia search link."""

    def wordProcessing(self):
        # Process every tag of the soup that matches the configured pattern.
        for tag in self.soup.find_all(self.regex):
            # Only NER is needed; disabling tagger/parser speeds the pipeline up.
            with self.nlp.disable_pipes('tagger','parser'):
                doc = self.nlp(tag.text)
            new_tag = self.soup.new_tag(tag.name)
            new_tag.attrs = tag.attrs
            # Entities keyed by their starting character offset in tag.text.
            entities = { ent.start_char:ent for ent in doc.ents}
            tokens = [token.text for token in doc]
            # 'jump' marks characters already consumed by an emitted entity;
            # 's' accumulates plain text between entities until it forms a
            # known token, which is then flushed into the new tag.
            jump = -1
            s = ""
            for index,char_s in enumerate(tag.text):
                if index<=jump:
                    continue
                if len(entities) != 0 and index in entities.keys():
                    ent = entities[index]
                    # Only entity types with a Wikipedia URL get highlighted.
                    if ent._.wikipedia_url != None:
                        tag_font = self.create_tag(
                            "mark",
                            {"class": "entity",
                             "style": "background: "+ switcher_color_entities[ent.label_] +"; line-height: 1;"
                                      " border-radius: 0.35em; box-decoration-break: clone;"
                                      " -webkit-box-decoration-break: clone"
                             },
                            ent.text + " "
                        )
                        # Attach a small uppercase label linking to Wikipedia.
                        tag_font.append(self.create_tag(
                            "a",
                            {"style": "font-size: 0.8em; font-weight: bold; "
                                      "line-height: 1; border-radius: 0.35em;"
                                      " text-transform: uppercase; vertical-align: middle; ",
                             "href":ent._.wikipedia_url,
                             "target":"_blank"},
                            ent.label_
                        ))
                        new_tag.append(tag_font)
                    else:
                        new_tag.append(ent.text)
                    # Skip over the rest of the entity's characters.
                    jump = ent.end_char
                    s = ""
                else:
                    s += char_s
                    if s.strip() in tokens:
                        new_tag.append(s)
                        s = ""
            # Swap the rebuilt tag into the document in place.
            tag.replace_with(new_tag)
class SemanticsProcessing(WordProcessing):
    """Highlight tokens by part of speech: tokens whose POS tag appears in
    switcher_color_semantics are wrapped in a colored <mark> with a
    Wikipedia link; other tokens are appended as plain text."""

    def wordProcessing(self):
        for tag in self.soup.find_all(self.regex):
            # Only the tagger is needed; parser and NER are disabled for speed.
            with self.nlp.disable_pipes('parser', 'ner'):
                doc = self.nlp(tag.text)
            new_tag = self.soup.new_tag(tag.name)
            new_tag.attrs = tag.attrs
            for token in doc:
                if token.pos_ in switcher_color_semantics.keys():
                    tag_font = self.create_tag(
                        "mark",
                        {"class": "entity",
                         "style": "background: "+ switcher_color_semantics[token.pos_] +"; line-height: 1;"
                                  " border-radius: 0.35em; box-decoration-break: clone;"
                                  " -webkit-box-decoration-break: clone"
                         },
                        token.text + " "
                    )
                    # Small uppercase POS label linking to a Wikipedia search.
                    tag_font.append(self.create_tag(
                        "a",
                        {"style": "font-size: 0.5em; font-weight: bold; "
                                  "line-height: 1; border-radius: 0.35em;"
                                  " text-transform: uppercase; vertical-align: middle; ",
                         "href": token._.wikipedia_url,
                         "target": "_blank"
                         },
                        token.pos_
                    ))
                    new_tag.append(tag_font)
                else:
                    new_tag.append(token.text + " ")
            # Swap the rebuilt tag into the document in place.
            tag.replace_with(new_tag)
| miguel-kjh/WebWordProcessing | WordProcessing.py | WordProcessing.py | py | 4,994 | python | en | code | 0 | github-code | 50 |
42363993534 | #!/usr/bin/env python
import os
import argparse
import numpy as np
# Parse session directories
parser = argparse.ArgumentParser()
parser.add_argument('--session_directory', dest='session_directory', action='store', type=str, help='path to session directory for which to measure performance')
args = parser.parse_args()
session_directory = args.session_directory

# Parse data from session (reposition log, reward values log)
transitions_directory = os.path.join(session_directory, 'transitions')
target_grasped_log = np.loadtxt(os.path.join(transitions_directory, 'target-grasped.log.txt'), delimiter=' ').astype(int)
reposition_log = np.loadtxt(os.path.join(transitions_directory, 'reposition.log.txt'), delimiter=' ').astype(int)

# A trial fails when it needs more than this many motions.
max_num_motion = 20
num_trials = len(reposition_log)

result_rec = []
num_motions_rec = []
for i in range(num_trials):
    # reposition_log holds the cumulative motion count at the end of each
    # trial, so trial i starts right after the previous entry (0 for the
    # first trial). This unifies the duplicated i==0 / i>0 branches of the
    # original loop without changing any computed value.
    prev = reposition_log[i - 1] if i > 0 else 0
    num_attempted = reposition_log[i] - prev
    # Success: the trial used at most max_num_motion motions and the target
    # was grasped on its final motion.
    result = num_attempted <= max_num_motion and target_grasped_log[reposition_log[i] - 1]
    result_rec.append(result)
    num_motions_rec.append(min(num_attempted, max_num_motion))

# Display results
print('Success rate %0.1f' % float(np.mean(result_rec)*100))
print('Mean number of motions %0.2f, std %0.2f' % (float(np.mean(num_motions_rec)), float(np.std(num_motions_rec))))
| choicelab/grasping-invisible | evaluate.py | evaluate.py | py | 1,591 | python | en | code | 43 | github-code | 50 |
4518453475 | """ Runs automech instancs for tests
"""
import os
# import tempfile
import numpy
from _util import run_mechdriver
# from _util import chk_therm
# from _util import chk_rates
# Set path where test input files and output data comparison exist
PATH = os.path.dirname(os.path.realpath(__file__))
DAT_PATH = os.path.join(PATH, 'data')
# Paths to the current input directory
CWD_INP_DIR = os.path.join(PATH, 'inp')
# Paths to the temp directory where tests will be run
# TMP_DIR = tempfile.mkdtemp()
TMP_DIR = os.path.join(os.getcwd(), 'tmp')
TMP_INP_DIR = os.path.join(TMP_DIR, 'inp')
TMP_RUN_DIR = os.path.join(TMP_DIR, 'run')
TMP_SAVE_DIR = os.path.join(TMP_DIR, 'save')
print(TMP_DIR)
# Set conditions
# NOTE(review): TEMPS/PRESSURES are only consumed by the currently
# commented-out chk_therm/chk_rates comparisons.
TEMPS = numpy.array([500.0, 1000.0, 1500.0, 2000.0])
PRESSURES = (1.0, 'high')
def test__rrho():
    """Run es, thermo, and rates for the PES; standard RRHO run."""
    print('RUN test__rrho')
    run_args = ('run_c2h6_h_rrho.temp', TMP_DIR,
                TMP_INP_DIR, CWD_INP_DIR,
                TMP_RUN_DIR, TMP_SAVE_DIR)
    run_mechdriver(*run_args)
    # (Comparison checks against reference ckin data are currently disabled.)
def test__1dhrfa():
    """Run es, thermo, and rates for the PES with 1-D hindered rotors (FA)."""
    print('RUN test__1dhrfa')
    run_args = ('run_c2h6_h_1dhrfa.temp', TMP_DIR,
                TMP_INP_DIR, CWD_INP_DIR,
                TMP_RUN_DIR, TMP_SAVE_DIR)
    run_mechdriver(*run_args)
    # (chk_therm/chk_rates comparisons against the reference ckin data in
    # DAT_PATH are currently disabled.)
def __instab():
    """Run es, thermo, and rates for a PES that exhibits instabilities.

    Name starts with a double underscore so pytest does not collect it.
    """
    run_args = ('run_ch2ooh_rrho.temp', TMP_DIR,
                TMP_INP_DIR, CWD_INP_DIR,
                TMP_RUN_DIR, TMP_SAVE_DIR)
    run_mechdriver(*run_args)
def test__radrad():
    """Run es, thermo, and rates for a radical-radical PES."""
    print('RUN test__radrad')
    run_args = ('run_c2h5_h_1dhrfa.temp', TMP_DIR,
                TMP_INP_DIR, CWD_INP_DIR,
                TMP_RUN_DIR, TMP_SAVE_DIR)
    run_mechdriver(*run_args)
def test__proc():
    """Run ProcDriver."""
    print('RUN test__proc')
    run_args = ('run_proc.temp', TMP_DIR,
                TMP_INP_DIR, CWD_INP_DIR,
                TMP_RUN_DIR, TMP_SAVE_DIR)
    run_mechdriver(*run_args)
if __name__ == '__main__':
    # Run the regression suite in order. The instability case (__instab)
    # is intentionally not invoked here.
    test__rrho()
    test__1dhrfa()
    test__radrad()
    test__proc()
| Auto-Mech/mechdriver | tests/test_workflow.py | test_workflow.py | py | 2,558 | python | en | code | 2 | github-code | 50 |
28645224843 | from sqrt import *
import numpy as np
import math
def check(n):
    """Assert that the project's sqrt(n) agrees with math.sqrt(n)."""
    expected = math.sqrt(n)
    actual = sqrt(n)
    assert np.isclose(actual, expected)
def test_sqrt():
    """Spot-check sqrt on a large value, a perfect square, one and zero."""
    for value in (125348, 100, 1, 0):
        check(value)
| parrt/msan501-starterkit | stats/test_sqrt.py | test_sqrt.py | py | 189 | python | en | code | 5 | github-code | 50 |
4006519574 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from itertools import *
from xapi.symbol import get_product
def get_fields_columns_formats(struct):
    """Build numpy-style (column names, format codes) from a ctypes Structure,
    e.g. for constructing a structured dtype / DataFrame.

    Scalar ctypes fields expose a single-character format code in ``_type_``;
    fixed-size char arrays expose the element class there instead, so those
    are mapped to a byte-string format 'S<length>'.
    """
    columns = []
    formats = []
    for field_name, field_type in struct._fields_:
        columns.append(field_name)
        code = field_type._type_
        if isinstance(code, str):
            formats.append(code)
        else:
            formats.append('S' + str(field_type._length_))
    return columns, formats
def decode_dataframe(df, ctypes_struct=None):
    """
    Decode byte strings in a DataFrame to str (GBK).

    By default every object-dtype column is treated as a byte-string column;
    if other object columns exist, that would fail, so the original ctypes
    struct can be passed to decode only its fixed-size char fields.
    Decoded frames are easier to merge with other sources such as csv, but
    must be encoded back to bytes before placing orders.

    :param df: DataFrame, modified in place (and returned)
    :param ctypes_struct: optional ctypes Structure describing the columns
    :return: the same DataFrame
    """
    if ctypes_struct is None:
        for i, f in enumerate(df.dtypes):
            # Using the object dtype as the test may be wrong for mixed columns.
            if f == object:
                df.iloc[:, i] = df.iloc[:, i].str.decode('gbk')
    else:
        for f in ctypes_struct._fields_:
            t = f[1]._type_
            if isinstance(t, str):
                # Scalar field: nothing to decode.
                pass
            else:
                # Fixed-size char field: decode the matching column.
                name = f[0]
                df[name] = df[name].str.decode('gbk')
    return df
def encode_dataframe(df, ctypes_struct=None):
    """
    Encode str columns of a DataFrame back to GBK byte strings
    (inverse of decode_dataframe, needed before placing orders).

    By default every object-dtype column is treated as a string column;
    pass the original ctypes struct to encode only its fixed-size char
    fields when that assumption does not hold.

    :param df: DataFrame, modified in place (and returned)
    :param ctypes_struct: optional ctypes Structure describing the columns
    :return: the same DataFrame
    """
    if ctypes_struct is None:
        for i, f in enumerate(df.dtypes):
            # Using the object dtype as the test may be wrong for mixed columns.
            if f == object:
                df.iloc[:, i] = df.iloc[:, i].str.encode('gbk')
    else:
        for f in ctypes_struct._fields_:
            t = f[1]._type_
            if isinstance(t, str):
                # Scalar field: nothing to encode.
                pass
            else:
                # Fixed-size char field: encode the matching column.
                name = f[0]
                df[name] = df[name].str.encode('gbk')
    return df
def ctypes_dict_2_array(dict, dtype):
    """
    Convert a dict of raw ctypes/bytes buffers into (keys, records).

    Each value is a buffer holding one or more records of *dtype*.

    :param dict: mapping key -> buffer
    :param dtype: numpy structured dtype of a single record
    :return: (keys, values)
    """
    values = []
    keys = []
    for k, v in dict.items():
        d = np.frombuffer(v, dtype=dtype)
        values.extend(d)
        # NOTE(review): extend() iterates k itself — for a string key this
        # appends its individual characters, and keys does not stay aligned
        # with values when a buffer holds several records. Probably meant
        # keys.extend([k] * len(d)); the only caller below ignores keys.
        keys.extend(k)
    return keys, values
def ctypes_dict_2_dataframe(dict, dtype):
    """
    Convert a dict of raw ctypes/bytes buffers into a DataFrame whose
    columns follow the structured dtype's field names.

    :param dict: mapping key -> buffer
    :param dtype: numpy structured dtype of a single record
    :return: DataFrame of all records (keys are discarded)
    """
    keys, values = ctypes_dict_2_array(dict, dtype)
    df = pd.DataFrame.from_records(values, columns=dtype.names)
    return df
def extend_dataframe_product(df, iterables, columns=['InstrumentID', 'Side', 'HedgeFlag']):
    """
    Expand a position table over the cross product of instrument, side and
    hedge flag so that later calculations have a row for every combination.
    Ideally SHFE rows should be expanded too, since Shanghai needs separate
    close-today / close-yesterday instructions.
    TODO: when today/yesterday is not specified, fall back to a plain close.

    NOTE(review): the default *columns* list is mutable and shared between
    calls; it is only read here, but must not be mutated.
    :param df: position DataFrame (or None)
    :param iterables: iterables fed to itertools.product (e.g. symbols, [0, 1])
    :param columns: column names of the product dimensions
    :return: expanded DataFrame with NaNs filled with 0, or None
    """
    # Build the cross-product skeleton.
    # x = product(symbols, [0, 1])
    x = product(*iterables) # symbols, [0, 1]
    y = pd.DataFrame(list(x), columns=columns)
    if df is None:
        return None
    z = pd.merge(df, y, how='outer', on=columns)
    # The outer merge introduces many NaNs that would break arithmetic, so fill.
    z.fillna(0, inplace=True)
    return z
def lock_positions(df, columns, input_position, output_position):
    """
    Lock positions: achieve a book-level close by opening the opposite side,
    used e.g. when closing directly would incur excessive fees.
    When both sides hold positions, the larger of the two is taken.
    Persisting the lock records for manual reconciliation is recommended.

    :param df: position DataFrame
    :param columns: grouping columns
    :param input_position: name of the source position column
    :param output_position: name of the new locked-position column
    :return: df merged with the per-group maxima
    """
    # Group first, then take the maximum within each group.
    grp = df.groupby(columns)
    grp = grp.agg({input_position: 'max'})
    # The two columns would collide on merge, so rename before merging.
    grp.columns = [output_position]
    grp = grp.reset_index()
    x = pd.merge(df, grp, how='outer', on=columns)
    return x
def close_one_row(series, close_today_first):
    """
    Close the current row, splitting the close amount between today's and
    yesterday's (history) position in the requested order.

    :param series: position row; series['Open_Amount'] is negative (a close)
    :param close_today_first: whether to close today's position first
    :return: list of row copies, one per non-empty close instruction
    """
    ss = []
    # Pick the order: the part to be closed first goes first in the loop.
    if close_today_first:
        fields = ['TodayPosition', 'HistoryPosition']
    else:
        fields = ['HistoryPosition', 'TodayPosition']
    leave = series['Open_Amount']
    for i in range(len(fields)):
        # Mark whether this instruction closes today's position.
        series['CloseToday_Flag'] = int(fields[i] == 'TodayPosition')
        if leave == 0:
            # Nothing left to close; done.
            break
        sum_ = series[fields[i]] + leave
        if sum_ < 0:
            # This bucket is fully consumed; close all of it and carry on.
            series['Open_Amount'] = - series[fields[i]]
            if series['Open_Amount'] != 0:
                ss.append(series.copy())
        else:
            # Remaining amount fits in this bucket; close it all here.
            series['Open_Amount'] = leave
            ss.append(series.copy())
        # NOTE(review): when sum_ > 0 the remainder carried to the next
        # iteration is positive; confirm callers never hit that branch twice,
        # or leave should be reset to 0 after a full close.
        leave = sum_
    return ss
def calc_target_orders(df, target_position, init_position, dont_close_today, shares_per_lot):
    """
    Compute the order records needed to move from the initial position to
    the target position.

    :param df: cross-product position table (see extend_dataframe_product)
    :param target_position: column name of the target position
    :param init_position: column name of the initial position
    :param dont_close_today: if True, drop close-today instructions
    :param shares_per_lot: round stock buys down to this lot size (None to skip)
    :return: DataFrame of order rows, or None when nothing changes
    """
    # Map Long/Short (0/1 as delivered by XAPI) to +1/-1 for arithmetic below.
    df['Long_Flag'] = 1 - 2 * df['Side']
    # Sign encodes open (+) vs close (-); adjusted further below.
    df['Open_Amount'] = df[target_position] - df[init_position]
    df2 = df[df['Open_Amount'] != 0]  # keep only positions that change
    if df2.empty:
        return None
    df2 = df2.assign(CloseToday_Flag=0)  # assign() avoids SettingWithCopy warnings
    # SHFE closes must be split into close-today / close-yesterday records.
    df3 = []
    for i in range(len(df2)):
        s = df2.iloc[i].copy()
        if s['IsSHFE'] and s['Open_Amount'] < 0:
            # Split into two instructions. Only closing is handled here, so
            # the logic stays simple; the open/close ordering differs per
            # product — with similar close-today/yesterday prices the order
            # barely matters, but with limited funds closing first is better.
            df3.extend(close_one_row(s, False))
        else:
            df3.append(s)
    df4 = pd.DataFrame.from_records(df3)
    # Recompute the trade amount; sign encodes buy (+) vs sell (-).
    df4.loc[:, 'Buy_Amount'] = df4['Long_Flag'] * df4['Open_Amount']
    # Optionally never close today's position (e.g. for T+1 stocks).
    if dont_close_today:
        df4 = df4[df4['CloseToday_Flag'] == 0]
    # Stock buys must be whole lots.
    if shares_per_lot is not None:
        # Round down; only positive buy amounts are adjusted because rounding
        # negative amounts toward the lot size would over-order.
        # FIX: DataFrame.ix was removed in pandas 1.0 — use .loc instead.
        df4.loc[df4['Buy_Amount'] > 0, 'Buy_Amount'] = df4['Buy_Amount'] // shares_per_lot * shares_per_lot
    df4 = df4[df4['Buy_Amount'] != 0]  # keep only rows that still trade
    if df4.empty:
        return None
    return df4
def calc_target_orders_for_stock(df, target_position, init_position):
    """
    Compute the order records needed to move from the initial position to
    the target position (stock variant: SSE closes are split, and all
    close-today rows are dropped because of T+1).

    :param df: cross-product position table (see extend_dataframe_product)
    :param target_position: column name of the target position
    :param init_position: column name of the initial position
    :return: DataFrame of order rows, or None when nothing changes
    """
    # Map Long/Short (0/1 as delivered by XAPI) to +1/-1 for arithmetic below.
    df['Long_Flag'] = 1 - 2 * df['Side']
    # Sign encodes open (+) vs close (-); adjusted further below.
    df['Open_Amount'] = df[target_position] - df[init_position]
    df2 = df[df['Open_Amount'] != 0]  # keep only positions that change
    if df2.empty:
        return None
    df2 = df2.assign(CloseToday_Flag=0)  # assign() avoids SettingWithCopy warnings
    # Rows that close SSE positions are expanded into today/history parts.
    df3 = []
    for i in range(len(df2)):
        s = df2.iloc[i].copy()  # copy to avoid SettingWithCopy warnings
        # SSE closes must be split in two; only closing is handled here.
        if s['IsSSE'] and s['Open_Amount'] < 0:
            df3.extend(close_one_row(s, False))
        else:
            # No expansion needed.
            df3.append(s)
            pass
    df4 = pd.DataFrame.from_records(df3)
    # Recompute the trade amount; sign encodes buy (+) vs sell (-).
    df4.loc[:, 'Buy_Amount'] = df4['Long_Flag'] * df4['Open_Amount']
    # T+1: never close today's position.
    df4 = df4[df4['CloseToday_Flag'] == 0]
    return df4
def merge_hedge_positions(df, hedge):
    """
    Merge duplicate rows of a position table and, when *hedge* is True,
    net long positions against short ones.

    :param df: position DataFrame
    :param hedge: whether to net long against short
    :return: merged (and possibly netted) DataFrame
    """
    # Temporary workaround: instrument IDs such as 'i1709.' (as returned by
    # API queries) clash with 'i1709' when grouping.
    if df.empty:
        return df
    df['Symbol'] = df['InstrumentID']
    # Merge duplicate rows per (symbol, instrument, hedge flag, side).
    df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag', 'Side'])[
        'Position'].sum().to_frame().reset_index()
    # print(df)
    # Net long against short.
    if hedge:
        df['Net'] = df['Side'] * df['Position']
        df = df.groupby(by=['Symbol', 'InstrumentID', 'HedgeFlag'])['Net'].sum().to_frame().reset_index()
        df['Position'] = abs(df['Net'])
        # NOTE(review): rows with Net == 0 produce a NaN/inf Side here before
        # being dropped by the Position != 0 filter below.
        df['Side'] = df['Net'] / df['Position']
        df = df[df['Position'] != 0]
        df = df[['Symbol', 'InstrumentID', 'HedgeFlag', 'Side', 'Position']]
    # print(df)
    return df
def get_market_data(marketdata_dict_symbol, marketdata_dict_instrument, symbol, instrument):
    """
    Return market data, looked up by symbol first and then by instrument ID.

    :param marketdata_dict_symbol: quotes keyed by symbol
    :param marketdata_dict_instrument: quotes keyed by instrument ID
    :param symbol: symbol to try first
    :param instrument: instrument ID fallback
    :return: the quote, or None when neither key exists
    """
    try:
        return marketdata_dict_symbol[symbol]
    except KeyError:
        # FIX: the original bare excepts also swallowed unrelated errors
        # (KeyboardInterrupt, bugs inside a custom mapping, ...); a missing
        # key is the only condition that should trigger the fallback.
        try:
            return marketdata_dict_instrument[instrument]
        except KeyError:
            return None
def get_tick_size(instrument_dict3, symbol, instrument):
    """
    Look up the price tick: first by symbol, then by instrument ID, and
    finally by product code; default to 1 when nothing matches.

    :param instrument_dict3: instrument info keyed by symbol/instrument/product
    :param symbol: symbol to try first
    :param instrument: instrument ID fallback (also used to derive the product)
    :return: the price tick (1 when unknown)
    """
    _tick_size = 1
    try:
        # Key exists: look it up directly.
        _tick_size = instrument_dict3[symbol].PriceTick
    except:
        try:
            # Not found: look up by instrument ID.
            _tick_size = instrument_dict3[instrument].PriceTick
        except:
            # (Chinese) "A [new contract] appeared, please refresh the
            # instrument list when convenient."
            print('-' * 30, '有[新合约]出现,请抽空更新合约列表', '-' * 30)
            try:
                # Still not found: fall back to the product code.
                _product = get_product(instrument)
                _tick_size = instrument_dict3[_product].PriceTick
            except:
                # (Chinese) "A [new product] appeared, refresh the
                # instrument list immediately."
                print('+' * 30, '有[新产品]出现,请立即更新合约列表', '+' * 30)
                _tick_size = 1
    return _tick_size
| sagpant/XAPI2 | languages/Python/xapi/utils.py | utils.py | py | 11,842 | python | zh | code | 1 | github-code | 50 |
16529096231 | import pandas as pd
import sys
import os
# Summarise per-matchup statistics from a results CSV.
if len(sys.argv) != 2:
    print("need results path arg")
    # BUG FIX: the original only printed and then crashed with an IndexError
    # on sys.argv[1] below; exit explicitly when the argument is missing.
    sys.exit(1)

df = pd.read_csv(sys.argv[1])

algos = ['ppo', 'ppo-x3', 'ppo-cma', 'ppo-dma', 'ou', 'zero']
blue_exp = df.iloc[0].blue_experiment
yellow_exp = df.iloc[0].yellow_experiment
blue_algos = [f'{blue_exp}_' + a for a in algos]
yellow_algos = [f'{yellow_exp}_' + a for a in algos]

# One row per blue algorithm, one column per yellow algorithm.
goal_df = pd.DataFrame(index=blue_algos, columns=yellow_algos)
goal_std_df = goal_df.copy()
steps_df = goal_df.copy()
steps_std_df = goal_df.copy()
score_df = goal_df.copy()

for row in blue_algos:
    for column in yellow_algos:
        # get mean and std of goal score and ep len for row team vs column team
        frame = df[df.blue_algo == row.split('_')[1]]
        frame = frame[frame.yellow_algo == column.split('_')[1]]
        # FIX: single-step .loc[row, column] instead of chained
        # .loc[row][column], whose assignment may target a copy.
        goal_df.loc[row, column] = frame.goal_score.mean()
        goal_std_df.loc[row, column] = frame.goal_score.std()
        steps_df.loc[row, column] = frame.episode_length.mean()
        steps_std_df.loc[row, column] = frame.episode_length.std()
        score_df.loc[row, column] = frame.blue_score.mean()

# Append every summary table to one CSV next to the input file.
results_path = os.path.join(os.path.dirname(sys.argv[1]), '0_summary.csv')
goal_df.astype(float).round(5).to_csv(results_path, mode='a')
goal_std_df.astype(float).round(5).to_csv(results_path, mode='a')
steps_df.astype(float).round(5).to_csv(results_path, mode='a')
steps_std_df.astype(float).round(5).to_csv(results_path, mode='a')
score_df.astype(float).round(5).to_csv(results_path, mode='a')
score_df.mean(1).astype(float).round(5).to_csv(results_path, mode='a')
| FelipeMartins96/rsoccer-isaac | get_stats.py | get_stats.py | py | 1,587 | python | en | code | 0 | github-code | 50 |
73027640475 | # This command will create the body for a PR from a template environmental variable in GitHub Actions
# using jinja2 for the template engine
import os
from jinja2 import Template
# Get the template from the templates directory
def create_pr_body(test=False):
# find the path in the dir structure to the template 'templates/pr-body-template.j2'
template_path = os.path.join(os.path.dirname(__file__), '..', 'templates', 'pr-body-template.j2')
# Open the template file
with open(template_path) as f:
# Read the template file
template = f.read()
# Get the issue name from the environment
issue_name = os.environ.get('ISSUE_NAME')
# Get the issue number from the environment
issue_number = os.environ.get('ISSUE_NUMBER')
file_name = None
# if test is true then set file_name to test_file.md
if test:
file_name = "test_file.md"
issue_name = "Test Issue"
issue_number = "42"
else:
# Get the file name from the .files_changed file
with open('.files_changed') as f:
file_name = f.read().strip()
if (issue_name == None or issue_number == None or file_name == None):
raise ValueError("ISSUE_NAME or ISSUE_NUMBER is not set of no file has been changed")
# Create the body of the PR
body = Template(template)
# Render the body of the PR
body = body.render( issue_name=issue_name, file_name=file_name, issue_number=issue_number)
# return the body of the PR
return body
def Main():
    """Entry point: render the PR body and write it to stdout."""
    # Create the body of the PR
    body = create_pr_body()
    # Print the body of the PR
    print(body)

# initialize the script using init
if __name__ == "__main__":
    Main()
17731789852 | import sys
import collections
def read_data(fpath):
    """Parse the puzzle input.

    Returns (polymer template, rules), where rules is a list of
    [pair, inserted-character] pairs taken from "XY -> Z" lines.
    """
    data = []
    with open(fpath, "r") as fp:
        lines = fp.readlines()
        # The template is the last non-blank line before the first blank
        # separator line (in practice: the single first line of the file).
        for i, line in enumerate(lines):
            if not line.strip():
                break
            template = str(line.strip())
        # Everything after the separator is an insertion rule.
        for line in lines[i:]:
            if line.strip():
                data.append(line.strip().split(" -> "))
    return template, data
def main(fpath, steps):
    """AoC 2021 day 14: run *steps* rounds of pair insertion and return
    (most common element count) - (least common element count).

    Works on pair counts instead of building the polymer string, so memory
    stays bounded by the number of distinct rule pairs.
    """
    template, data = read_data(fpath)
    insertions = {dat[0]: dat[1] for dat in data}
    insertion_counter = collections.Counter()
    char_counter = collections.Counter(template)
    previous = ""
    # Initialise the insertion counter from the template
    for tt in list(template):
        if previous + tt in insertions:
            i = insertions[previous + tt]
            char_counter[i] += 1
            # The inserted character splits the pair into two new pairs.
            ll = previous + i
            rr = i + tt
            if ll in insertions:
                insertion_counter[ll] += 1
            if rr in insertions:
                insertion_counter[rr] += 1
        previous = tt
    # Use the number of occurences of each insertion in one iteration to
    # compute the insertions for the next iteration
    for _ in range(steps - 1):
        next_counter = collections.Counter()
        for k, v in insertion_counter.items():
            ii = insertions[k]
            char_counter[ii] += v
            ll = k[0] + ii
            rr = ii + k[1]
            if ll in insertions:
                next_counter[ll] += v
            if rr in insertions:
                next_counter[rr] += v
        insertion_counter = next_counter
    return char_counter.most_common()[0][1] - char_counter.most_common()[-1][1]
if __name__ == "__main__":
fpath = sys.argv[1]
answer_1 = main(fpath, 10)
print("Answer 1: ", answer_1)
answer_2 = main(fpath, 40)
print("Answer 2: ", answer_2)
| collinb9/advent-of-code | 2021/14.py | 14.py | py | 1,885 | python | en | code | 0 | github-code | 50 |
20487089679 | # -*- coding: utf-8 -*-
# @Time : 2022/1/23 6:37 下午
# @Author : zuokuijun
# @Email : zuokuijun13@163.com
"""
获取翼型拟合后的X以及Y坐标
这里将拟合数据点设置为70个
"""
import os
import numpy as np
from scipy.interpolate import splev, splprep, interp1d
from scipy.integrate import cumtrapz
from matplotlib import pyplot as plt
from Utils.data_normalization import mean_normalization
# Standardize every airfoil to 70 interpolated data points.
N = 70
# Degree of the B-spline fit.
k = 3
def interpolates(Q, N, k, D=20, resolution=1000):
    ''' Interpolate N points whose concentration is based on curvature. '''
    # Fit a degree-k B-spline through the raw points Q (transposed to x/y rows).
    res, fp, ier, msg = splprep(Q.T, u=None, k=k, s=1e-6, per=0, full_output=1)
    tck, u = res
    # Dense evaluation of the spline and its first two derivatives.
    uu = np.linspace(u.min(), u.max(), resolution)
    x, y = splev(uu, tck, der=0)
    dx, dy = splev(uu, tck, der=1)
    ddx, ddy = splev(uu, tck, der=2)
    # Curvature magnitude (np.abs = absolute value); the constant D keeps a
    # baseline sampling density on flat segments.
    cv = np.abs(ddx*dy - dx*ddy)/(dx*dx + dy*dy)**1.5 + D
    # Sample N parameters uniformly in cumulative curvature so curved regions
    # (e.g. the leading edge) receive more points.
    cv_int = cumtrapz(cv, uu, initial=0)
    fcv = interp1d(cv_int, uu)
    cv_int_samples = np.linspace(0, cv_int.max(), N)
    u_new = fcv(cv_int_samples)
    x_new, y_new = splev(u_new, tck, der=0)
    return x_new, y_new, fp, ier
# Save the interpolated X coordinates.
def save_x_coordinate(x_new, name):
    """Write each interpolated x coordinate of airfoil *name* to
    ../data/test/x/<name>.txt, one value per line."""
    # FIX: 'with' guarantees the file is closed (and flushed) even when a
    # write fails, replacing the manual per-line flush() + close().
    with open(f'../data/test/x/{name}.txt', mode='w', encoding='utf8') as f:
        for j in range(len(x_new)):
            print("写入文件的数据为:{}".format(x_new[j]))
            data = str(x_new[j]) + '\n'
            f.write(data)
# Save the interpolated Y coordinates.
def save_y_coordinate(y_new, name):
    """Write each interpolated y coordinate of airfoil *name* to
    ../data/test/y/<name>.txt, one value per line."""
    # FIX: 'with' guarantees the file is closed (and flushed) even when a
    # write fails, replacing the manual per-line flush() + close().
    with open(f'../data/test/y/{name}.txt', mode='w', encoding='utf8') as f:
        for j in range(len(y_new)):
            print("写入文件的数据为:{}".format(y_new[j]))
            data = str(y_new[j]) + '\n'
            f.write(data)
# Save the normalized Y coordinates.
def save_y_coordinate_normalization(y_new, name):
    """Write each normalized y coordinate of airfoil *name* to
    ../data/test/normalize/<name>.txt, one value per line."""
    # FIX: 'with' guarantees the file is closed (and flushed) even when a
    # write fails, replacing the manual per-line flush() + close().
    with open(f'../data/test/normalize/{name}.txt', mode='w', encoding='utf8') as f:
        for j in range(len(y_new)):
            print("写入文件的数据为:{}".format(y_new[j]))
            data = str(y_new[j]) + '\n'
            f.write(data)
def get_xy_coordinate(path):
    """Interpolate every airfoil file in *path* and export the X, Y and
    normalized-Y coordinates to the ../data/test directories."""
    files = os.listdir(path)
    print("UIUC 翼型数据库总计翼型数量为{}".format(len(files)))
    for i in range(len(files)):
        print("正在导出第{}个翼型的XY坐标数据... ...".format(i+1))
        # Absolute path of the raw coordinate file.
        file_path = os.path.join(path, files[i])
        # Airfoil name = file name without extension.
        file_name = os.path.splitext(files[i])[0]
        print("导出翼型{}的坐标数据... ...".format(file_name))
        file = np.loadtxt(file_path)
        x_new, y_new, fp, ier = interpolates(file, N=N, k=k)
        # Export the interpolated X coordinates.
        save_x_coordinate(x_new, file_name)
        # Export the interpolated Y coordinates.
        save_y_coordinate(y_new, file_name)
        # Export the interpolated and normalized Y coordinates.
        y_new_nor = mean_normalization(y_new)
        save_y_coordinate_normalization(y_new_nor, file_name)
def test_plot_xy():
    """Visual sanity check: plot the raw naca0012 coordinates from the UIUC file."""
    # x = "../data/CNN-FCN/x_coordinate/e378.txt"
    # x = "../data/CNN-FCN/x_average.txt"
    # y = "../data/CNN-FCN/y_coordinate/naca0006.txt"
    # y_1 = "../data/CNN-FCN/y_coordinate_normalization/naca0006.txt"
    y = "../data/uiuc/naca0012.dat"
    # x = np.loadtxt(x)
    m = np.loadtxt(y)
    print(type(m))
    print(m.shape)
    # First column: x coordinates; second column: y coordinates.
    y1 = m[:, 0]
    y2 = m[:, 1]
    # y_1 = np.loadtxt(y_1)
    fig, ax = plt.subplots()
    plt.plot(y1, y2, 'bo-', alpha=.5, label="True Value")
    # plt.plot(x, y_1, 'ro-', alpha=.5, label="Normalization Value")
    plt.legend(loc=1)  # loc=1 places the legend in the upper-right corner
    # ax.set_xlim([-0.1, 1.1])
    # ax.set_ylim([-0.5, 0.5])
    # ax.set_xlim([-0.05, 1.02])
    # ax.set_ylim([-0.3, 0.4])
    plt.title("UIUC airfoil --naca0012")
    plt.show()
    # plt.legend(loc=2)  # alternative legend placement (upper-left)
if __name__ == '__main__':
    # Batch-export coordinates for every file in the naca0012 data directory.
    dir = "../data/naca0012/"
    get_xy_coordinate(dir)
    # test_plot_xy()
| zuokuijun/Multi-head-attention-network | get_XY_coordinate.py | get_XY_coordinate.py | py | 4,182 | python | en | code | 7 | github-code | 50 |
2880577003 | from itertools import permutations
def check(perm, banned):
    """Return True when every id in *perm* matches the banned pattern at the
    same position; '*' in a pattern matches any single character."""
    for user, pattern in zip(perm, banned):
        if len(user) != len(pattern):
            return False
        if any(p != '*' and p != u for u, p in zip(user, pattern)):
            return False
    return True
def solution(user_id, banned_id):
    """Count the distinct *sets* of users that could match *banned_id*.

    Each ordered selection of users is tested against the banned patterns;
    sorting-and-joining collapses selections that contain the same users.
    """
    matched = {
        "".join(sorted(candidate))
        for candidate in permutations(user_id, len(banned_id))
        if check(candidate, banned_id)
    }
    return len(matched)
return len(result) | GeonHyeongKim/2022-2-Algorithm-Study | src/chanhyun/week5/불량 사용자.py | 불량 사용자.py | py | 528 | python | en | code | 2 | github-code | 50 |
40513989039 | from tkinter import *
from PIL import ImageTk, Image
import tkinter.messagebox
import mysql.connector as mysql
def game(player1id, player2id, mainwindow):
    """Two-player turtle race in a Toplevel window.

    Player 1 presses "z", player 2 presses "m"; each press moves that
    player's turtle 10 px right.  First to reach position 110 wins, and the
    winner/loser pair is recorded in the MySQL PLAYERINFO table.
    """
    window = Toplevel(mainwindow)
    window.geometry("600x300")
    p1 = PhotoImage(file='./turtle/assets/turtle1.png')
    window.iconphoto(False, p1)
    window.title("Turtle Game | VVLC")
    window.focus_force()
    # NOTE(review): hard-coded root credentials; consider reading from config.
    con = mysql.connect(host="localhost", passwd="admin", user="root", database="vvlcdatabase")
    cursor = con.cursor()
    # Game state is kept in module-level globals so keypress() can mutate it.
    global winner, loser, player1pos, player2pos
    player1pos = 0
    player2pos = 0
    winner = None
    loser = None
    def gameover(winner, loser):
        # Announce the result, then append the loser to the winner's
        # comma-separated TURTLEWINS list (deduplicated).
        tkinter.messagebox.showinfo("Game Over", f"{winner} defeated {loser}")
        # NOTE(review): usernames are interpolated directly into SQL --
        # vulnerable to SQL injection; use parameterized queries.
        cursor.execute(f"SELECT * FROM PLAYERINFO WHERE USERNAME = '{winner}'")
        data = cursor.fetchall()
        data = data[0]
        wins = data[4]
        wins = str(wins)
        if wins == "None":
            # First recorded win: start the list.
            wins = loser
        else:
            winlst = wins.split(", ")
            if loser not in winlst:
                wins += f", {loser}"
        cursor.execute(f"UPDATE PLAYERINFO SET TURTLEWINS = '{wins}' WHERE USERNAME = '{winner}'")
        con.commit()
        exit()
    def exit():
        # Local helper (shadows the builtin exit): tear down this window only.
        window.quit()
        window.destroy()
    def keypress(event):
        global player1pos, player2pos, winner, loser
        # Ignore input once the race has been decided.
        if winner == None:
            if event.char == "z":
                canvas.move(player1, 10, 0)
                player1pos += 2
            elif event.char == "m":
                canvas.move(player2, 10, 0)
                player2pos += 2
            # 110 internal units == 55 presses == 550 px, the finish line.
            if player1pos == 110:
                print("Player 1 Wins")
                winner = player1id
                loser = player2id
                exitbutton.pack()
                gameover(winner, loser)
            elif player2pos == 110:
                print("Player 2 Wins")
                winner = player2id
                loser = player1id
                exitbutton.pack()
                gameover(winner, loser)
    exitbutton = Button(window, text="EXIT", command=exit)
    canvas = Canvas(window, width=600, height=300)
    canvas.pack()
    # Turtle sprites and background; references are kept in locals so
    # tkinter does not garbage-collect the images.
    # NOTE(review): Image.ANTIALIAS was removed in recent Pillow releases
    # (use Image.LANCZOS); confirm the pinned Pillow version.
    img1 = Image.open("./turtle/assets/turtle2.png")
    img1 = img1.resize((50, 50), Image.ANTIALIAS)
    img1 = ImageTk.PhotoImage(img1)
    img2 = Image.open("./turtle/assets/turtle3.png")
    img2 = img2.resize((50, 50), Image.ANTIALIAS)
    img2 = ImageTk.PhotoImage(img2)
    img0 = Image.open("./turtle/assets/forestbg.png")
    img0 = img0.resize((600, 300), Image.ANTIALIAS)
    img0 = ImageTk.PhotoImage(img0)
    bgimg = canvas.create_image(300, 150, image=img0)
    player1 = canvas.create_image(20, 80, image=img1)
    player2 = canvas.create_image(20, 150, image=img2)
    # Start and finish lines.
    canvas.create_line(50, 45, 50, 210, fill="red")
    canvas.create_line(550, 45, 550, 210, fill="red")
    window.bind("<Key>", keypress)
    window.mainloop()
| tavignesh/school-project | turtle/turtle.py | turtle.py | py | 3,027 | python | en | code | 3 | github-code | 50 |
37778678677 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Read the CSV (skipping the header row) and collect the four integer columns.
X, Y, Z, value = [], [], [], []
with open("4_columns.csv") as fh:  # close the file deterministically
    table = fh.readlines()[1:]
for line in table:
    # Split once per row instead of four times.
    x, y, z, val = (field.strip() for field in line.split(',')[:4])
    X.append(int(x))
    Y.append(int(y))
    Z.append(int(z))  # NOTE: Z is parsed but not plotted below
    value.append(int(val))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='3d')
# surf = ax.plot_surface(X, Y, value)
surf = ax.scatter(X, Y, value)
# Save BEFORE plt.show(): show() tears the figure down, so the original
# call order wrote an empty demo.png.
plt.savefig("demo.png")
plt.show()
| D-Barradas/DataViz | 3D_numpy_array_cardinalities/surface_test.py | surface_test.py | py | 521 | python | en | code | 0 | github-code | 50 |
15821268458 | import os
from math import pi, sin
NAMES_FILE = './generators/data/names.txt'
ORIGINAL_CUSTOMERS = './generators/data/original_data.txt'
ORIGINAL_PRODUCTS = './generators/data/products.txt'
EXPORT_PATH = './generated_data'
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
DEVELOPMENT = os.getenv('DEVELOPMENT', 'false')
DEVELOPMENT = DEVELOPMENT.lower() in ['true', 't', 'y', '1', 'ano', 'tak', 'da', 'naturlich']
NO_SIZE_TO_SIZE_RATIO = 0.3
PRODUCTS_PERCENTAGE_IN_STORE_MIN = 0.3
PRODUCTS_PERCENTAGE_IN_STORE_MAX = 1
NO_PREFERENCES_PROB = 0.8
STORE_OPEN_HOURS = 8
AGE_YOUNG_MID = 35
AGE_MID_OLD = 60
VENDORS = [
'Bellerose', 'Adidas', 'Polo', 'Hugo Boss', 'Nununu', 'Gap',
'Puma', 'J.Crew', 'Fred Perry', 'Aeropostale', 'Guess', 'Nike',
'Gymboree', 'Acrylick', 'Prada', 'Lacoste', 'CLSC', 'Converse',
'Gucci', 'H & M', 'TinyCottons', 'Calvin Klein', 'Chanel', 'Izod',
'Diesel', 'Armani', 'ZARA', 'Carhartt', 'Versace', 'Dior', 'Levis'
]
SIZES = ['XS-XXL', 'S-XL', 'S-L', '22-44', '28-38', '32-48', '28-48', 'one-size']
CATEGORIES = {
'Women': ['Rain jacket', 'Skirt', 'Tank top', 'Wool hat', 'Sweat pants',
'Dress pants', 'Beach sling', 'Denim cut-offs', 'Short sleeve Henley',
'Short sleeve polo', 'Overalls', 'V-neck t-shirt', 'Bucket hat',
'Cotton oxford', 'Romper', 'Skinny jean', 'Suspenders', 'Hawaiian shirt', 'Bathrobe',
'Sweatshirt', 'Pajama pants', 'Jeans', 'Flannel shirt', 'Onesy', 'Vest top',
'Cargo short', 'Dress socks', 'T-shirt', 'Dress'
],
'Men': ['Rain jacket', 'Tank top', 'Wool hat', 'Sweat pants',
'Dress pants', 'Beach sling', 'Denim cut-offs', 'Short sleeve Henley',
'Short sleeve polo', 'Overalls', 'V-neck t-shirt', 'Bucket hat',
'Cotton oxford', 'Romper', 'Skinny jean', 'Suspenders', 'Hawaiian shirt', 'Bathrobe',
'Sweatshirt', 'Pajama pants', 'Jeans', 'Flannel shirt', 'Onesy', 'Vest top',
'Cargo short', 'Dress socks', 'T-shirt', 'Tuxedo'
],
'Girls': ['Rain jacket', 'Skirt', 'Tank top', 'Wool hat', 'Sweat pants',
'Dress pants', 'Beach sling', 'Denim cut-offs', 'Short sleeve Henley',
'Short sleeve polo', 'Overalls', 'V-neck t-shirt', 'Bucket hat',
'Cotton oxford', 'Romper', 'Skinny jean', 'Suspenders', 'Hawaiian shirt', 'Bathrobe',
'Sweatshirt', 'Pajama pants', 'Jeans', 'Flannel shirt', 'Onesy', 'Vest top',
'Cargo short', 'Dress socks', 'T-shirt', 'Dress'
],
'Boys': ['Rain jacket', 'Tank top', 'Wool hat', 'Sweat pants',
'Dress pants', 'Beach sling', 'Denim cut-offs', 'Short sleeve Henley',
'Short sleeve polo', 'Overalls', 'V-neck t-shirt', 'Bucket hat',
'Cotton oxford', 'Romper', 'Skinny jean', 'Suspenders', 'Hawaiian shirt', 'Bathrobe',
'Sweatshirt', 'Pajama pants', 'Jeans', 'Flannel shirt', 'Onesy', 'Vest top',
'Cargo short', 'Dress socks', 'T-shirt', 'Tuxedo'
],
'Sport': ['Bathing suit', 'Bike short', 'Sport briefs', 'Sport jacket',
'Tenis skirt', 'Sport pants', 'Sport shorts', 'Swim trunk',
'Yoga skort', 'Sport coat', 'Backpack', 'Sport shoes'
]
}
from functools import reduce
CATEGORIES_UNIQUE = list(set(reduce(lambda a, b: a + b, [c for k, c in CATEGORIES.items()])))
COLORS = [ # colors from https://en.wikipedia.org/wiki/List_of_colors:_A%E2%80%93F
'Absolute Zero', 'Acid green', 'Aero', 'Aero blue', 'African violet', 'Air superiority blue', 'Alabaster',
'Alice blue', 'Alloy orange', 'Almond', 'Amaranth', 'Amaranth (M&P)', 'Amaranth pink', 'Amaranth purple',
'Amaranth red', 'Amazon', 'Amber', 'Amber (SAE/ECE)', 'Amethyst', 'Android green', 'Antique brass',
'Antique bronze', 'Antique fuchsia', 'Antique ruby', 'Antique white', 'Ao (English)', 'Apple green',
'Apricot', 'Aqua', 'Aquamarine', 'Arctic lime', 'Army green', 'Artichoke', 'Arylide yellow', 'Ash gray',
'Asparagus', 'Atomic tangerine', 'Auburn', 'Aureolin', 'Avocado', 'Azure', 'Azure (X11/web color)',
'Baby blue', 'Baby blue eyes', 'Baby pink', 'Baby powder', 'Baker-Miller pink', 'Banana Mania', 'Barbie Pink',
'Barn red', 'Battleship grey', 'Beau blue', 'Beaver', 'Beige', 'B\'dazzled blue', 'Big dip o’ruby', 'Bisque',
'Bistre', 'Bistre brown', 'Bitter lemon', 'Bitter lime', 'Bittersweet', 'Bittersweet shimmer', 'Black',
'Black bean', 'Black chocolate', 'Black coffee', 'Black coral', 'Black olive', 'Black Shadows',
'Blanched almond', 'Blast-off bronze', 'Bleu de France', 'Blizzard blue', 'Blond', 'Blood red', 'Blue',
'Blue (Crayola)', 'Blue (Munsell)', 'Blue (NCS)', 'Blue (Pantone)', 'Blue (pigment)', 'Blue (RYB)', 'Blue bell',
'Blue-gray', 'Blue-green', 'Blue-green (color wheel)', 'Blue jeans', 'Blue sapphire', 'Blue-violet',
'Blue-violet (Crayola)', 'Blue-violet (color wheel)', 'Blue yonder', 'Bluetiful', 'Blush', 'Bole', 'Bone',
'Bottle green', 'Brandy', 'Brick red', 'Bright green', 'Bright lilac', 'Bright maroon', 'Bright navy blue',
'Bright yellow (Crayola)', 'Brilliant rose', 'Brink pink', 'British racing green', 'Bronze', 'Brown', 'Brown sugar',
'Brunswick green', 'Bud green', 'Buff', 'Burgundy', 'Burlywood', 'Burnished brown', 'Burnt orange', 'Burnt sienna',
'Burnt umber', 'Byzantine', 'Byzantium', 'Cadet', 'Cadet blue', 'Cadet blue (Crayola)', 'Cadet grey',
'Cadmium green', 'Cadmium orange', 'Cadmium red', 'Cadmium yellow', 'Café au lait', 'Café noir',
'Cambridge blue', 'Camel', 'Cameo pink', 'Canary', 'Canary yellow', 'Candy apple red', 'Candy pink',
'Capri', 'Caput mortuum', 'Cardinal', 'Caribbean green', 'Carmine', 'Carmine (M&P)', 'Carnation pink',
'Carnelian', 'Carolina blue', 'Carrot orange', 'Castleton green', 'Catawba', 'Cedar Chest', 'Celadon',
'Celadon blue', 'Celadon green', 'Celeste', 'Celtic blue', 'Cerise', 'Cerulean', 'Cerulean blue', 'Cerulean frost',
'Cerulean (Crayola)', 'CG blue', 'CG red', 'Champagne', 'Champagne pink', 'Charcoal', 'Charleston green',
'Charm pink', 'Chartreuse (traditional)', 'Chartreuse (web)', 'Cherry blossom pink', 'Chestnut', 'Chili red',
'China pink', 'China rose', 'Chinese red', 'Chinese violet', 'Chinese yellow', 'Chocolate (traditional)',
'Chocolate (web)', 'Chocolate Cosmos', 'Chrome yellow', 'Cinereous', 'Cinnabar', 'Cinnamon Satin', 'Citrine',
'Citron', 'Claret', 'Cobalt blue', 'Cocoa brown', 'Coffee', 'Columbia Blue', 'Congo pink', 'Cool grey', 'Copper',
'Copper (Crayola)', 'Copper penny', 'Copper red', 'Copper rose', 'Coquelicot', 'Coral', 'Coral pink', 'Cordovan',
'Corn', 'Cornell red', 'Cornflower blue', 'Cornsilk', 'Cosmic cobalt', 'Cosmic latte', 'Coyote brown', 'Cotton candy',
'Cream', 'Crimson', 'Crimson (UA)', 'Crystal', 'Cultured', 'Cyan', 'Cyan (process)', 'Cyber grape', 'Cyber yellow',
'Cyclamen', 'Dark blue-gray', 'Dark brown', 'Dark byzantium', 'Dark cornflower blue', 'Dark cyan', 'Dark electric blue',
'Dark goldenrod', 'Dark green', 'Dark green (X11)', 'Dark jungle green', 'Dark khaki', 'Dark lava',
'Dark magenta', 'Dark moss green', 'Dark olive green', 'Dark orange', 'Dark orchid', 'Dark pastel green', 'Dark purple',
'Dark red', 'Dark salmon', 'Dark sea green', 'Dark sienna', 'Dark sky blue', 'Dark slate blue', 'Dark slate gray',
'Dark spring green', 'Dark turquoise', 'Dark violet', 'Dartmouth green', 'Davy\'s grey', 'Deep cerise', 'Deep champagne',
'Deep chestnut', 'Deep jungle green', 'Deep pink', 'Deep saffron', 'Deep sky blue', 'Deep Space Sparkle', 'Deep taupe',
'Denim', 'Denim blue', 'Desert', 'Desert sand', 'Dim gray', 'Dodger blue', 'Dogwood rose', 'Drab', 'Duke blue',
'Dutch white', 'Earth yellow', 'Ebony', 'Ecru', 'Eerie black', 'Eggplant', 'Eggshell', 'Egyptian blue', 'Eigengrau',
'Electric blue', 'Electric green', 'Electric indigo', 'Electric lime', 'Electric purple', 'Electric violet', 'Emerald',
'Eminence', 'English green', 'English lavender', 'English red', 'English vermillion', 'English violet', 'Erin',
'Eton blue', 'Fallow', 'Falu red', 'Fandango', 'Fandango pink', 'Fashion fuchsia', 'Fawn', 'Feldgrau', 'Fern green',
'Field drab', 'Fiery rose', 'Firebrick', 'Fire engine red', 'Fire opal', 'Flame', 'Flax', 'Flirt', 'Floral white',
'Fluorescent blue', 'Forest green (Crayola)', 'Forest green (traditional)', 'Forest green (web)', 'French beige',
'French bistre', 'French blue', 'French fuchsia', 'French lilac', 'French lime', 'French mauve', 'French pink',
'French raspberry', 'French rose', 'French sky blue', 'French violet', 'Frostbite', 'Fuchsia', 'Fuchsia (Crayola)',
'Fuchsia purple', 'Fuchsia rose', 'Fulvous', 'Fuzzy Wuzzy']
DEPARTMENTS = [
{
'name': 'Men',
'probability_of_buing': {
'gender': {'M': 1, 'F': 0.7},
'age': {
f"0-{AGE_YOUNG_MID - 1}": 1,
f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}": 0.9,
f"{AGE_MID_OLD}-200": 0.8
},
},
'department_size': 0.3
},
{
'name': 'Sport',
'probability_of_buing': {
'gender': {'M': 1, 'F': 1},
'age': {
f"0-{AGE_YOUNG_MID - 1}": 1,
f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}": 0.8,
f"{AGE_MID_OLD}-200": 0.6
},
},
'department_size': 0.1
},
{
'name': 'Boys',
'probability_of_buing': {
'gender': {'M': 0.8, 'F': 1},
'age': {
f"0-{AGE_YOUNG_MID - 1}": 0.7,
f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}": 0.6,
f"{AGE_MID_OLD}-200": 0.2
},
},
'department_size': 0.05
},
{
'name': 'Girls',
'probability_of_buing': {
'gender': {'M': 1, 'F': 0.8},
'age': {
f"0-{AGE_YOUNG_MID - 1}": 0.4,
f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}": 0.6,
f"{AGE_MID_OLD}-200": 0.2
},
},
'department_size': 0.05
},
{
'name': 'Women',
'probability_of_buing': {
'gender': {'M': 0.6, 'F': 1},
'age': {
f"0-{AGE_YOUNG_MID - 1}": 0.1,
f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}": 1,
f"{AGE_MID_OLD}-200": 0.9
},
},
'department_size': 0.5
}
]
assert sum([d['department_size'] for d in DEPARTMENTS]) == 1
COUPON_TYPES = {
'department': {
'min_discount_percentage': 5,
'max_discount_percentage': 70,
'probability_of_usage': 0.9,
'gen_coupons': True
},
'just_discount': {
'min_discount_percentage': 5,
'max_discount_percentage': 30,
'probability_of_usage': 0.95,
'gen_coupons': True
},
'buy_all': {
'min_discount_percentage': 5,
'max_discount_percentage': 70,
'min_products': 2,
'max_products': 5,
'probability_of_usage': 0.5,
'gen_coupons': True
},
'buy_more': {
'min_discount_percentage': 10,
'max_discount_percentage': 50,
'min_products': 2,
'max_products': 5,
'probability_of_usage': 0.7,
'gen_coupons': True
}
}
COUPONS_PER_DEPARTMENT = 5
def young_pref_function(x, i, elements):
    """Boost weight *x* with a sine bump over the first third of the index
    range (the "young" segment); pass it through unchanged elsewhere."""
    third = elements / 3
    if i >= third:
        return x
    return x * (3 * sin(i * pi / third) + 1)
def mid_pref_function(x, i, elements):
    """Boost weight *x* with a sine bump over the middle third of the index
    range (the "middle-aged" segment); pass it through unchanged elsewhere."""
    third = elements / 3
    if third < i < 2 * third:
        return x * (3 * sin((i + third) * pi / third) + 1)
    return x
def old_pref_function(x, i, elements):
    """Boost weight *x* with a sine bump over the last third of the index
    range (the "old" segment); pass it through unchanged elsewhere."""
    third = elements / 3
    if i <= 2 * third:
        return x
    return x * (3 * sin((i + (2 * third)) * pi / third) + 1)
def women_pref_function(x, i, elements):
    """Weight grows linearly with the index: 1x at the start up to 4x at the end."""
    ratio = i / elements
    return x * (3 * ratio + 1)
def men_pref_function(x, i, elements):
    """Weight shrinks linearly with the index: 4x at the start down to 1x at the end."""
    ratio = i / elements
    return x * (-3 * ratio + 4)
AGE_PREF_FUNCTIONS = {
f"0-{AGE_YOUNG_MID - 1}": young_pref_function,
f"{AGE_YOUNG_MID}-{AGE_MID_OLD - 1}": mid_pref_function,
f"{AGE_MID_OLD}-200": old_pref_function
}
GENDER_PREF_FUNCTIONS = {
'F': women_pref_function,
'M': men_pref_function
}
MALE_COUPON_USAGE_PROBABILITY_WEIGHT = 0.8
FEMALE_COUPON_USAGE_PROBABILITY_WEIGHT = 0.9
| tejones/retailstoreofthefuture | artificial-data-generator/config.py | config.py | py | 12,333 | python | en | code | 4 | github-code | 50 |
32314142737 | import datetime
import pytz
from django.shortcuts import render, get_object_or_404
from django.views.generic.edit import CreateView
from django.contrib.auth.decorators import login_required
from .models import Post, Category, Meal
from django.contrib.postgres.search import SearchVector
from .forms import SearchForm
@login_required
def post_search(request):
    """Postgres full-text search over published posts' title and description."""
    # Sidebar context -- NOTE(review): marked "#test" by the author; confirm
    # the search template actually uses categories/posts.
    categories = Category.objects.all()
    posts = Post.published.all()
    #endtest
    form = SearchForm()
    query = None
    results = []
    if 'query' in request.GET:
        # Bind the submitted query string and re-validate.
        form = SearchForm(request.GET)
        if form.is_valid():
            query = form.cleaned_data['query']
            results = Post.published.annotate(search=SearchVector('title', 'description'),
                    ).filter(search=query)
    return render(request, 'recipe/post/search.html', {'form':form, 'query':query, 'results': results, 'categories': categories, 'posts': posts})
# Create your views here.
def post_list(request):
    """Home page: all published posts plus a meal suggestion for the current
    UTC time of day (lunch 12-18, dinner 18-06, breakfast otherwise)."""
    categories = Category.objects.all()
    posts = Post.published.all()
    now = datetime.datetime.now(pytz.utc)
    # Compare hours numerically.  The original compared strftime("%H")
    # *strings* against '12'/'18'/'6': "19" < "6" is lexicographically true,
    # and the overnight dinner window needs "or", not "and", so 00:00-05:59
    # wrongly fell through to Breakfast.
    current_hour = now.hour
    if 12 <= current_hour < 18:
        meals = Post.published.filter(meal__title = 'Lunch')
        meal_title = "Lunch"
    elif current_hour >= 18 or current_hour < 6:
        meals = Post.published.filter(meal__title = 'Dinner')
        meal_title = "Dinner"
    else:
        meals = Post.published.filter(meal__title = 'Breakfast')
        meal_title = "Breakfast"
    return render(request, 'recipe/post/list.html', {'categories': categories, 'posts': posts, 'meals': meals,
                                                     'meal_title': meal_title})
@login_required
def post_detail(request, year, month, day, post):
    """Render one published post addressed by its date + slug URL; 404 if absent."""
    post = get_object_or_404(Post, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day)
    return render(request, 'recipe/post/detail.html', {'post':post})
def category_detail(request, pk):
    """List the posts belonging to the category with primary key *pk*."""
    category = Category.objects.get(id=pk)
    # Reverse relation -- presumably related_name="recipe_categories" on the
    # Post model's category field; confirm in models.py.
    detail_category = category.recipe_categories.all()
    return render(request, 'recipe/post/category.html', {'category':category, 'detail_category':detail_category})
@login_required
def profile(request):
    """Render the logged-in user's profile page (no extra context)."""
    return render(request, 'recipe/post/profile.html', {} )
# @login_required  -- NOTE(review): auth deliberately disabled? confirm.
def shop(request):
    """Render the static shop/products page."""
    return render(request, 'recipe/post/shop/products.html', {})
class CreateView(CreateView):
    """Form view for creating a recipe Post.

    NOTE(review): the name shadows the generic ``CreateView`` it subclasses;
    it works, but a distinct name (e.g. PostCreateView) would be clearer --
    renaming requires a URLconf update.  Unlike the function views above it
    is not login-protected; confirm whether that is intentional.
    """
    model = Post
    context_object_name = 'recipe'
    template_name='recipe/post/create.html'
    fields = ['title', 'description', 'category', 'meal', 'image', 'ingredients', 'instructions', 'nutritions', 'minutes', 'status']
    def form_valid(self, form):
        # Attach the logged-in user as the author before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)
| musaddiqaskira/cookwithme | recipe/views.py | views.py | py | 2,791 | python | en | code | 0 | github-code | 50 |
42979075369 | from dataloader_init import dataloader
from models import netG, netD
from config import image_size, batch_size, nz, lr, beta1, lsf, checkpoint_path
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.utils as vutils
import sys
# CLI: argv[1] = "True" to resume from checkpoint, argv[2] = epochs to run.
loadCheckpnt = sys.argv[1]
num_epochs = int(sys.argv[2])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#Initialise TRAINING
# Initialize BCELoss function
criterion = nn.BCELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
# Establish convention for real and fake labels during training
# (fake_label is unused below -- fake targets come from label.uniform_).
real_label = 1
fake_label = 0
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
# Initialise training parameters
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0
# Reusable label tensor, sized to the configured batch size.
label = torch.empty((batch_size,), device=device)
curr_epoch = 0
# Load params from checkpoint if required (restores nets, optimizers,
# loss history and the epoch counter to resume from).
if loadCheckpnt == "True":
    print("loading checkpoint")
    checkpoint = torch.load(checkpoint_path)
    netG.load_state_dict(checkpoint['netG_state_dict'])
    netD.load_state_dict(checkpoint['netD_state_dict'])
    optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])
    optimizerD.load_state_dict(checkpoint['optimizerD_state_dict'])
    curr_epoch = checkpoint['curr_epoch']
    G_losses = checkpoint['G_losses']
    D_losses = checkpoint['D_losses']
epoch_start = curr_epoch
epoch_end = curr_epoch + num_epochs
netD.train()
netG.train()
print("Starting Training Loop for ", str(num_epochs), " epochs...")
# Standard DCGAN alternating update: one D step (real + fake), one G step.
# For each epoch
for epoch in range(epoch_start, epoch_end):
    # For each batch in the dataloader
    print( "epoch ", epoch)
    for i, data in enumerate(dataloader, 0):
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        ## Train with all-real batch
        netD.zero_grad()
        # Format batch
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        # Soft labels for real samples: uniform in [1-lsf, 1].
        # NOTE(review): "label" has fixed length batch_size; if the last
        # batch is smaller than batch_size this shape-mismatches against
        # "output" -- presumably the dataloader drops the last partial
        # batch; confirm.
        label.uniform_((1.0-lsf),1.0) # soft labelling factor
        #label = torch.full((b_size,), real_label, device=device)
        # Forward pass real batch through D
        output = netD(real_cpu).view(-1)
        # Calculate loss on all-real batch
        errD_real = criterion(output, label)
        # Calculate gradients for D in backward pass
        errD_real.backward()
        D_x = output.mean().item()
        ## Train with all-fake batch
        # Generate batch of latent vectors
        noise = torch.randn(b_size, nz, 1, 1, device=device)
        # Generate fake image batch with G
        fake = netG(noise)
        # Soft labels for fakes: uniform in [0, lsf].
        label.uniform_(0.0,lsf) # fake
        # Classify all fake batch with D (detach: no grads into G here)
        output = netD(fake.detach()).view(-1)
        # Calculate D's loss on the all-fake batch
        errD_fake = criterion(output, label)
        # Calculate the gradients for this batch
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        # Add the gradients from the all-real and all-fake batches
        errD = errD_real + errD_fake
        # Update D
        optimizerD.step()
        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.fill_(real_label) # fake labels are real for generator cost
        # Since we just updated D, perform another forward pass of all-fake batch through D
        output = netD(fake).view(-1)
        # Calculate G's loss based on this output
        errG = criterion(output, label)
        # Calculate gradients for G
        errG.backward()
        D_G_z2 = output.mean().item()
        # Update G
        optimizerG.step()
        # Output training stats
        #if i % 1 == 0:
        #    print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
        #          % (epoch, epoch_end, i, len(dataloader),
        #             errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
        # Save Losses for plotting later
        G_losses.append(errG.item())
        D_losses.append(errD.item())
        # Check how the generator is doing by saving G's output on fixed_noise
        if (iters % 500 == 0) or ((epoch == epoch_end-1) and (i == len(dataloader)-1)):
            with torch.no_grad():
                fake = netG(fixed_noise).detach().cpu()
            img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
        iters += 1
torch.save({
    # Persist the NEXT start epoch so a resumed run continues where this one
    # stopped.  The original saved the stale "curr_epoch" loaded at startup
    # (it is never updated during training), so every resume restarted from
    # the same epoch.
    'curr_epoch': epoch_end,
    'netG_state_dict': netG.state_dict(),
    'netD_state_dict': netD.state_dict(),
    'optimizerG_state_dict': optimizerG.state_dict(),
    'optimizerD_state_dict': optimizerD.state_dict(),
    'G_losses': G_losses,
    'D_losses': D_losses
}, checkpoint_path)
print("checkpoint saved")
| shaunfinn/movieGAN | GAN/project1/scripts/train.py | train.py | py | 5,104 | python | en | code | 0 | github-code | 50 |
14405368245 | import numpy as np
import time
import multiprocessing as mp
import matplotlib.pyplot as plt
"""
Paralellizing "MCI_multivar.py" to get
a distribution of multiple values for
integral.
"""
# Integration domain: the 3-D box [0, pi]^3.
# Start points, end points of integrals
a = [0, 0, 0]
b = [np.pi, np.pi, np.pi]
# Number of points in each coordinate array
ni = int(1e3)
N = 3*[ni]
# One evenly-spaced coordinate array per integration variable.
x = []
for i in range(len(a)):
    x.append(np.linspace(a[i], b[i], N[i]))
def monte_carlo(func, args):
    """Uniform Monte-Carlo estimate of the integral of *func* over the
    hyper-rectangle spanned by the coordinate arrays in *args*.

    Each array contributes one axis: its endpoints give the bounds, its
    length gives the number of random samples drawn along that axis.
    """
    volume = 1
    samples = []
    for axis in args:
        lo, hi = axis[0], axis[-1]
        samples.append(np.random.uniform(lo, hi, len(axis)))
        volume *= (hi - lo)
    # Uniform sampling: integral ~ volume * mean of the integrand values.
    return volume * np.mean(func(samples))
# Any arbitrary function, just made a choice
def f(args):
    """Element-wise product of sin(x_i) over all coordinate arrays.

    Over [0, pi]^n the exact integral is 2**n, which is what the
    Monte-Carlo estimate should converge to.
    """
    result = 1
    for coords in args:
        result = result * np.sin(coords)
    return result
# Number of MC-cycles
M = int(1e5)
# Configuration list preparing args for function in paralellization
config = M*[[f,x]]
# Paralellization of code
if __name__ == '__main__':
# Running on 6 threads repeating M times
proc_num = 6
# Timing runtime
t0 = time.time()
with mp.Pool(proc_num) as pool:
mc = pool.starmap(monte_carlo, config)
t1 = time.time()
mc = np.array(mc)
print(f'Computation time {t1 - t0}s')
print(f'Mean I over all cycles: {np.mean(mc)}')
print(f'Analytic: {8}')
# Making figure of data
fig = plt.figure(facecolor='k')
ax = fig.add_subplot(facecolor='k')
ax.spines['bottom'].set_color('w') # Setting axis white
ax.spines['left'].set_color('w')
ax.tick_params(axis='x', colors='w') # Setting ticks white
ax.tick_params(axis='y', colors='w')
# Getting color-chart for histogram
get_cmap = plt.get_cmap('jet')
n, bins, patches = ax.hist(mc, bins=75)
cmap = np.array(get_cmap(n / np.max(n))) # Must normalize data
# Coloring patches based on number in bin
for color, p in zip(cmap, patches):
plt.setp(p, 'facecolor', color)
plt.show() | AntonBrekke/Code-resume | Python/Single Python-files/MCI_multivar_paralell.py | MCI_multivar_paralell.py | py | 2,204 | python | en | code | 0 | github-code | 50 |
2183611490 | import xxteaModule
import os
import shutil
import random
def ReadFile(filePath):
    """Return the entire contents of *filePath* as bytes."""
    # "with" closes the handle even if read() raises (the original leaked it).
    with open(filePath, 'rb') as file_object:
        return file_object.read()
def WriteFile(filePath, all_the_text):
    """Overwrite *filePath* with the bytes in *all_the_text*."""
    # "with" guarantees the handle is flushed and closed on all paths.
    with open(filePath, 'wb') as file_object:
        file_object.write(all_the_text)
def BakFile(filePath, all_the_text):
    """Write *all_the_text* next to *filePath* with the extension replaced by .bak.

    Uses splitext instead of slicing off the last three characters, so paths
    whose extension is not exactly three letters no longer produce mangled
    names (for 3-letter extensions like .lua the result is unchanged).
    """
    file_bak = os.path.splitext(filePath)[0] + '.bak'
    WriteFile(file_bak, all_the_text)
def to_bytes(bytes_or_str):
    # Python 2 helper: encode unicode text to UTF-8 bytes, pass str through.
    # NOTE(review): `unicode` does not exist on Python 3; this module also
    # uses `unichr`, so it appears to target Python 2 only -- confirm before
    # running under Python 3.
    if isinstance(bytes_or_str, unicode):
        return bytes_or_str.encode('utf-8')
    return bytes_or_str
def ListLua(path):
    """Recursively collect the paths of all *.lua files under *path*."""
    found = []
    for root, dirs, files in os.walk(path):
        # Forward-slash join kept on purpose: downstream code builds
        # signature/backup paths the same way.
        found.extend(root + '/' + name for name in files if name.endswith('.lua'))
    return found
def getRandomStr(num, key1 = 1, key2 = 255):
    # Build a string of *num* random characters whose code points lie in
    # [key1, key2] (defaults cover 1..255).  Python 2 only: uses unichr.
    ret = ""
    count = 1
    while (count <= num):
        ret = ret + unichr(random.randint(key1, key2))
        count = count + 1
    # On Python 2 the UTF-8 encoding of the accumulated unicode string can
    # be longer than *num* bytes, so it is truncated back to *num* here.
    ret = to_bytes(ret)
    ret = ret[0:num]
    return ret
def getResRandomChar(key1):
    """Map the first payload character after the signature to the length of
    the random padding block that follows it (see DecodeWithXxteaModule)."""
    code = ord(key1)
    if code == 49:
        return 4
    if 50 <= code <= 90:
        return 24
    if 91 <= code <= 116:
        return 43
    if 117 <= code <= 125:
        return 38
    return 36
def DecodeWithXxteaModule(filePath,key,signment, spaceNum):
    # Decrypt a single file in place.  Layout: signature string, then a
    # variable-length random padding block, then the XXTEA ciphertext.
    all_the_text = ReadFile(filePath)
    # Not produced by this tool -- leave the file untouched.
    if all_the_text[:len(signment)] != signment :
        return
    # Padding length is derived from the first byte after the signature;
    # note the caller's spaceNum argument is ignored and overwritten here.
    spaceNum = getResRandomChar(all_the_text[len(signment)])
    all_the_text = all_the_text[len(signment) + spaceNum:]
    decrypt_content = xxteaModule.decrypt(all_the_text,key)
    WriteFile(filePath,decrypt_content)
def DecodeLua(path, key, signment, spaceNum):
    """Decrypt every *.lua file found under *path* in place."""
    for lua_file in ListLua(path):
        DecodeWithXxteaModule(lua_file, key, signment, spaceNum)
def copyFiles(sourceDir, targetDir):
    """Recursively mirror *sourceDir* into *targetDir*.

    A file is copied when it is missing from the target, is a .lua file
    (always refreshed), or differs in size.  Any source path containing
    ".svn" (beyond position 0) is skipped entirely.
    """
    if sourceDir.find(".svn") > 0:
        return
    for entry in os.listdir(sourceDir):  # "entry", not "file": avoid shadowing
        sourceFile = os.path.join(sourceDir, entry)
        targetFile = os.path.join(targetDir, entry)
        if os.path.isfile(sourceFile):
            if not os.path.exists(targetDir):
                os.makedirs(targetDir)
            needs_copy = (
                not os.path.exists(targetFile)
                or targetFile[-4:] == '.lua'
                or os.path.getsize(targetFile) != os.path.getsize(sourceFile)
            )
            if needs_copy:
                # shutil.copyfile closes both handles; the original left two
                # file objects open per copy (open(...).write(open(...).read())).
                shutil.copyfile(sourceFile, targetFile)
        if os.path.isdir(sourceFile):
            # (dead "First_Directory = False" local removed)
            copyFiles(sourceFile, targetFile)
if __name__=="__main__":
projectPath = "../"
key = "ZYA14ageb8642F58"
signment = "Signal"
srcPath = 'sg/src'
spaceNum = 4
DecodeWithXxteaModule("E:/magicRpg/magic_ios/sglmjy/attempted/wrong/conswhat/essentialConsult.txt", key, signment, spaceNum)
| siwenHT/workCode | 项目打包/资源包生成/z_script_ios/xxteaDecrypt.py | xxteaDecrypt.py | py | 3,123 | python | en | code | 0 | github-code | 50 |
3210653490 | import unittest
class TwoSumBinarySearch(unittest.TestCase):
    """
    Given a 1-indexed array `numbers` sorted in non-decreasing order, find
    the two distinct elements that add up to `target` and return their
    1-based indices [index1, index2] with index1 < index2.

    Exactly one solution exists and an element may not be used twice.

    Example: numbers = [2, 7, 11, 15], target = 9 -> [1, 2]
    (2 + 7 == 9, so index1 = 1 and index2 = 2.)
    """
    def two_sum(self, numbers, target):
        """Fix the first element, then binary-search the remaining suffix
        for its complement.  O(n log n); two pointers would be O(n)."""
        for first in range(len(numbers)):
            need = target - numbers[first]
            lo, hi = first + 1, len(numbers) - 1
            while lo <= hi:
                mid = lo + (hi - lo) // 2
                if numbers[mid] == need:
                    return [first + 1, mid + 1]
                if numbers[mid] < need:
                    lo = mid + 1
                else:
                    hi = mid - 1
    def test_two_sum(self):
        self.assertEqual(self.two_sum([2, 7, 11, 15], 9), [1, 2])
4825974242 | from functools import cached_property
from web3.contract.contract import ContractEvent
from web3.types import BlockIdentifier, EventData
from contract.base import Contract
class ERC20Contract(Contract):
    """Read-only wrapper around a standard ERC-20 token contract:
    metadata/balance calls plus Transfer event queries."""
    @property
    def abi(self) -> list[dict]:
        # Minimal ABI: only the read functions and the Transfer event used
        # by the methods below.
        return [
            {
                "name": "balanceOf",
                "inputs": [
                    {"internalType": "address", "name": "account", "type": "address"}
                ],
                "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}],
                "stateMutability": "view",
                "type": "function",
            },
            {
                "name": "decimals",
                "inputs": [],
                "outputs": [{"name": "", "type": "uint256"}],
                "stateMutability": "view",
                "type": "function",
            },
            {
                "name": "name",
                "inputs": [],
                "outputs": [{"type": "string", "name": ""}],
                "stateMutability": "view",
                "type": "function",
            },
            {
                "name": "symbol",
                "inputs": [],
                "outputs": [{"type": "string", "name": ""}],
                "stateMutability": "view",
                "type": "function",
            },
            {
                "name": "totalSupply",
                "inputs": [],
                "outputs": [{"name": "", "type": "uint256"}],
                "stateMutability": "view",
                "type": "function",
            },
            {
                "name": "Transfer",
                "inputs": [
                    {"name": "_from", "type": "address", "indexed": True},
                    {"name": "_to", "type": "address", "indexed": True},
                    {"name": "_value", "type": "uint256", "indexed": False},
                ],
                "anonymous": False,
                "type": "event",
            },
        ]
    @cached_property
    def name(self) -> str:
        """Token name; immutable on-chain, so cached per instance."""
        return self.contract.functions.name().call()
    @cached_property
    def symbol(self) -> str:
        """Token ticker symbol; cached per instance."""
        return self.contract.functions.symbol().call()
    @cached_property
    def precision(self) -> int:
        """10**decimals -- divide raw integer amounts by this to get tokens."""
        decimals = self.contract.functions.decimals().call()
        return 10**decimals
    def total_supply(self, block_identifier: BlockIdentifier = "latest") -> int:
        """Raw total supply at the given block."""
        return self.contract.functions.totalSupply().call(
            block_identifier=block_identifier
        )
    def balanceOf(
        self, address: str, block_identifier: BlockIdentifier = "latest"
    ) -> int:
        """Raw balance of *address* at the given block."""
        return self.contract.functions.balanceOf(address).call(
            block_identifier=block_identifier
        )
    @cached_property
    def transfer_event(self) -> ContractEvent:
        """Bound Transfer event object used for log queries."""
        return self.contract.events.Transfer
    def get_transfer_events(
        self,
        fromBlock: BlockIdentifier,
        toBlock: BlockIdentifier,
        _from: str = None,
        _to: str = None,
    ) -> list[EventData]:
        """Fetch Transfer logs in [fromBlock, toBlock], optionally filtered
        by sender and/or recipient (falsy filters are ignored)."""
        argument_filters = {}
        if _from:
            argument_filters["_from"] = _from
        if _to:
            argument_filters["_to"] = _to
        if not argument_filters:
            # web3 expects None, not an empty dict, for "no filters".
            argument_filters = None
        return list(
            self.transfer_event.get_logs(
                argument_filters=argument_filters, fromBlock=fromBlock, toBlock=toBlock
            )
        )
class YERC20Contract(ERC20Contract):
    """ERC-20 extended with pricePerShare/totalAssets -- presumably a
    Yearn-style vault token; confirm against the deployed contract."""
    @property
    def abi(self) -> list[dict]:
        # Base ERC-20 ABI plus the two vault read functions.
        return super().abi + [
            {
                "stateMutability": "view",
                "type": "function",
                "name": "pricePerShare",
                "inputs": [],
                "outputs": [{"name": "", "type": "uint256"}],
            },
            {
                "stateMutability": "view",
                "type": "function",
                "name": "totalAssets",
                "inputs": [],
                "outputs": [{"name": "", "type": "uint256"}],
            },
        ]
    def pricePerShare(self, block_identifier: BlockIdentifier = "latest") -> int:
        """Raw per-share price at the given block."""
        return self.contract.functions.pricePerShare().call(
            block_identifier=block_identifier
        )
    def totalAssets(self, block_identifier: BlockIdentifier = "latest") -> int:
        """Raw total assets held at the given block."""
        return self.contract.functions.totalAssets().call(
            block_identifier=block_identifier
        )
class BeefyERC20Contract(ERC20Contract):
    """ERC-20 extended with getPricePerFullShare -- presumably a Beefy-style
    vault token; confirm against the deployed contract."""
    @property
    def abi(self) -> list[dict]:
        # Base ERC-20 ABI plus the vault price read function.
        return super().abi + [
            {
                "stateMutability": "view",
                "type": "function",
                "name": "getPricePerFullShare",
                "inputs": [],
                "outputs": [{"name": "", "type": "uint256"}],
            }
        ]
    def getPricePerFullShare(self, block_identifier: BlockIdentifier = "latest") -> int:
        """Raw price of one full share at the given block."""
        return self.contract.functions.getPricePerFullShare().call(
            block_identifier=block_identifier
        )
class ConcERC20Contract(Contract):
    """Wrapper for a pool-based share contract: per-pool share/underlying
    reads (keyed by a pool id `_pid`) plus Deposit event queries.

    Note: unlike the classes above this derives from Contract directly --
    it exposes no ERC-20 interface of its own.
    """
    @property
    def abi(self) -> list[dict]:
        # Minimal ABI: the three per-pool read functions and the Deposit event.
        return [
            {
                "stateMutability": "view",
                "type": "function",
                "name": "getUserShare",
                "inputs": [
                    {"type": "uint256", "name": "_pid"},
                    {"type": "address", "name": "_account"},
                ],
                "outputs": [{"name": "", "type": "uint256"}],
            },
            {
                "stateMutability": "view",
                "type": "function",
                "name": "getTotalShare",
                "inputs": [{"type": "uint256", "name": "_pid"}],
                "outputs": [{"name": "", "type": "uint256"}],
            },
            {
                "stateMutability": "view",
                "type": "function",
                "name": "getTotalUnderlying",
                "inputs": [{"type": "uint256", "name": "_pid"}],
                "outputs": [{"name": "", "type": "uint256"}],
            },
            {
                "name": "Deposit",
                "type": "event",
                "anonymous": False,
                "inputs": [
                    {
                        "indexed": True,
                        "internalType": "uint256",
                        "name": "_pid",
                        "type": "uint256",
                    },
                    {
                        "indexed": True,
                        "internalType": "address",
                        "name": "_sender",
                        "type": "address",
                    },
                    {
                        "indexed": False,
                        "internalType": "uint256",
                        "name": "_amount",
                        "type": "uint256",
                    },
                ],
            },
        ]
    def getUserShare(
        self, _pid: int, user: str, block_identifier: BlockIdentifier = "latest"
    ) -> int:
        """Raw share of *user* in pool *_pid* at the given block."""
        return self.contract.functions.getUserShare(_pid, user).call(
            block_identifier=block_identifier
        )
    def getTotalShare(
        self, _pid: int, block_identifier: BlockIdentifier = "latest"
    ) -> int:
        """Raw total shares of pool *_pid* at the given block."""
        return self.contract.functions.getTotalShare(_pid).call(
            block_identifier=block_identifier
        )
    def getTotalUnderlying(
        self, _pid: int, block_identifier: BlockIdentifier = "latest"
    ) -> int:
        """Raw total underlying of pool *_pid* at the given block."""
        return self.contract.functions.getTotalUnderlying(_pid).call(
            block_identifier=block_identifier
        )
    @cached_property
    def deposit_event(self) -> ContractEvent:
        """Bound Deposit event object used for log queries."""
        return self.contract.events.Deposit
    def get_deposit_events(
        self,
        fromBlock: BlockIdentifier,
        toBlock: BlockIdentifier,
        _pid: int = None,
    ) -> list[EventData]:
        """Fetch Deposit logs in [fromBlock, toBlock], optionally filtered by
        pool id.  NOTE(review): a falsy _pid (0) is treated as "no filter" --
        confirm pool id 0 is never queried this way."""
        argument_filters = {}
        if _pid:
            argument_filters["_pid"] = _pid
        if not argument_filters:
            argument_filters = None
        return list(
            self.deposit_event.get_logs(
                argument_filters=argument_filters, fromBlock=fromBlock, toBlock=toBlock
            )
        )
class FarmERC20Contract(BeefyERC20Contract):
    """Harvest/Farm-style vault: Beefy ABI plus a per-holder underlying-balance view."""
    @property
    def abi(self) -> list[dict]:
        """Beefy ABI extended with underlyingBalanceWithInvestmentForHolder."""
        return super().abi + [
            {
                "stateMutability": "view",
                "type": "function",
                "name": "underlyingBalanceWithInvestmentForHolder",
                "inputs": [{"name": "holder", "type": "address"}],
                "outputs": [{"name": "", "type": "uint256"}],
            },
        ]
    def underlyingBalanceWithInvestmentForHolder(
        self, user: str, block_identifier: BlockIdentifier = "latest"
    ) -> int:
        """Return *user*'s raw underlying balance (including invested funds)."""
        return self.contract.functions.underlyingBalanceWithInvestmentForHolder(
            user
        ).call(block_identifier=block_identifier)
| curvefi/curve-snapshot | contract/erc20.py | erc20.py | py | 9,005 | python | en | code | 4 | github-code | 50 |
71929184476 | from maps.models import Marker
from closet.models import Subcategory
from django.contrib.gis.geos import Point
import csv
# Maps Marker/model field names to the CSV header labels they come from.
mapping = {
    'name': "NOM",
    'lat': "LAT",
    'lon': "LONG",
    'comment': "DESCRIPTION",
    'rue': 'RUE',
    'code': 'CODE',
    'commune': 'COMMUNE',
    'web': 'WEBSITE',
    'phone': 'TEL',
    'category': 'CATEGORIE',
}

# Read the whole spreadsheet export.  Fix: use a context manager -- the
# original opened the file and never closed it.
with open("/home/nikita/Downloads/CARTE DEWEY & REPAIR TOGETHER - 1 - RECUP' - HABI-ENER.csv", 'r') as f:
    lines = list(csv.reader(f, delimiter=","))

# First row is the header; resolve each model field to its column index.
head = lines.pop(0)
mapping_col = {key: head.index(full_name.strip()) for key, full_name in mapping.items()}

points = []
for line in lines:
    point = {name: line[col].strip() for name, col in mapping_col.items()}
    # Keep only rows that have at least a name and both coordinates.
    if '' not in (point['name'], point['lat'], point['lon']):
        points.append(point)
# Create one Marker per valid row, with its subcategories.
for point in points:
    # Some exported coordinates carry a spurious '.' four characters from the
    # end (e.g. "50.84.123"); splice it out before parsing as float.
    lat = point['lat']
    if lat[-4] == '.':
        lat = lat[:-4] + lat[-3:]
    lon = point['lon']
    if lon[-4] == '.':
        # Bug fix: the original concatenated lat[-3:] here, corrupting the
        # longitude with the latitude's last digits.
        lon = lon[:-4] + lon[-3:]
    pos = Point(float(lon), float(lat))
    m = Marker.objects.create(
        name=point['name'],
        position=pos,
        comment=point['comment'],
        web=point['web'],
        phone=point['phone'],
        adress="{0[rue]} {0[code]} {0[commune]}".format(point),
    )
    # Attach each comma-separated category, creating subcategories on demand.
    for cat in point['category'].split(','):
        subcat, created = Subcategory.objects.get_or_create(name=cat.strip().capitalize())
        m.subcategories.add(subcat)
    m.save()
| couleurmate/DeweyMaps | importer.py | importer.py | py | 1,501 | python | en | code | 0 | github-code | 50 |
27338371650 | import numpy as np
import cv2
import scipy.spatial
from utils.transform import get_matrix_rotate_point_around_x, \
get_matrix_rotate_point_around_y, \
get_matrix_rotate_point_around_z
def render_mesh(image_ori, point_cloud_data, blank_background = True):
    """Render a flat-shaded triangle mesh from projected 3D points.

    Triangulates the (x, y) coordinates of *point_cloud_data* with Delaunay,
    shades each triangle with a simple Lambertian term against a fixed light,
    and fills it onto either a copy of *image_ori* (blank_background=False)
    or a black canvas of the same size.

    Args:
        image_ori: source image; only its size (and optionally pixels) is used.
        point_cloud_data: (N, 3) array of points; assumes columns are x, y, z
            with x/y already in pixel space -- TODO confirm against callers.
        blank_background: draw on black instead of on the original image.

    Returns:
        The rendered BGR uint8 image.
    """
    image_size = (image_ori.shape[1], image_ori.shape[0])
    if not blank_background:
        image = image_ori.copy()
    else: image = np.zeros((image_size[1], image_size[0], 3), dtype=np.uint8)
    # Define a simple light direction (you can adjust this as needed)
    light_direction = np.array([0, 0, 1])
    # Create a Delaunay triangulation to connect the points and form a mesh
    triangles = scipy.spatial.Delaunay(point_cloud_data[:, :2])
    for simplex in triangles.simplices:
        vertices = point_cloud_data[simplex]
        # Compute the face normal (in this case, we use the cross product of two edges)
        edge1 = vertices[1] - vertices[0]
        edge2 = vertices[2] - vertices[0]
        face_normal = np.cross(edge1, edge2)
        face_normal /= np.linalg.norm(face_normal)
        # Calculate the intensity based on the dot product with the light direction
        intensity = np.dot(face_normal, light_direction)
        # Clip the intensity to avoid negative values
        intensity = max(intensity, 0)
        # Greenish tint scaled by the Lambert term.
        color=(intensity * 200, intensity * 255, intensity * 50)
        # Draw the triangle with shading
        cv2.fillPoly(image, [vertices[:, :2].astype(int)], color = color)
    return image
def calculate_center(point_cloud):
    """Return the centroid of *point_cloud*.

    Args:
        point_cloud: (N, D) array-like of point coordinates.

    Returns:
        (D,) numpy array with the per-axis mean.
    """
    # Mean over axis 0 averages each coordinate across all points.  The
    # original also computed len(point_cloud) into an unused local; dropped.
    return np.mean(point_cloud, axis=0)
def rotate_point_cloud(point_cloud, rotation_matrix):
    """Rotate *point_cloud* about its own centroid by *rotation_matrix*.

    Points are shifted so the centroid sits at the origin, rotated via
    ``points @ rotation_matrix``, then shifted back, so the cloud spins in
    place rather than around the world origin.
    """
    centroid = np.mean(point_cloud, axis=0)
    return np.dot(point_cloud - centroid, rotation_matrix) + centroid
def render_rotate_mesh(image_ori, point_cloud_data, delta_angle = 10, axis = 'y'):
    """Render the mesh at successive rotations about *axis*.

    Sweeps angles from -180 to 180 degrees in *delta_angle* steps, rotating
    the cloud about its centroid each time and rendering on a blank
    background.  Raises for an *axis* other than 'x', 'y' or 'z'.
    """
    matrix_builders = {
        'x': get_matrix_rotate_point_around_x,
        'y': get_matrix_rotate_point_around_y,
        'z': get_matrix_rotate_point_around_z,
    }
    views = []
    for angle_degrees in range(-180, 180, delta_angle):
        build_matrix = matrix_builders.get(axis)
        if build_matrix is None:
            raise Exception(f'axis is invalid, please use `x`, `y` or `z`')
        rotation = build_matrix(np.radians(angle_degrees))
        rotated = rotate_point_cloud(point_cloud_data, rotation)
        views.append(render_mesh(image_ori, rotated, True))
    return views
| nguyentrongvan/MedFace3D | utils/render_mesh.py | render_mesh.py | py | 3,080 | python | en | code | 0 | github-code | 50 |
39442430150 | from tkinter import *
from tkinter.ttk import *
from tkinter.messagebox import *
import pyperclip as pc
from Log.log import *
import data
from data_base.database_manager import DatabaseManager
class FrameCommonThings(data.Data):
    """Shared tkinter form helpers: build labelled input rows, validate and
    persist their values, and wire up clipboard / case-change shortcuts."""
    def Save(self, l, **kwargs):
        """Validate every widget in *l* and persist the collected values.

        Entries/comboboxes contribute their text (Spinbox text must parse as
        int); Buttons contribute their label as an int amount.  Aborts with a
        warning dialog on the first empty or invalid field.  Seven collected
        values go to the ``NSD`` table, any other count to ``FS`` (with
        **kwargs forwarded).  On success all widgets are reset.
        """
        b = 1  # 1-based index of the current entry, used in warning messages
        array = []
        for j, i in enumerate(l):
            if type(i) != Button:
                # Reject placeholder text and empty entries.
                if i.get().upper() != 'SELECT AN OPTION' and i.get().upper() != '':
                    if type(i) == Spinbox:
                        try:
                            array.append(int(i.get()))
                        except ValueError as e:
                            self.Warning(f'Entry No. {b} is int type!')
                            wr("error", e)
                            return
                    else:
                        array.append(i.get())
                    b += 1
                else:
                    wr('Fill all Entry')
                    self.Warning(f'Entry No. {b} is Empty!')
                    return
            else:
                # Button label doubles as the amount; '0.0' means "not filled".
                if i['text'] != '0.0':
                    array.append(int(i['text']))
                else:
                    wr('fill amount')
                    self.Warning(f'Amount is not filled......')
                    return
        if len(array) == 7:
            DatabaseManager(self.NSD, array)
        else:
            DatabaseManager(self.FS, array, **kwargs)
        # Reset every widget back to its empty/default state.
        for i in l:
            if type(i) == Button:
                i['text'] = '0.0'
            elif type(i) == Combobox:
                i.set(self.NONE)
            else:
                i.delete(0, 10000000)
                i.insert(0, '')
    def loop_one(self, list, root):
        """Build one labelled input row per key of *list* inside *root*.

        *list* maps a label to a spec dict with 'widget', 'option', 'grid'
        and (for Combobox) 'value' keys.  Binds Ctrl-u/l/t case shortcuts and
        Ctrl-Shift-C/V clipboard shortcuts on each widget, plus <Return> on
        the last one to trigger Save.  Returns (entry_widgets, save_button).
        """
        new_list_entry = []
        new_list_Label = []
        n = 1  # grid row; row 0 is left for a caller-provided header
        for i in list:
            k = list[i]
            if k['widget'] == Combobox:
                new_list_entry.append(k['widget'](root, value=k['value'], **k['option']))
                new_list_entry[-1].grid(
                    row=n,
                    column=2,
                    padx=8,
                    pady=2,
                    **k['grid'])
                new_list_entry[-1].set(self.NONE)
            else:
                new_list_entry.append(k['widget'](root, **k['option']))
                print(k, new_list_entry[-1])
                new_list_entry[-1].grid(row=n, column=2, padx=8, pady=2, **k['grid'])
                if k['widget'] == Button:
                    new_list_entry[-1]['text'] = i
            # n=v default binds the *current* widget into each lambda
            # (avoids the late-binding closure pitfall).
            v = new_list_entry[-1]
            new_list_entry[-1].bind('<Control-Key-u>', lambda a, n=v: self.ShortKeys(n, n.get().upper()))
            new_list_entry[-1].bind('<Control-Key-l>', lambda a, n=v: self.ShortKeys(n, n.get().lower()))
            new_list_entry[-1].bind('<Control-Key-t>', lambda a, n=v: self.ShortKeys(n, n.get().title()))
            new_list_entry[-1].bind('<Control-Key-C>', lambda a: self.Copy_Past(new_list_entry, 'copy'))
            new_list_entry[-1].bind('<Control-Key-V>', lambda a: self.Copy_Past(new_list_entry, 'paste'))
            new_list_Label.append(Label(root, text=i, anchor="w",width=20))
            new_list_Label[-1].grid(row=n, column=1, padx=3, pady=2)
            n += 1
        new_list_entry[-1].bind('<Return>', (lambda x: self.Save(new_list_entry)))
        Save = Button(root, text='Save', command=lambda: self.Save(new_list_entry))
        Save.grid(row=n, column=1, padx=3, pady=2, columnspan=2, ipadx=100)
        return new_list_entry, Save
    def Warning(self, message):
        """Show a modal warning dialog with *message*."""
        showwarning(title='Warning', message=message)
    def ShortKeys(self, a, b):
        """Replace widget *a*'s content with *b* (used by the case-change bindings)."""
        a.delete(0, 1000000000)
        a.insert(0, b)
    def Copy_Past(self, list, type):
        """'copy': join all entry values with newlines onto the clipboard.
        'paste': split the clipboard on newlines back into the entries."""
        if type == 'copy':
            a = ''
            for i in list:
                a += i.get() + '\n'
            pc.copy(a)
        elif type == 'paste':
            b = pc.paste().split('\n')
            if len(b) == 1:
                return
            n = 0
            for a in list:
                a.delete(0, 1000000000)
                try:
                    a.insert(0, b[n])
                except IndexError as a:
                    return
                n += 1
| yashrasniya/collage-project-fees-submission- | Frame/frameCommanThings.py | frameCommanThings.py | py | 4,213 | python | en | code | 0 | github-code | 50 |
13720437642 | import tkinter as tk
from tkinter import ttk
def prev_ord(conn, username):
    """Open a window listing every previous order for *username*.

    Joins the order/album/gift tables and shows the rows in a Treeview.
    Blocks in mainloop() until the window is closed, then closes the cursor
    (the caller keeps ownership of *conn* itself).

    NOTE(review): the ``%s`` placeholder implies a psycopg2-style driver,
    not sqlite3 -- confirm against the caller's connection type.
    """
    cur = conn.cursor()
    def fetch_previous_orders():
        # One row per ordered album: order info + album + bundled gift.
        query = """
        SELECT o.orderid, o.orderdate, o.ordertype, a.albumname, a.albumver, g.pcname
        FROM ordalb oa
        INNER JOIN orders o ON oa.orderid = o.orderid
        INNER JOIN apg p ON oa.apg = p.apgid
        INNER JOIN album a ON p.albumid = a.albumid
        INNER JOIN gift g ON p.giftid = g.giftid
        WHERE oa.username = %s;
        """
        cur.execute(query, (username,))
        previous_orders = cur.fetchall()
        return previous_orders
    window = tk.Tk()
    window.title("Previous Orders")
    # Create the Treeview widget for the table
    tree = ttk.Treeview(window)
    tree["columns"] = ("orderid", "date", "type", "album", "version", "gift")
    tree.heading("orderid", text="Order ID")
    tree.heading("date", text="Date")
    tree.heading("type", text="Type")
    tree.heading("album", text="Album")
    tree.heading("version", text="Version")
    tree.heading("gift", text="Gift")
    tree.pack()
    # Fetch and display the previous orders
    previous_orders = fetch_previous_orders()
    for order in previous_orders:
        order_id, order_date, order_type, album_name, album_version, gift_name = order
        tree.insert("", "end", values=(order_id, order_date, order_type, album_name, album_version, gift_name))
    window.mainloop()
    # Close the database connection
    cur.close()
| andricevaaa/bmstu-DBCP | src/previousorders.py | previousorders.py | py | 1,522 | python | en | code | 0 | github-code | 50 |
28070998645 | import pandas as pd
def get_mapping_dict(metadata):
    """Build {metabolite_id: "id__name"} from a tab-separated metadata file.

    The first line is treated as a header and skipped.  Rows whose last
    column (the metabolite id) is empty are ignored.  Commas are stripped
    from the name (column 1) so the value is safe inside CSV output.
    """
    NAME_COL = 1   # zero-based column holding the metabolite name
    ID_COL = -1    # the metabolite id lives in the last column
    mapping = {}
    with open(metadata, 'r') as handle:
        handle.readline()  # discard the header row
        for raw_line in handle:
            fields = raw_line.strip("\n").split("\t")
            metabolite_id = fields[ID_COL]
            if metabolite_id != "":
                clean_name = fields[NAME_COL].replace(",", "")
                mapping[metabolite_id] = metabolite_id + "__" + clean_name
    return mapping
import PyPluMA
class MetaboliteMapPlugin:
    """PluMA plugin: rewrite HMDB ids in a CSV's first column to "id__name"."""
    def input(self, infile):
        """Read the tab-separated parameter file into self.parameters.

        Expected keys: 'csvfile' (input CSV, relative to the PluMA prefix)
        and 'metadata' (metabolite names file).
        """
        inputfile = open(infile, 'r')
        self.parameters = dict()
        for line in inputfile:
            contents = line.strip().split('\t')
            self.parameters[contents[0]] = contents[1]
    def run(self):
        """No computation phase; all work happens in output()."""
        pass
    def output(self, outfile):
        """Copy the CSV to *outfile*, renaming HMDB ids in the first column.

        The header line passes through untouched.  For every data row whose
        first field contains "HMDB", the field is replaced by the id__name
        string from the metadata mapping; other rows are copied verbatim.
        """
        in_file = PyPluMA.prefix()+"/"+self.parameters["csvfile"]
        out_file = outfile
        met_col = 0
        sep=","
        map_dict = get_mapping_dict(PyPluMA.prefix()+"/"+self.parameters["metadata"])
        # Map to name
        with open(in_file, 'r') as f:
            with open(out_file, 'w') as w:
                line1 = f.readline()
                w.write(line1)
                for line in f.readlines():
                    row = line.split(sep)
                    id = row[met_col]
                    if "HMDB" in id:
                        new_id = map_dict[id]
                        # Rebuild the row with the mapped id; the trailing
                        # strip(sep) removes the final extra separator while
                        # the last field keeps its newline.
                        new_entry = ""
                        for i, element in enumerate(row):
                            if i==0:
                                new_entry = new_id
                            else:
                                new_entry += element
                            new_entry+=sep
                        new_entry = new_entry.strip(sep)
                        w.write(new_entry)
                    else:
                        w.write(line)
| movingpictures83/MetaboliteMap | MetaboliteMapPlugin.py | MetaboliteMapPlugin.py | py | 1,910 | python | en | code | 0 | github-code | 50 |
9107962004 | # ----------------------- PARHAM KHOSSRAVI ------------------------
import tkinter
import sqlite3
import datetime
# Module-wide sqlite handle shared by every handler below.
x=sqlite3.Connection("shop82.db")
print("connect to database!!")
#------------- create users-----------------
# query='''create table user82
# (id integer primary key,
# user char(30) not null,
# psw char(30) not null,
# addr char(40) not null)'''
# x.execute(query)
# x.close()
#------------------- insert into----------------
# query='''insert into user82(user,psw,addr)
# values('admin','123456789','rasht')'''
# x.execute(query)
# x.commit()
#------------------- create table product -------
# query='''CREATE TABLE products82
# ( ID INTEGER PRIMARY KEY,
# name CHAR(20) NOT NULL,
# price int NOT NULL,
# qnt int NOT NULL,
# comment TEXT,
# time char(20) not null)'''
# x.execute(query)
# x.close()
#-------- insert into products table -------------------
# query='''INSERT INTO products82 (name,price,qnt,time)
# VALUES ('phone',2500,15,"?") '''
# x.execute(query)
# x.commit()
# x.close()
def delete():
    """Clear the username and password entries on the main window."""
    for entry in (txt_user, txt_psw):
        entry.delete(0, "end")
#------------------ login-------------------------
def login():
    """Validate the entered credentials against user82 and unlock the UI.

    Typing the literal username "admin" enables the admin-plan button even
    before the password check -- NOTE(review): confirm this is intended.
    """
    user=txt_user.get()
    psw=txt_psw.get()
    if user=="admin":
        btn_admin_plan.configure(state="active",command=admin_plan)
    if len(user)==0 or len(psw)==0:
        lbl_msg.configure(text="please fill inputs",fg="red")
        return
    # Parameterized lookup (no SQL injection via the entry fields).
    query='''select * from user82 where user=? and psw=?'''
    result=x.execute(query,(user,psw))
    rows=result.fetchall()
    if len(rows)==0:
        lbl_msg.configure(text="username or password are wrong",fg="red")
        return
    else:
        lbl_msg.configure(text="welcome to your acount",fg="green")
        delete()
        btn_login.configure(state="disabled")
        btn_logout.configure(state="active")
        btn_shop.configure(state="active")
        # btn_admin_plan.configure(state="disabled")
#--------------------- logout---------------------------
def logout():
    """Return the UI to its logged-out state: only login/submit stay usable."""
    btn_login.configure(state="active")
    btn_logout.configure(state="disabled")
    btn_shop.configure(state="disabled")
    # Bug fix: configure("disabled") only *queries* the option named
    # "disabled"; the keyword form is required to actually disable the button.
    btn_admin_plan.configure(state="disabled")
    lbl_msg.configure(text="you are logged out",fg="green")
#--------------------- submit----------------------------
def fainal_submit():
    """Validate the sign-up form and insert a new row into user82.

    All feedback is written to lbl_msg1 on the submit window.
    """
    user=txt_user1.get()
    psw=txt_psw1.get()
    addr=txt_addr1.get()
    if len(user)==0 or len(psw)==0 or len(addr)==0:
        lbl_msg1.configure(text="please fill inputs",fg="red")
        return
    if len(psw)<8:
        # Bug fix: this message previously went to lbl_msg (the *main* window
        # label), invisible behind the submit window; use lbl_msg1 like every
        # other message in this handler.
        lbl_msg1.configure(text="password lenght wrong",fg="red")
        return
    # Reject duplicate usernames before inserting.
    query='''select * from user82 where user=? '''
    result=x.execute(query,(user,))
    rows=result.fetchall()
    if len(rows)!=0:
        lbl_msg1.configure(text="Username already exist",fg="red")
        return
    else:
        query='''insert into user82(user,psw,addr)
        values(?,?,?)'''
        x.execute(query,(user,psw,addr))
        x.commit()
        lbl_msg1.configure(text="submit done",fg="green")
        txt_user1.delete(0,"end")
        txt_psw1.delete(0,"end")
        txt_addr1.delete(0,"end")
def submit():
    """Open the sign-up window with username/password/address fields.

    The entry widgets and message label are published as module globals so
    fainal_submit() can read and update them.
    """
    global txt_addr1,txt_psw1,txt_user1,lbl_msg1
    win_submit=tkinter.Toplevel(win)
    win_submit.title("project")
    win_submit.geometry("200x200")
    lbl_user1=tkinter.Label(win_submit,text="Username: ")
    lbl_user1.pack()
    txt_user1=tkinter.Entry(win_submit,width=30)
    txt_user1.pack()
    lbl_psw1=tkinter.Label(win_submit,text="Password: ")
    lbl_psw1.pack()
    txt_psw1=tkinter.Entry(win_submit,width=30)
    txt_psw1.pack()
    lbl_addr1=tkinter.Label(win_submit,text="Addres")
    lbl_addr1.pack()
    txt_addr1=tkinter.Entry(win_submit,width=30)
    txt_addr1.pack()
    lbl_msg1=tkinter.Label(win_submit,text="")
    lbl_msg1.pack()
    btn_login=tkinter.Button(win_submit,text="submit now!",fg="brown",command=fainal_submit)
    btn_login.pack()
    win_submit.mainloop()
#------------------- shop------------------------
def shop():
    """Open the shop window and list every product from products82."""
    win_shop=tkinter.Toplevel(win)
    win_shop.title("shop")
    win_shop.geometry("400x400")
    lstbox=tkinter.Listbox(win_shop,width=40)
    lstbox.pack()
    # Bug fix: the original ran "SELECT user FROM products" -- neither that
    # table nor that column exists (the schema above creates products82), and
    # the display line below needs the id/name/price/qnt columns.
    query='''SELECT ID, name, price, qnt FROM products82'''
    result=x.execute(query)
    rows=result.fetchall()
    for product in rows:
        mystr=f"id: {product[0]} name: {product[1]} price: {product[2]} qnt: {product[3]} "
        lstbox.insert(0, mystr)
    win_shop.mainloop()
#---------------------- read user --------------------------
def lst_user():
    """Fill the admin window's listbox (lstbox2 global) with all user rows."""
    query='''select * from user82 '''
    result=x.execute(query)
    rows=result.fetchall()
    for items in rows:
        lstbox2.insert(0,items)
#--------------------- list products ------------------------
def lst_products():
    """Open a window listing the name of every product in products82."""
    win_lst_product=tkinter.Toplevel(win)
    win_lst_product.title("product")
    win_lst_product.geometry("400x400")
    lbl_pro=tkinter.Label(win_lst_product,text="report product",bg="green")
    lbl_pro.pack()
    PBox=tkinter.Listbox(win_lst_product,width=40)
    PBox.pack()
    query='''select name from products82'''
    result=x.execute(query)
    rows=result.fetchall()
    for pname in rows:
        PBox.insert(0,pname)
    win_lst_product.mainloop()
#------------------------ finish product --------------------
def lst_finish():
    """Open a window listing products that are out of stock (qnt = 0)."""
    win_lst_finish=tkinter.Toplevel(win)
    win_lst_finish.title("product")
    win_lst_finish.geometry("400x400")
    lbl_finish=tkinter.Label(win_lst_finish,text="report product",bg="gray")
    lbl_finish.pack()
    PBox=tkinter.Listbox(win_lst_finish,width=40)
    PBox.pack()
    query='''select * from products82 where qnt=0'''
    result=x.execute(query)
    rows=result.fetchall()
    for finish in rows:
        PBox.insert(0,finish)
    win_lst_finish.mainloop()
#---------------------- high shop -----------------------------------
def lst_hshop():
    """Open a window listing products with a non-zero price.

    NOTE(review): the filter price != 0 matches essentially every product;
    if "high shop" means the most expensive items, an ORDER BY / threshold
    is probably intended -- confirm.
    """
    win_lst_hshop=tkinter.Toplevel(win)
    win_lst_hshop.title("product")
    win_lst_hshop.geometry("400x400")
    lbl_h=tkinter.Label(win_lst_hshop,text="report product",bg="brown")
    lbl_h.pack()
    hBox=tkinter.Listbox(win_lst_hshop,width=40)
    hBox.pack()
    query='''select * from products82 where price!=0'''
    result=x.execute(query)
    rows=result.fetchall()
    for high in rows:
        st=f"high shope: {high[0]} "
        hBox.insert(0,st)
    win_lst_hshop.mainloop()
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ time @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
def time():
    """Open a window meant to list products bought on a specific date.

    NOTE(review): the execute() call below binds *this function object*
    (``time``) as the SQL parameter -- sqlite3 will reject it
    (InterfaceError), so the window can never be populated.  A date value
    (likely from a user input widget) was presumably intended; confirm and
    fix the data source.
    """
    win_time=tkinter.Toplevel(win)
    win_time.title("Products bought in specific dates: " )
    win_time.geometry("400x400")
    query = '''SELECT * FROM products82 WHERE time= ? '''
    result=x.execute(query,(time,))
    rows=result.fetchall()
    timebox=tkinter.Listbox(win_time,width=40)
    timebox.pack()
    for product in rows :
        timebox.insert(0,product)
    win_time.mainloop()
def time2():
    """Stamp product rows with today's date.

    NOTE(review): there is no WHERE clause, so *every* row is updated --
    confirm that is the intent.
    """
    Time=datetime.date.today()
    # Bug fix: the table is named products82 (see the schema above), not
    # product82; the original statement always failed.
    query=''' update products82 set time=?'''
    x.execute(query,(Time,))
    x.commit()
#----------------------- admin paln -------------------------
def admin_plan():
    """Open the admin window with the report buttons.

    lstbox2 and btn_user2 are published as globals so the lst_* handlers can
    write into the admin window's listbox.
    """
    global btn_user2,lstbox2
    win_admin=tkinter.Toplevel(win)
    win_admin.title("admin paln")
    win_admin.geometry("400x400")
    lbl_ad=tkinter.Label(win_admin,text="report user",bg="red")
    lbl_ad.pack()
    lstbox2=tkinter.Listbox(win_admin,width=40)
    lstbox2.pack()
    btn_user2=tkinter.Button(win_admin,text="list user",command=lst_user)
    btn_user2.pack()
    btn_prouduct2=tkinter.Button(win_admin,text="list prouduct",command=lst_products)
    btn_prouduct2.pack()
    btn_finish=tkinter.Button(win_admin,text="list finish",command=lst_finish)
    btn_finish.pack()
    btn_high=tkinter.Button(win_admin,text="list high shop",command=lst_hshop)
    btn_high.pack()
    btn_time=tkinter.Button(win_admin,text="time",command=time)
    btn_time.pack()
    win_admin.mainloop()
#----------------------- main------------------------------------
# ----- main window: login form plus the action buttons -----
win=tkinter.Tk()
win.title("project")
win.geometry("400x400")
lbl_user=tkinter.Label(win,text="Username: ")
lbl_user.pack()
txt_user=tkinter.Entry(win,width=30)
txt_user.pack()
lbl_psw=tkinter.Label(win,text="Password: ")
lbl_psw.pack()
txt_psw=tkinter.Entry(win,width=30)
txt_psw.pack()
# Status line updated by login()/logout().
lbl_msg=tkinter.Label(win,text="")
lbl_msg.pack()
btn_login=tkinter.Button(win,text="login",fg="blue",command=login)
btn_login.pack()
# No explicit parent: attaches to the default root, which is `win`.
btn_logout=tkinter.Button(text="logout",state="disabled",command=logout)
btn_logout.pack()
btn_submit=tkinter.Button(win,text="submit",fg="blue",command=submit)
btn_submit.pack()
# shop/admin stay disabled until a successful login.
btn_shop=tkinter.Button(win,text="shop",state="disabled",command=shop)
btn_shop.pack()
btn_admin_plan=tkinter.Button(win,text="admin plan",state="disabled",command=admin_plan)
btn_admin_plan.pack()
win.mainloop()
| parham82/login_shop822 | project.mian.py | project.mian.py | py | 9,172 | python | en | code | 0 | github-code | 50 |
11139716277 | from django.forms.models import model_to_dict
from django.shortcuts import redirect, render
from django.views import View
from marketplace.models import Marketplace as MMarketplace
from marketplace.models import MarketplaceForm
from marketplace.models import MarketplaceSettings as MMarketplaceSettings
from marketplace.models import MarketplaceSettingsForm
class Marketplace(View):
    """CRUD-style view for Marketplace objects, dispatched on an ``action`` URL kwarg."""
    form_market = MarketplaceForm
    template = "marketplace-form.html"
    def get(self, request, action=None, id=None, **kwargs):
        """Render the list, a blank form, a pre-filled edit form, or delete."""
        if action and action == "new":
            form = self.form_market
            return render(request, self.template, {"form": form})
        if action and action == "update" and id:
            return self.put(request, id=id)
        if action and action == "delete" and id:
            # NOTE(review): deleting via GET is unsafe against prefetchers
            # and CSRF-less links -- confirm this is intended.
            return self.delete(request, id=id)
        marketplaces = MMarketplace.objects.all()
        return render(request, "marketplace.html", {"marketplaces": marketplaces})
    def post(self, request, action=None, id=None, **kwargs):
        """Create a marketplace, or update the one identified by *id*."""
        if id:
            data = MMarketplace.objects.get(id=id)
            form = self.form_market(request.POST, instance=data)
        else:
            form = self.form_market(request.POST)
        if form.is_valid():
            form.save()
        # Redirects even when invalid; form errors are discarded.
        return redirect("/marketplaces", id=id)
    def put(self, request, action=None, id=None, **kwargs):
        """Show the edit form for marketplace *id* together with its settings."""
        data = MMarketplace.objects.get(id=id)
        try:
            # NOTE(review): .filter() is lazy and never raises DoesNotExist
            # here, so the except branch is effectively dead -- confirm.
            settings = MMarketplaceSettings.objects.all().filter(marketplace=id)
        except MMarketplaceSettings.DoesNotExist:
            settings = None
        form = self.form_market(initial=model_to_dict(data))
        return render(
            request, self.template, {"form": form, "settings": settings, "id": data.id}
        )
    def delete(self, request, action=None, id=None):
        """Delete marketplace *id* and return to the listing."""
        data = MMarketplace.objects.get(id=id)
        data.delete()
        return redirect("/marketplaces")
return redirect("/marketplaces")
class MarketplaceSettings(View):
    """CRUD-style view for MarketplaceSettings rows, mirroring Marketplace above."""
    form_settings = MarketplaceSettingsForm
    template = "settings-form.html"
    def get(self, request, action=None, id=None, **kwargs):
        """Dispatch on *action*; returns None (HTTP 500) when no action matches.

        NOTE(review): the implicit None fall-through for an unknown action is
        probably unintended -- confirm whether a redirect/404 belongs here.
        """
        if action and action == "new":
            form = self.form_settings
            return render(request, self.template, {"form": form})
        if action and action == "update" and id:
            return self.put(request, id=id)
        if action and action == "delete" and id:
            return self.delete(request, id=id)
    def post(self, request, action=None, id=None, **kwargs):
        """Create a settings row, or update the one identified by *id*."""
        if id:
            data = MMarketplaceSettings.objects.get(id=id)
            form = self.form_settings(request.POST, instance=data)
        else:
            form = self.form_settings(request.POST)
        if form.is_valid():
            form.save()
        return redirect("/marketplaces")
    def put(self, request, action=None, id=None, **kwargs):
        """Show the edit form for settings row *id*."""
        data = MMarketplaceSettings.objects.get(id=id)
        form = self.form_settings(initial=model_to_dict(data))
        return render(request, self.template, {"form": form})
    def delete(self, request, action=None, id=None, **kwargs):
        """Delete settings row *id* and redirect to the named marketplace route."""
        data = MMarketplaceSettings.objects.get(id=id)
        data.delete()
        return redirect("marketplace")
| mamazinho/sellermp | marketplace/views.py | views.py | py | 3,271 | python | en | code | 0 | github-code | 50 |
608269358 | import requests
import json
import os
# Demo client for the CIRCABC share REST API: login, list shared files,
# request upload space, upload one file, list again.
baseurl = "https://circabc.acceptance.europa.eu/share"
API_KEY = "YOUR-API-KEY"
file_to_upload = "./text.txt"

##### login #####
url = baseurl + "/webservice/login"
headers = {
    "Content-Type": "application/json",
    "X-API-KEY": API_KEY
}
response = requests.post(url, headers=headers)
user_id = response.json()["userId"]
print(f"Login successful. User ID: {user_id}")

##### list shared files #####
url = baseurl + "/webservice/user/{}/files/fileInfoUploader".format(user_id)
params = {
    "pageSize": 10, "pageNumber": 0
}
response = requests.get(url, headers=headers, params=params)
files = response.json()
print(f"List shared files successful. Files {files}")

###### request file space #####
url = baseurl + "/webservice/file/fileRequest"
payload = json.dumps({
    "expirationDate": "2023-06-30",
    "hasPassword": False,
    "name": os.path.basename(file_to_upload),
    # Bug fix: report the file's real size instead of a hard-coded 1024 bytes.
    "size": os.path.getsize(file_to_upload),
    "sharedWith": [{"email": ""}], "downloadNotification": False
})
response = requests.post(url, headers=headers, data=payload)
file_id = response.json()["fileId"]
print(f"File space request successful. File ID: {file_id}")

##### upload file #####
url = baseurl + "/webservice/file/{}/fileRequest/fileContent".format(file_id)
# Multipart upload must not carry the JSON content type.
headers.pop("Content-Type")
# Bug fix: close the upload handle (the original leaked the open file).
with open(file_to_upload, "rb") as fh:
    files = {"file": ("text.txt", fh, "application/octet-stream")}
    response = requests.post(url, headers=headers, files=files)
print(f"File upload successful. File ID: {file_id}")

##### list shared files #####
url = baseurl + "/webservice/user/{}/files/fileInfoUploader".format(user_id)
params = {
    "pageSize": 10, "pageNumber": 0
}
response = requests.get(url, headers=headers, params=params)
files = response.json()
print(f"List shared files successful. Files {files}")
| CIRCABC/EUShare | client/python/client.py | client.py | py | 1,808 | python | en | code | 3 | github-code | 50 |
36556420690 |
# void foo(){ z = x + y; cout << x; return 0; } Ctrl + /
"""
def foo():
z = x + y
print(z)
return 0
"""
# base_year = 1000
#
# year = 5 + 2 * 3 - base_year
# year_in_different_form = 5 + 20 * 3 - base_year
#
# epoch = year / year_in_different_form
#
# x = (year,
# epoch,
# year_in_different_form,
# base_year)
# x = 5
# print(x/2 - x//2)
# True
# False
# 5 - integer
# 3.14 float
# 'H' - char
# name = input("Enter your name: ")
# base_str = "Hello {name_user}"
# result = base_str.format(name_user=name)
# result = f'Hello {name}'
# print(result)
# age = int(input("Enter age: "))
#
# if age <= 18: # -> True
# if age <= 7:
# print("You are child!")
# else:
# print("You are young!")
# elif age > 18 and age < 40:
# print("You are in a best age!")
# else: # -> False
# print("I can't identify your age.")
#
# print("End")
# Ask for a birth year and report the age, or complain when out of range.
CURRENT_YEAR = 2022
year = int(input("Enter year of birth: "))
if 0 < year < CURRENT_YEAR:
    print(f"Your age is {CURRENT_YEAR - year}")
else:
    print(f"Entered year is not in bounds. Please enter value in 0 to {CURRENT_YEAR}")
| AH0HIM/hillel_ikonnikov | 2_lesson/2_1_classwork/2_1_1_main.py | 2_1_1_main.py | py | 1,158 | python | en | code | 0 | github-code | 50 |
31476452534 | import os
from time import sleep
import sys
# Number of executor containers to launch, taken from the command line.
containers = int(sys.argv[1])
# Per-container RAM cap in gigabytes.
ram_limit = 1.5
for i in range(containers):
    # Container i maps internal port 5000 to host port 5{i:03d} (5000..5999).
    command = f"docker run --rm -d -p 5{i:03d}:5000 " \
              f"--mount type=bind,source=C:\\SatelliteImagesBIGDATA,target=/SatelliteImagesBIGDATA " \
              f"--cpus=1 " \
              f"--memory={ram_limit}g " \
              f"ndvi-compute-executor"
    print(command)
    output = os.system(command)
# Wait for containers initialization
sleep(5)
# Run the master once per worker count (1..containers), pausing between runs.
for i in range(1, containers+1):
    os.system(f"python master/master.py {i} 1")
    sleep(10)
| ignacyr/NDVI-distributed-compute | auto-test.py | auto-test.py | py | 585 | python | en | code | 0 | github-code | 50 |
36339178903 | import asyncio
import unittest
from k2.aeon import Aeon, SiteModule, Response
class SimpleSiteModule(SiteModule):
    """Echo module: answers every request with a 200 describing the request."""
    async def handle(self, request):
        """Return a Response whose body summarizes method, url, args, data and headers."""
        return Response(
            data='{method}: url={url},args={args},data={data},headers={headers}'.format(
                method=request.method,
                url=request.url,
                args=str(request.args),
                data=str(request.data),
                headers=str(request.headers),
            ),
            code=200,
        )
LOOP = asyncio.get_event_loop()
class TestSM(unittest.TestCase):
    # End-to-end checks of Aeon request routing via emulate_request().
    def setUp(self):
        # Fresh server per test: '/' echoes the request, '/not_found/' is a fixed 404.
        self.aeon = Aeon(
            namespace={
                r'^/': SimpleSiteModule(),
                r'^/not_found/': Response(code=404),
            },
        )
    def request(
        self,
        url,
        headers=None,
        args=None,
        data=None,
        method='GET',
        http_version='HTTP/1.1',
        code=200,
    ):
        # Emulate one request, assert status *code*, and -- for 2xx responses --
        # assert the body matches SimpleSiteModule.handle's echo format.
        resp = LOOP.run_until_complete(self.aeon.emulate_request(url, headers, args, data, method, http_version))
        self.assertEqual(resp.code, code)
        # Expected echo body; defaults mirror what the module sees for absent values.
        data = '{method}: url={url},args={args},data={data},headers={headers}'.format(
            method=method,
            url=url,
            args=str(args or {}),
            data=str(data or b''),
            headers=str(headers or {}),
        )
        if 200 <= resp.code < 300:
            self.assertEqual(resp.data, data)
        return resp
    def test_url(self):
        self.request('/abc')
    def test_code(self):
        self.request('/not_found/', code=404)
    def test_method(self):
        self.request('/', method='POST')
    def test_args(self):
        self.request('/', args={'a': 123})
    def test_data(self):
        self.request('/', data='-- Some Data --')
    def test_headers(self):
        self.request('/', headers={'x-test-header': 'test-value; ???'})
| moff4/k2 | test/aeon/sm.py | sm.py | py | 1,911 | python | en | code | 0 | github-code | 50 |
33087601051 | """Tools for retrieving the user's configs for the ospool tools"""
import sqlite3
import os
import pwd
import pathlib
def _get_home_dir():
home = os.environ.get("HOME")
if home:
return home
return pwd.getpwuid(os.geteuid()).pw_dir
def _get_state_dir():
    """Return (creating it if needed) the per-user ospool state directory.

    Honors $XDG_STATE_HOME, defaulting to ~/.local/state, with an "ospool"
    subdirectory.  Created mode 0700 since it holds the user's state DB.
    """
    default_base = os.path.join(_get_home_dir(), ".local/state")
    state_dir = pathlib.Path(os.environ.get("XDG_STATE_HOME", default_base)) / "ospool"
    os.makedirs(state_dir, mode=0o700, exist_ok=True)
    return state_dir
def _get_state_db(read_only=False):
    """Open the sqlite state DB, bootstrapping the pool_history table.

    read_only: open via a ``mode=ro`` URI so no write lock is taken.
    NOTE(review): in read-only mode the CREATE below would fail if the table
    is missing; callers guard the whole call in try/except -- confirm this
    best-effort behaviour is acceptable.
    """
    if read_only:
        conn = sqlite3.connect("file:{}?mode=ro".format(_get_state_dir() / "state.db"), uri=True)
    else:
        conn = sqlite3.connect(str(_get_state_dir() / "state.db"))
    with conn:
        # Create the schema on first use.
        all_tables = set(row[0] for row in conn.execute("SELECT name FROM sqlite_master WHERE type='table'"))
        if 'pool_history' not in all_tables:
            conn.execute("CREATE TABLE pool_history (name text)")
    return conn
def get_pool_history():
    """
    Return a set of all pools used in the recorded history.

    Returns an empty set when the state DB cannot be opened or read (e.g. it
    does not exist yet), preserving the original best-effort behaviour.
    """
    try:
        conn = _get_state_db(read_only=True)
        with conn:
            return set(row[0] for row in conn.execute("SELECT name FROM pool_history"))
    except Exception:
        # Best-effort: any DB problem is treated as "no history recorded".
        # (The original used a bare except and had an unreachable
        # ``return all_pools`` referencing an undefined name; both removed.)
        return set()
def add_pool_history(pool):
    """
    Record that *pool* has been seen, once; no-op if it is already in the history.
    """
    # Opening a SQLite3 DB read-write requires a lock; these can be heavy for shared FS.
    # Since we will write to the DB only the first time the pool is used (and there aren't
    # that many pools around!), optimistically assume we can get away with read-only.
    if pool in get_pool_history():
        return
    with _get_state_db() as conn:
        conn.execute("INSERT INTO pool_history VALUES (?)", (pool, ));
| bbockelm/ospool | src/ospool/utils/config.py | config.py | py | 1,822 | python | en | code | 0 | github-code | 50 |
6480794989 | import datetime
from random import randint
import pytest
from hamcrest import assert_that, calling, raises
from mongoengine.errors import ValidationError
from backend_tests.framework.asserts import assert_data_are_equal
from database.models import User
def check_review_data(review_document, exp_data):
    """Assert a Review document's fields match the expected raw-data dict.

    ``added_by`` in *exp_data* holds a user id; it is resolved to the User
    document before comparison (or compared as None when absent).
    """
    owner_id = exp_data.get('added_by')
    assert_data_are_equal(
        {'mark': [review_document.mark, exp_data['mark']],
         'comment': [review_document.comment, exp_data.get('comment')],
         'created_at': [review_document.created_at, exp_data.get('created_at')],
         'added_by': [review_document.added_by, User.objects.get(id=owner_id) if owner_id else None]})
class TestReviewModel:
    # Model-level validation tests for Review documents (fixtures supply
    # create_user / create_dish / create_review factories).
    @pytest.mark.parametrize('comment', ['some_comment_{}'.format(randint(1, 10000)), None])
    def test_create_review(self, create_user, create_dish, create_review, comment):
        # A review with or without a comment persists all supplied fields.
        review_data = dict(dish=create_dish(),
                           added_by=create_user().id,
                           mark=randint(1, 5),
                           comment=comment,
                           created_at=datetime.datetime.now())
        new_review = create_review(**review_data)
        check_review_data(new_review, review_data)
    @pytest.mark.skip(reason='Непонятное поле')
    def test_create_review_without_user(self, create_dish, create_user, create_review):
        # Skipped: expected to reject a review with no owner.
        assigned_dish = create_dish()
        assert_that(calling(create_review).with_args(dish=assigned_dish,
                                                     added_by=None), raises(ValidationError))
    @pytest.mark.parametrize('mark', [0, 6])
    def test_create_review_wrong_mark(self, create_dish, create_user, create_review, mark):
        # Marks outside 1..5 must fail validation.
        assigned_dish = create_dish()
        owner_id = create_user().id
        assert_that(calling(create_review).with_args(dish=assigned_dish,
                                                     added_by=owner_id,
                                                     mark=mark), raises(ValidationError))
    def test_review_delete_user(self, create_dish, create_user, create_review):
        # Deleting the review's owner must leave the dish reloadable.
        owner = create_user(with_deleting=False)
        dish = create_dish()
        new_review = create_review(dish=dish,
                                   added_by=owner.id)
        owner.delete()
        dish.reload()
| nelaluno/lunch_menu | backend_tests/tests_models/test_review.py | test_review.py | py | 2,354 | python | en | code | 0 | github-code | 50 |
5083019881 | import shutil
import tempfile
from ..forms import PostForm
from ..models import Post, Group
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django.contrib.auth import get_user_model
from http import HTTPStatus
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
User = get_user_model()
# Create a temporary folder for media files; for the duration of the
# tests the media folder is overridden. Media files saved during the
# tests go to the temporary TEMP_MEDIA_ROOT folder, which is deleted
# afterwards.
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class TaskCreateFormTests(TestCase):
    """Form tests for creating and editing posts, including image upload."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Create database records used to check an existing slug.
        cls.user = User.objects.create_user(username='TestUser')
        cls.group = Group.objects.create(title='Тестовая группа',
                                         slug='test_slug',
                                         description='Тестовое описание')
        # Only the group is created in this method.
        # cls.post = Post.objects.create(text='Первый пост', group=cls.group,
        #                                author=cls.user)
        # Create the form in case attribute checks are needed.
        cls.form = PostForm()
    def setUp(self):
        self.guest_client = Client()
        self.user = TaskCreateFormTests.user
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)
        # Minimal valid GIF payload used for the image-upload tests.
        small_gif = (
            b'\x47\x49\x46\x38\x39\x61\x02\x00'
            b'\x01\x00\x80\x00\x00\x00\x00\x00'
            b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
            b'\x00\x00\x00\x2C\x00\x00\x00\x00'
            b'\x02\x00\x01\x00\x00\x02\x02\x0C'
            b'\x0A\x00\x3B'
        )
        self.uploaded = SimpleUploadedFile(
            name='small.gif',
            content=small_gif,
            content_type='image/gif'
        )
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Remove the temporary media directory created for this test run.
        shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
    def test_create_task(self):
        """A valid form creates a Post record."""
        # Create the first request and check the response status.
        response = self.authorized_client.post(
            reverse('posts:profile',
                    kwargs={
                        'username': TaskCreateFormTests.user.username
                    }),
            data={
                'text': 'Test post',
                'group': TaskCreateFormTests.group.id
            },
            follow=True
        )
        self.assertEqual(response.status_code, HTTPStatus.OK)
        upload = self.uploaded
        form_data = {
            'text': 'Test post',
            'group': TaskCreateFormTests.group.id,
            'author': TaskCreateFormTests.user,
            'image': upload,
        }
        # Send the POST request.
        response = self.authorized_client.post(
            reverse('posts:post_create'),
            data=form_data,
            follow=True
        )
        # Check that the redirect happened.
        self.assertRedirects(
            response, reverse(
                'posts:profile',
                kwargs={
                    'username': TaskCreateFormTests.user.username
                }
            )
        )
        # Fetch the post and check all of its properties.
        post = Post.objects.first()
        self.assertEqual(post.text, 'Test post')
        self.assertEqual(post.author, self.user)
        self.assertEqual(post.group, TaskCreateFormTests.group)
        self.assertEqual(Post.objects.count(), 1)
        self.assertEqual(post.image, 'posts/small.gif')
    def test_authorized_user_edit_post(self):
        # Editing a post as an authorized user.
        post = Post.objects.create(
            text='post_text',
            author=self.user
        )
        form_data = {
            'text': 'post_text_edit',
            'group': self.group.id
        }
        response = self.authorized_client.post(
            reverse(
                'posts:post_edit',
                args=[post.id]),
            data=form_data,
            follow=True
        )
        self.assertRedirects(
            response,
            reverse('posts:post_detail', kwargs={'post_id': post.id})
        )
        post = Post.objects.first()
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertEqual(post.text, form_data['text'])
        self.assertEqual(post.author, self.user)
        self.assertEqual(post.group.id, form_data['group'])
    def test_nonauthorized_user_create_post(self):
        # Creating a post as an unauthorized user must redirect to login.
        form_data = {
            'text': 'non_auth_edit_text',
            'group': self.group.id
        }
        response = self.guest_client.post(
            reverse('posts:post_create'),
            data=form_data,
            follow=True
        )
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertRedirects(
            response,
            ('/auth/login/?next=/create/')
        )
        self.assertEqual(Post.objects.count(), 0)
| Xostyara/hw05_final | yatube/posts/tests/test_forms.py | test_forms.py | py | 5,879 | python | ru | code | 0 | github-code | 50 |
class Solution1(object):
    """Zero out rows/columns containing a zero, using O(R + C) extra space."""

    def setZeroes(self, matrix):
        """Mutate *matrix* in place: any row or column holding a 0 becomes all 0s."""
        zero_rows = {i for i, row in enumerate(matrix) if 0 in row}
        zero_cols = {j for row in matrix for j, value in enumerate(row) if value == 0}
        for i, row in enumerate(matrix):
            for j in range(len(row)):
                if i in zero_rows or j in zero_cols:
                    row[j] = 0
class Solution(object):
    """Zero out rows/columns containing a zero, using O(1) extra space."""

    def setZeroes(self, matrix):
        """Mutate *matrix* in place, using row 0 / column 0 as zero markers."""
        if not matrix:
            return
        rows, cols = len(matrix), len(matrix[0])
        # Remember whether the first column itself must be cleared.
        first_col_zero = any(row[0] == 0 for row in matrix)
        # Mark zeroed rows/columns in the first row and column.
        for row in matrix:
            for j in range(1, cols):
                if row[j] == 0:
                    row[0] = 0
                    matrix[0][j] = 0
        # Clear every interior cell whose row or column is marked.
        for i in range(1, rows):
            for j in range(1, cols):
                if matrix[i][0] == 0 or matrix[0][j] == 0:
                    matrix[i][j] = 0
        # Finally handle the marker row and column themselves.
        if matrix[0][0] == 0:
            for j in range(cols):
                matrix[0][j] = 0
        if first_col_zero:
            for row in matrix:
                row[0] = 0
if __name__ == '__main__':
    # Smoke-test the in-place solution on a few sample matrices.
    matrix1 = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
    matrix2 = [[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]]
    matrix3 = [[1, 2, 3, 4], [5, 0, 7, 8], [0, 10, 11, 12], [13, 14, 15, 0]]
    matrix4 = [[1, 0]]
    sol = Solution()
    sol.setZeroes(matrix1)
    sol.setZeroes(matrix2)
    sol.setZeroes(matrix3)
    sol.setZeroes(matrix4)
    print(matrix1)
    print(matrix2)
    print(matrix3)
print(matrix4) | mei-t/algorithm_study | LeetCode/set_matrix_zeroes.py | set_matrix_zeroes.py | py | 1,698 | python | en | code | 0 | github-code | 50 |
42275532853 | import time
import numpy as np
import torch
import torchcontrol as toco
from torchcontrol.transform import Rotation as R
from torchcontrol.transform import Transformation as T
from polymetis import RobotInterface, GripperInterface
DEFAULT_MAX_ITERS = 3
# Sampling params
GP_RANGE_UPPER = [0.7, 0.1, np.pi / 2]
GP_RANGE_LOWER = [0.4, -0.1, -np.pi / 2]
# Grasp params
REST_POSE = ([0.5, 0.0, 0.7], [1.0, 0.0, 0.0, 0.0])
PREGRASP_HEIGHT = 0.55
GRASP_HEIGHT = 0.25
PLANNER_DT = 0.02
class ManipulatorSystem:
    """High-level arm + gripper controller built on Polymetis.

    Runs a continuous Cartesian impedance policy on the arm and exposes
    simple move/grasp primitives on top of it. NOTE(review): the mutable
    default kwargs in __init__ are never mutated, so the shared-default
    pitfall does not apply here.
    """
    def __init__(self, robot_kwargs={}, gripper_kwargs={}):
        self.arm = RobotInterface(**robot_kwargs)
        self.gripper = GripperInterface(**gripper_kwargs)
        time.sleep(0.5)
        # Set continuous control policy
        self.reset_policy()
        # Reset to rest pose
        self.rest_pos = torch.Tensor(REST_POSE[0])
        self.rest_quat = torch.Tensor(REST_POSE[1])
        self.reset()
    def __del__(self):
        # Stop the torch policy when this object is garbage collected.
        self.arm.terminate_current_policy()
    def reset(self, time_to_go=2.0):
        """Return to the rest pose and open the gripper."""
        self.move_to(self.rest_pos, self.rest_quat, time_to_go)
        self.open_gripper()
    def reset_policy(self):
        """Re-home the arm and (re)start the Cartesian impedance policy."""
        # Go home
        self.arm.go_home()
        # Send PD controller
        joint_pos_current = self.arm.get_joint_positions()
        policy = toco.policies.CartesianImpedanceControl(
            joint_pos_current=joint_pos_current,
            Kp=torch.Tensor(self.arm.metadata.default_Kx),
            Kd=torch.Tensor(self.arm.metadata.default_Kxd),
            robot_model=self.arm.robot_model,
        )
        self.arm.send_torch_policy(policy, blocking=False)
    def move_to(self, pos, quat, time_to_go=2.0):
        """
        Attempts to move to the given position and orientation by
        planning a Cartesian trajectory (a set of min-jerk waypoints)
        and updating the current policy's target to that
        end-effector position & orientation.

        Returns (num successes, num attempts)
        """
        # Plan trajectory
        pos_curr, quat_curr = self.arm.get_ee_pose()
        N = int(time_to_go / PLANNER_DT)
        waypoints = toco.planning.generate_cartesian_space_min_jerk(
            start=T.from_rot_xyz(R.from_quat(quat_curr), pos_curr),
            goal=T.from_rot_xyz(R.from_quat(quat), pos),
            time_to_go=time_to_go,
            hz=1 / PLANNER_DT,
        )
        # Execute trajectory
        t0 = time.time()
        t_target = t0
        successes = 0
        for i in range(N):
            # Update traj
            ee_pos_desired = waypoints[i]["pose"].translation()
            ee_quat_desired = waypoints[i]["pose"].rotation().as_quat()
            # ee_twist_desired = waypoints[i]["twist"]
            self.arm.update_current_policy(
                {
                    "ee_pos_desired": ee_pos_desired,
                    "ee_quat_desired": ee_quat_desired,
                    # "ee_vel_desired": ee_twist_desired[:3],
                    # "ee_rvel_desired": ee_twist_desired[3:],
                }
            )
            # Check if policy terminated due to issues
            if self.arm.get_previous_interval().end != -1:
                print("Interrupt detected. Reinstantiating control policy...")
                time.sleep(3)
                self.reset_policy()
                break
            else:
                successes += 1
            # Spin once; sleep only for the remainder of this control period.
            t_target += PLANNER_DT
            t_remaining = t_target - time.time()
            time.sleep(max(t_remaining, 0.0))
        # Wait for robot to stabilize
        time.sleep(0.2)
        return successes, N
    def close_gripper(self):
        """Close the gripper and assert that it actually narrowed."""
        self.gripper.grasp(speed=0.1, force=1.0)
        time.sleep(0.5)
        # Check state
        state = self.gripper.get_state()
        assert state.width < state.max_width
    def open_gripper(self):
        """Open the gripper fully and assert that it opened."""
        max_width = self.gripper.get_state().max_width
        self.gripper.goto(width=max_width, speed=0.1, force=1.0)
        time.sleep(0.5)
        # Check state
        state = self.gripper.get_state()
        assert state.width > 0.0
    def grasp_pose_to_pos_quat(self, grasp_pose, z):
        """Convert an (x, y, rz) grasp pose at height *z* into a position
        tensor and an orientation quaternion relative to the rest pose."""
        x, y, rz = grasp_pose
        pos = torch.Tensor([x, y, z])
        quat = (
            R.from_rotvec(torch.Tensor([0, 0, rz])) * R.from_quat(self.rest_quat)
        ).as_quat()
        return pos, quat
    def grasp(self, grasp_pose0, grasp_pose1):
        """Pick at *grasp_pose0*, release at *grasp_pose1*, then reset.

        Returns (total successes, total attempts) across all moves.
        """
        results = []
        # Move to pregrasp
        pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, PREGRASP_HEIGHT)
        results.append(self.move_to(pos, quat))
        # Lower (slower than other motions to prevent sudden collisions)
        pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, GRASP_HEIGHT)
        results.append(self.move_to(pos, quat, time_to_go=4.0))
        # Grasp
        self.close_gripper()
        # Lift to pregrasp
        pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, PREGRASP_HEIGHT)
        results.append(self.move_to(pos, quat))
        # Move to new pregrasp
        pos, quat = self.grasp_pose_to_pos_quat(grasp_pose1, PREGRASP_HEIGHT)
        results.append(self.move_to(pos, quat))
        # Release
        self.open_gripper()
        # Reset
        self.reset()
        total_successes = sum([r[0] for r in results])
        total_tries = sum([r[1] for r in results])
        return total_successes, total_tries
    def continuously_grasp(self, max_iters=1000):
        """Repeatedly sample random pick/release poses and execute them
        until *max_iters* grasps complete (or KeyboardInterrupt).

        Returns (total successes, total attempts).
        """
        # Setup sampling
        gp_range_upper = torch.Tensor(GP_RANGE_UPPER)
        gp_range_lower = torch.Tensor(GP_RANGE_LOWER)
        # Perform grasping
        i = 0
        total_successes, total_tries = 0, 0
        try:
            while True:
                # Sample grasp
                grasp_pose0 = uniform_sample(gp_range_lower, gp_range_upper)
                grasp_pose1 = uniform_sample(gp_range_lower, gp_range_upper)
                # Perform grasp
                print(f"Grasp {i + 1}: grasp={grasp_pose0}, release={grasp_pose1}")
                n_successes, n_tries = self.grasp(grasp_pose0, grasp_pose1)
                total_successes += n_successes
                total_tries += n_tries
                # Loop termination
                i += 1
                if max_iters > 0 and i >= max_iters:
                    break
        except KeyboardInterrupt:
            print("Interrupted by user.")
        return total_successes, total_tries
def uniform_sample(lower, upper):
    """Draw an element-wise uniform sample from [lower, upper)."""
    span = upper - lower
    return lower + torch.rand_like(lower) * span
| PradeepKadubandi/fairo | polymetis/polymetis/python/polymetis/utils/continuous_grasper.py | continuous_grasper.py | py | 6,536 | python | en | code | null | github-code | 50 |
21485553153 | from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
app_name = 'users_regs'
# Routes for the registration / authentication flow.
# NOTE(review): URL names mix capitalisation styles ('Register', 'Mail',
# 'login'); renaming them would break reverse() callers, so left as-is.
urlpatterns = [
    path('register', views.register, name='Register'),
    path('register/add_lib', views.AddLibraryUser, name='add_libuser'),
    path('register/confirm', views.ConfirmEmail, name='confirm_mail'),
    path('register/send', views.sender, name='Mail'),
    path('login', views.Login_View.as_view(), name='login'),
    path('logout/', views.Logout, name='logout'),
]
# Serve uploaded media files locally during development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| rafaeldtr41/Alexandria | Alexandria/users_regs/urls.py | urls.py | py | 639 | python | en | code | 4 | github-code | 50 |
11317546725 | # encoding: utf-8
import torch
import cv2
import numpy as np
import pdb
def detection_collate(batch):
    """Collate fn for detection batches whose samples carry a variable
    number of ground-truth boxes.

    Arguments:
        batch: (tuple) pairs of (image tensor, numpy annotations of
            shape [num_gt, 5]).

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (tensor) [batch, max_num_gt, 5] annotations, each sample
               zero-padded up to the largest num_gt in the batch.
    """
    max_num_gt = max(sample[1].shape[0] for sample in batch)
    images = []
    padded_targets = []
    for image, gt in batch:
        images.append(image)
        # Zero-pad each annotation array to a common row count.
        padded = np.zeros((max_num_gt,) + gt.shape[1:], dtype=gt.dtype)
        padded[:gt.shape[0]] = gt
        padded_targets.append(torch.FloatTensor(padded))
    return torch.stack(images, 0), torch.stack(padded_targets, 0)
def base_transform(image, size, mean):
    """Resize *image* to (size, size) and subtract the per-channel *mean*.

    Returns a float32 numpy array; no channel reordering is performed.
    """
    x = cv2.resize(image, (size, size)).astype(np.float32)
    x -= mean
    x = x.astype(np.float32)
    return x
class BaseTransform:
    """
    For evaluation and testing.

    Resizes images to a square of side *size* and subtracts *mean*;
    boxes and labels are passed through unchanged.
    """
    def __init__(self, size, mean):
        self.size = size
        self.mean = np.array(mean, dtype=np.float32)
    def __call__(self, image, boxes=None, labels=None):
        return base_transform(image, self.size, self.mean), boxes, labels
| dd604/refinedet.pytorch | libs/data_layers/transform.py | transform.py | py | 1,659 | python | en | code | 36 | github-code | 50 |
31058614829 | import sys
import os
import http.client
import urllib.request
import json
import importlib
from datetime import datetime
import time
import VersionDetect.detect as version_detect # Version detection
import deepscans.core as advanced # Deep scan and Version Detection functions
import cmseekdb.basic as cmseek # All the basic functions
import cmseekdb.sc as source # Contains function to detect cms from source code
import cmseekdb.header as header # Contains function to detect CMS from gathered http headers
import cmseekdb.cmss as cmsdb # Contains basic info about the CMSs
import cmseekdb.dirscheck as dirscheck # Containts function to detect CMS by directory checks
import cmseekdb.robots as robots
import cmseekdb.generator as generator
import cmseekdb.result as result
def main_proc(site,cua):
    """Detect the CMS running on *site* (fetched with user agent *cua*)
    via a five-stage pipeline (headers, generator meta tag, page source,
    robots.txt, known directories), then dispatch to deep scan or
    version detection when the CMS database supports them."""
    # Check for skip_scanned
    if cmseek.skip_scanned:
        for csite in cmseek.report_index['results'][0]:
            if site == csite and cmseek.report_index['results'][0][site]['cms_id'] != '':
                cmseek.warning('Skipping {0} as it was previously scanned!'.format(cmseek.red + site + cmseek.cln))
                return
    cmseek.clearscreen()
    cmseek.banner("CMS Detection And Deep Scan")
    cmseek.info("Scanning Site: " + site)
    cmseek.statement("User Agent: " + cua)
    cmseek.statement("Collecting Headers and Page Source for Analysis")
    init_source = cmseek.getsource(site, cua)
    if init_source[0] != '1':
        cmseek.error("Aborting CMSeek! Couldn't connect to site \n Error: %s" % init_source[1])
        return
    else:
        scode = init_source[1]
        headers = init_source[2]
        # Handle redirects: ask, follow automatically, or skip per config.
        if site != init_source[3] and site + '/' != init_source[3]:
            if cmseek.redirect_conf == '0':
                cmseek.info('Target redirected to: ' + cmseek.bold + cmseek.fgreen + init_source[3] + cmseek.cln)
                if not cmseek.batch_mode:
                    follow_redir = input('[#] Set ' + cmseek.bold + cmseek.fgreen + init_source[3] + cmseek.cln + ' as target? (y/n): ')
                else:
                    follow_redir = 'y'
                if follow_redir.lower() == 'y':
                    site = init_source[3]
                    cmseek.statement("Reinitiating Headers and Page Source for Analysis")
                    tmp_req = cmseek.getsource(site, cua)
                    scode = tmp_req[1]
                    headers = tmp_req[2]
            elif cmseek.redirect_conf == '1':
                site = init_source[3]
                cmseek.info("Followed redirect, New target: " + cmseek.bold + cmseek.fgreen + init_source[3] + cmseek.cln)
                cmseek.statement("Reinitiating Headers and Page Source for Analysis")
                tmp_req = cmseek.getsource(site, cua)
                scode = tmp_req[1]
                headers = tmp_req[2]
            else:
                cmseek.statement("Skipping redirect to " + cmseek.bold + cmseek.red + init_source[3] + cmseek.cln)
    cmseek.update_log('target_url', init_source[3].rstrip('/'))
    if scode == '':
        # silly little check thought it'd come handy
        cmseek.error('Aborting detection, source code empty')
        return
    cmseek.statement("Detection Started")
    ## init variables
    cms = '' # the cms id if detected
    cms_detected = '0' # self explanatory
    detection_method = '' # ^
    ga = '0' # is generator available
    ga_content = '' # Generator content
    #print(scode)
    ## Parse generator meta tag
    parse_generator = generator.parse(scode)
    ga = parse_generator[0]
    ga_content = parse_generator[1]
    cmseek.statement("Using headers to detect CMS (Stage 1 of 5)")
    header_detection = header.check(headers)
    if header_detection[0] == '1':
        detection_method = 'header'
        cms = header_detection[1]
        cms_detected = '1'
    if cms_detected == '0':
        if ga == '1':
            # cms detection via generator
            cmseek.statement("Using Generator meta tag to detect CMS (Stage 2 of 5)")
            gen_detection = generator.scan(ga_content)
            if gen_detection[0] == '1':
                detection_method = 'generator'
                cms = gen_detection[1]
                cms_detected = '1'
        else:
            cmseek.statement('Skipping stage 2 of 5: No Generator meta tag found')
    if cms_detected == '0':
        # Check cms using source code
        cmseek.statement("Using source code to detect CMS (Stage 3 of 5)")
        source_check = source.check(scode, site)
        if source_check[0] == '1':
            detection_method = 'source'
            cms = source_check[1]
            cms_detected = '1'
    if cms_detected == '0':
        # Check cms using robots.txt
        cmseek.statement("Using robots.txt to detect CMS (Stage 4 of 5)")
        robots_check = robots.check(site, cua)
        if robots_check[0] == '1':
            detection_method = 'robots'
            cms = robots_check[1]
            cms_detected = '1'
    if cms_detected == '0':
        # Check cms using directory checks
        cmseek.statement("Using directories to detect CMS (Stage 5 of 5)")
        dirs_check = dirscheck.check(site, cua)
        if dirs_check[0] == '1':
            detection_method = 'dirscheck'
            cms = dirs_check[1]
            cms_detected = '1'
    if cms_detected == '1':
        cmseek.success('CMS Detected, CMS ID: ' + cmseek.bold + cmseek.fgreen + cms + cmseek.cln + ', Detection method: ' + cmseek.bold + cmseek.lblue + detection_method + cmseek.cln)
        cmseek.update_log('detection_param', detection_method)
        cmseek.update_log('cms_id', cms) # update log
        cmseek.statement('Getting CMS info from database') # freaking typo
        cms_info = getattr(cmsdb, cms)
        # Prefer deep scan, then version detection, then just report.
        if cms_info['deeps'] == '1' and not cmseek.light_scan and not cmseek.only_cms:
            # cmseek.success('Starting ' + cmseek.bold + cms_info['name'] + ' deep scan' + cmseek.cln)
            advanced.start(cms, site, cua, ga, scode, ga_content, detection_method, headers)
            return
        elif cms_info['vd'] == '1' and not cmseek.only_cms:
            cmseek.success('Starting version detection')
            cms_version = '0' # Failsafe measure
            cms_version = version_detect.start(cms, site, cua, ga, scode, ga_content, headers)
            cmseek.clearscreen()
            cmseek.banner("CMS Scan Results")
            result.target(site)
            result.cms(cms_info['name'],cms_version,cms_info['url'])
            cmseek.update_log('cms_name', cms_info['name']) # update log
            if cms_version != '0' and cms_version != None:
                cmseek.update_log('cms_version', cms_version) # update log
            cmseek.update_log('cms_url', cms_info['url']) # update log
            comptime = round(time.time() - cmseek.cstart, 2)
            log_file = os.path.join(cmseek.log_dir, 'cms.json')
            result.end(str(cmseek.total_requests), str(comptime), log_file)
            '''
            cmseek.result('Target: ', site)
            cmseek.result("Detected CMS: ", cms_info['name'])
            cmseek.update_log('cms_name', cms_info['name']) # update log
            if cms_version != '0' and cms_version != None:
                cmseek.result("CMS Version: ", cms_version)
                cmseek.update_log('cms_version', cms_version) # update log
            cmseek.result("CMS URL: ", cms_info['url'])
            cmseek.update_log('cms_url', cms_info['url']) # update log
            '''
            return
        else:
            # nor version detect neither DeepScan available
            cmseek.clearscreen()
            cmseek.banner("CMS Scan Results")
            result.target(site)
            result.cms(cms_info['name'],'0',cms_info['url'])
            cmseek.update_log('cms_name', cms_info['name']) # update log
            cmseek.update_log('cms_url', cms_info['url']) # update log
            comptime = round(time.time() - cmseek.cstart, 2)
            log_file = os.path.join(cmseek.log_dir, 'cms.json')
            result.end(str(cmseek.total_requests), str(comptime), log_file)
            '''
            cmseek.result('Target: ', site)
            cmseek.result("Detected CMS: ", cms_info['name'])
            cmseek.update_log('cms_name', cms_info['name']) # update log
            cmseek.result("CMS URL: ", cms_info['url'])
            cmseek.update_log('cms_url', cms_info['url']) # update log
            '''
            return
    else:
        print('\n')
        cmseek.error('CMS Detection failed, if you know the cms please help me improve CMSeeK by reporting the cms along with the target by creating an issue')
        print('''
    {2}Create issue:{3} https://github.com/Tuhinshubhra/CMSeeK/issues/new
    {4}Title:{5} [SUGGESTION] CMS detction failed!
    {6}Content:{7}
    - CMSeeK Version: {0}
    - Target: {1}
    - Probable CMS: <name and/or cms url>
    N.B: Create issue only if you are sure, please avoid spamming!
    '''.format(cmseek.cmseek_version, site, cmseek.bold, cmseek.cln, cmseek.bold, cmseek.cln, cmseek.bold, cmseek.cln))
        return
    return
| Tuhinshubhra/CMSeeK | cmseekdb/core.py | core.py | py | 9,184 | python | en | code | 2,100 | github-code | 50 |
17814324509 | """
config.py
author: gsatas
date: 2020-05-04
"""
from decifer.process_input import PURITY
THRESHOLD=0.05
class Config:
def __init__(self, mut_state, other_states, cn_props, desc_set, dcf_mode = True):
'''
mut_state: 2-tuple that indicates the CN state that the
mutation occurs in
other_states: a list of 3-tuples indicating all other mutation
states
cn_props: a dict of lists, mapping 2-tuple CN states (keys) to proportions for each sample (list value)
'''
self.mut_state = mut_state
self.other_states = other_states
self.cn_props = cn_props
self.desc_set = desc_set
self.dcf_mode = dcf_mode
def F(self, sample):
'''returns fractional copy number F'''
return self.cn_props[self.mut_state][sample] * sum(self.mut_state) \
+ sum([self.cn_props[s[:2]][sample] * sum(s[:2]) for s in self.other_states])
def cf_bounds(self, sample):
if self.dcf_mode:
return self.d_bounds(sample)
else:
return self.c_bounds(sample)
def c_bounds(self, sample):
M = sum([self.cn_props[s[:2]][sample] for s in self.other_states if s[2] > 0])
return M, M + self.cn_props[self.mut_state[:2]][sample]
def d_bounds(self, sample):
M = sum([self.cn_props[s[:2]][sample] for s in self.other_states if s in self.desc_set])
return M, M + self.cn_props[self.mut_state[:2]][sample]
#def M(self):
# return sum([self.cn_props[s[:2]] * (s[2]-1) for s in self.other_states if s[2] > 0])
def c(self, lam, sample):
return self.cn_props[self.mut_state][sample] * lam \
+ sum([self.cn_props[s[:2]][sample] for s in self.other_states if s[2] >= 1])
def v(self, lam, sample):
F = self.F(sample)
val = self.cn_props[self.mut_state][sample] * lam \
+ sum([self.cn_props[s[:2]][sample] * s[2] for s in self.other_states])
return 1./F * val
def d(self, lam, sample):
# multiplies lam by SSCN CN proportion in which mutation arose, canceling out earlier division in v_to_lam
return self.cn_props[self.mut_state][sample] * lam \
+ sum([self.cn_props[s[:2]][sample] for s in self.other_states if s in self.desc_set])
def c_to_lam(self, c, sample):
if self.cn_props[self.mut_state][sample] == 0: return 0
lam = (c - sum([self.cn_props[s[:2]][sample] for s in self.other_states if s[2] >= 1]))/self.cn_props[self.mut_state][sample]
return lam
def v_to_lam(self, v, sample):
if self.cn_props[self.mut_state][sample] == 0: return 0
# sum term iterates across other_states in which m (of (x,y,m)) >=1
# multiplies CN proportions for these states by m
# and divides by the SSCN CN proportion in which mutation arose
# note: other_states include any genotype (x,y,m) with CN state != mut_state
lam = (v*self.F(sample) - sum([self.cn_props[s[:2]][sample]*s[2] for s in self.other_states if s[2] >= 1]))/self.cn_props[self.mut_state][sample]
return lam
def d_to_lam(self, d, sample):
if self.cn_props[self.mut_state][sample] == 0: return 0
lam = (d - sum([self.cn_props[s[:2]][sample] for s in self.other_states if s in self.desc_set]))/self.cn_props[self.mut_state][sample]
return lam
def v_to_cf(self, v, sample, truncate = True):
# calls d_to_v or c_to_v depending on dcf_mode
if self.dcf_mode:
cf = self.v_to_d(v, sample, truncate)
else:
cf = self.v_to_c(v, sample, truncate)
return min(max(cf, 0.0), PURITY[sample])
def cf_to_v(self, c, sample, truncate = True):
# calls d_to_v or c_to_v depending on dcf_mode
if not (self.cf_bounds(sample)[0] - THRESHOLD <= c <= self.cf_bounds(sample)[1] + THRESHOLD):
return False
if self.dcf_mode:
v = self.d_to_v(c, sample)
else:
v = self.c_to_v(c, sample)
assert v is False or 0.0 <= v <= 1.0
return v
def c_to_v(self, c, sample):
lam = self.c_to_lam(c, sample)
v = self.v(lam, sample)
if (lam > -THRESHOLD and lam < self.lam_limit(sample) + THRESHOLD):
if v < 0: return 0.0
if v > 1: return 1.0
return v
else: return False
def v_to_c(self, v, sample, truncate = True):
# If truncrate is true, then it returns False when there is no feasible
# ccf that would result in v. If truncate is False, then it returns the
# nearest feasible ccf. The latter is used for visualization purposes mainly
lam = self.v_to_lam(v, sample)
c = self.c(lam, sample)
if truncate:
if (lam > -THRESHOLD and lam < self.lam_limit(sample) + THRESHOLD):
if c < 0: return 0.0
if c > PURITY[sample]: return PURITY[sample]
return c
else: return False
else:
return c
def d_to_v(self, d, sample):
lam = self.d_to_lam(d, sample)
v = self.v(lam, sample)
if (lam > -THRESHOLD and lam < self.lam_limit(sample) + THRESHOLD):
if v < 0: return 0.0
if v > 1: return 1.0
return v
else: return False
def v_to_d(self, v, sample, truncate = True):
# If truncrate is true, then it returns False when there is no feasible
# ccf that would result in v. If truncate is False, then it returns the
# nearest feasible ccf. The latter is used for visualization purposes mainly
lam = self.v_to_lam(v, sample)
d = self.d(lam, sample)
if truncate:
if (lam > -THRESHOLD and lam < self.lam_limit(sample) + THRESHOLD):
if d < 0: return 0.0
if d > PURITY[sample]: return PURITY[sample]
return d
else: return False
else:
return d
def add_sample(self, cn_props, mut_label = None):
# NOTE, this assumes samples are always in the same order in the input file
# and that all copy-number states are included for all samples even if the prop. is 0
try:
# ensure same number of CN states for this sample compared to previous samples
assert(len(cn_props) == len(self.cn_props))
# ensure the CN states are also identical
assert(set(cn_props.keys()) == set(self.cn_props.keys()) )
except AssertionError:
print(cn_props)
print(self.cn_props)
raise Exception("The same copy-number states and proportions must be provided across samples for a mutation, even when the proportion is 0. Mutation {}".format(mut_label))
# append CN proportions for this sample
for c in cn_props:
self.cn_props[c].append(cn_props[c])
def lam_limit(self, sample):
# if mut_state is 1,1,upper bound of lambda is the proportion of diploid cells that are tumor cells
# o.w. upper bound is 1
if self.mut_state == tuple((1,1)):
return PURITY[sample]
else:
return 1.0
| raphael-group/decifer | src/decifer/config.py | config.py | py | 7,278 | python | en | code | 19 | github-code | 50 |
45690606129 | from locust import HttpLocust, TaskSet, task
from flask import json
class UserBehaviour(TaskSet):
    """Locust task set exercising the /lang CRUD endpoints.

    Fixes: removed dead commented-out code and a stray debug print, and
    de-duplicated the identical JSON header dicts into one shared constant.
    Task names and weights are unchanged.
    """

    # Shared headers for every JSON request.
    JSON_HEADERS = {'content-type': 'application/json', 'Accept-Encoding': 'gzip'}

    @task(10)
    def returnall(self):
        """List all languages."""
        self.client.get("/lang")

    @task(20)
    def health_check(self):
        """Hit the service health endpoint."""
        self.client.get("/health-check")

    @task(30)
    def add_one(self):
        """Create a language entry."""
        self.client.post(url="/lang", data=json.dumps({
            "name": "ruby"
        }), headers=self.JSON_HEADERS)

    @task(30)
    def edit_one(self):
        """Rename the 'python' entry to 'ruby'."""
        self.client.put(url="/lang/python", data=json.dumps({
            "name": "ruby"}), headers=self.JSON_HEADERS)

    @task(10)
    def remove_one(self):
        """Delete the 'python' entry."""
        self.client.delete(url="/lang/python", headers=self.JSON_HEADERS)
class WebsiteUser(HttpLocust):
    """Locust user running UserBehaviour with a 5-15 s wait between tasks."""
    task_set = UserBehaviour
    min_wait = 5000
    max_wait = 15000
| sathish108/pipeline | locustfile.py | locustfile.py | py | 1,296 | python | en | code | 0 | github-code | 50 |
29578740955 | from collection import util
class Star():
    """One star-of-the-game row parsed from a game-summary HTML table."""
    def __init__(self):
        pass
    def __repr__(self):
        # Fixed-width columns: rank, team, position, player number.
        return '{:1} {:3} {:1} {:>2}'.format(
            self.rank,
            self.team,
            self.position,
            self.player
        )
    def set_stars(self, soup):
        """Parse the stars table from *soup* and store Star objects on
        self.stars.

        NOTE(review): expects soup.table.table to be the stars table;
        stops at the first row with a blank team cell.
        """
        star_table = soup.table.table
        stars = []
        for tr in star_table('tr'):
            (rank, team, position, player) = [td.text for td in tr('td')]
            # sometimes no stars given, which causes blank team
            if len(team) == 0: break
            s = Star()
            # rank can be inferred by order in list
            s.rank = int(rank[0])
            s.team = team
            s.position = position
            s.player = util.get_integer(player)
            stars.append(s)
        self.stars = stars
| thebend/fantasy | collection/nhlreport/gs/gs_star.py | gs_star.py | py | 650 | python | en | code | 0 | github-code | 50 |
1952734128 | #
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess, math, pickle, imp
sys.path.append('/home/unix/maxwshen/')
import fnmatch
import numpy as np
from collections import defaultdict
from mylib import util
import pandas as pd
import _data
# Default params
inp_dir = _config.OUT_PLACE + 'be_combin_12kChar_simulated_bystander/'
NAME = util.get_fn(__file__)
treat_control_df = pd.read_csv(_config.DATA_DIR + 'treatment_control_design.csv', index_col = 0)
@util.time_dec
def main():
  """Combine per-split simulated-bystander stats CSVs into one CSV per
  12kChar base-editor treatment (Cas9 and U2OS treatments are skipped).

  Fixes: removed the unused `mdf` and `data` locals, and replaced the
  quadratic DataFrame.append-in-a-loop pattern (removed in pandas >= 2.0)
  with a single pd.concat over the collected splits.
  """
  print(NAME)
  for _, row in treat_control_df.iterrows():
    treat_nm = row['Treatment']
    # Only base-editor treatments on the 12kChar library, excluding U2OS.
    if 'Cas9' in treat_nm:
      continue
    lib_nm = _data.get_lib_nm(treat_nm)
    if lib_nm != '12kChar':
      continue
    if 'U2OS' in treat_nm:
      continue
    num_targets = 12000
    num_targets_per_split = 2000
    print(treat_nm)
    # Gather every split first, then concatenate once.
    split_dfs = []
    for start_idx in range(0, num_targets, num_targets_per_split):
      stats_fn = inp_dir + '%s_%s_%s_stats.csv' % (treat_nm, start_idx, start_idx + num_targets_per_split - 1)
      split_dfs.append(pd.read_csv(stats_fn, index_col = 0))
    stats_df = pd.concat(split_dfs, ignore_index = True)
    stats_df.to_csv(inp_dir + '%s.csv' % (treat_nm))
  print('Done')
  return
if __name__ == '__main__':
main() | maxwshen/lib-analysis | be2_combin_12kChar_simbys_combine.py | be2_combin_12kChar_simbys_combine.py | py | 1,378 | python | en | code | 2 | github-code | 50 |
17293847208 | from nmtwizard.preprocess import prepoperator
@prepoperator.register_operator("length_filter")
class LengthFilter(prepoperator.Filter):
    """Filter translation units by per-side character/word limits and by
    the source/target word-count ratio."""
    def __init__(self, config, process_type, build_state):
        source_config = _get_side_config(config, 'source')
        target_config = _get_side_config(config, 'target')
        filters = []
        # Per-side limits: characters on the detokenized text, words on
        # the first tokenization.
        filters.extend(_get_side_filters(
            source_config,
            lambda tu: tu.src_detok,
            lambda tu: tu.src_tok.tokens[0]))
        filters.extend(_get_side_filters(
            target_config,
            lambda tu: tu.tgt_detok,
            lambda tu: tu.tgt_tok.tokens[0]))
        # Source/target token-count ratio limits.
        # NOTE(review): these divide by the target token count; empty
        # targets are presumably removed first by the default min_words=1
        # — confirm filter evaluation order in the Filter base class.
        min_words_ratio = config.get('min_words_ratio')
        if min_words_ratio is not None:
            filters.append(lambda tu: (
                len(tu.src_tok.tokens[0]) / len(tu.tgt_tok.tokens[0]) < min_words_ratio))
        max_words_ratio = config.get('max_words_ratio')
        if max_words_ratio is not None:
            filters.append(lambda tu: (
                len(tu.src_tok.tokens[0]) / len(tu.tgt_tok.tokens[0]) > max_words_ratio))
        super(LengthFilter, self).__init__(filters)
def _get_side_config(config, side):
config = config.get(side, {})
# Filter empty sentences by default.
config.setdefault('min_words', 1)
return config
def _get_side_filters(config, chars_fn, words_fn):
filters = []
max_chars = config.get('max_characters')
if max_chars is not None:
filters.append(lambda tu: len(chars_fn(tu)) > max_chars)
max_words = config.get('max_words')
if max_words is not None:
filters.append(lambda tu: len(words_fn(tu)) > max_words)
min_words = config.get('min_words')
if min_words is not None:
filters.append(lambda tu: len(words_fn(tu)) < min_words)
return filters
| alexisdoualle/nmt-serving | nmtwizard/preprocess/operators/length_filter.py | length_filter.py | py | 1,820 | python | en | code | 0 | github-code | 50 |
26977311897 | import random
# Simple timed-free addition quiz: ask N problems, tally right/wrong answers.
num_problems = int(input("How many addition problems would you like? "))

total_correct = 0
total_incorrect = 0

for i in range(1, num_problems + 1):
    print("Problem %d" % i)
    # randint(0, 10) draws an inclusive 0..10 directly — the idiomatic
    # replacement for int(random.random() * 11).
    num1 = random.randint(0, 10)
    num2 = random.randint(0, 10)
    user_response = int(input("%d + %d = " % (num1, num2)))
    if num1 + num2 == user_response:
        print("That is correct!")
        total_correct += 1
    else:
        print("That is incorrect.")
        total_incorrect += 1

print("Total number correct: %d" %total_correct)
print("Total number incorrect: %d" %total_incorrect) | palenq/usc_summerCamp_labs | additionTest.py | additionTest.py | py | 604 | python | en | code | 0 | github-code | 50 |
17371375584 | """
Implemente um programa que gere aleatoriamente um CAPTCHA de seis caracteres, o qual obrigatoriamente deve conter: letras
maiúsculas, letras minúscula e dígitos. O programa deve exibir o CAPTCHA gerado e solicitar que o usuário digite o valor
exibido. Em seguida, o programa deve ler o texto digitado pelo usuário e verificar se este corresponde ao CAPTCHA gerado.
Observe que, durante esta comparação, não se faz diferença entre letras maiúsculas ou minúsculas. O programa deveimprimir
uma mensagem dizendo se o usuário passou ou não do teste.
"""
from random import *
vet_min = []
vet_mai = []
vet_num = []
vet_captcha = []

# Build the candidate pools: all 26 lowercase letters, all 26 uppercase
# letters and the 10 digits. (range(25) would silently drop 'z' and 'Z'.)
for i in range(26):
    vet_min.append(chr(97+i))
for i in range(26):
    vet_mai.append(chr(65+i))
for i in range(10):
    vet_num.append(i)

continuar = True
minu = False
mai = False
num = False

# Keep drawing 6-character CAPTCHAs until one contains at least one
# lowercase letter, one uppercase letter and one digit.
while continuar:
    for i in range(6):
        n = randint(0,2)
        if n == 0:
            # Index over the whole pool so the last letter can be chosen too.
            vet_captcha.append(vet_min[randint(0, len(vet_min) - 1)])
            minu = True
        elif n == 1:
            vet_captcha.append(vet_mai[randint(0, len(vet_mai) - 1)])
            mai = True
        else:
            num = True
            vet_captcha.append(vet_num[randint(0, len(vet_num) - 1)])
    if minu == False or mai == False or num == False:
        # Missing one of the required character classes: discard and retry.
        print(vet_captcha)
        print("Limpando captcha")
        mai = False
        minu = False
        num = False
        vet_captcha.clear()
        print(vet_captcha)
    else:
        continuar = False

# Join the drawn characters (letters and int digits) into one string.
captcha = ""
for i in range(6):
    captcha = captcha + str(vet_captcha[i])

print("Digite o seguinte CAPTCHA:")
print(captcha)
resposta = str(input("Digite o captcha por favor:"))
# The exercise statement requires a case-insensitive comparison, so compare
# lowercased copies of both strings.
if resposta.lower() == captcha.lower():
    print("Acertou")
else:
    print("Errou")
| ItaloRamillys/Trabalhos-Python-UFC---FUP---2017.1 | 1.13.py | 1.13.py | py | 1,819 | python | pt | code | 0 | github-code | 50 |
33553866817 | import sqlite3
import time
import zlib
import string
# Read county air-quality values from SQLite and emit a d3 word-cloud
# data file (gword.js) with font sizes scaled by value.
conn = sqlite3.connect('airq3.sqlite')
cur = conn.cursor()
#future update, values=list() from the start
cur.execute('SELECT County.name, Yr03.value FROM County JOIN Yr03 ON County.id = Yr03.county_id ORDER BY value DESC')
values = dict()
for val_row in cur :
    values[val_row[0]] = val_row[1]
    #print(values)
    #print(values[val_row[0]])
    #values = val_row
# Re-sort in Python by value, descending, and keep the top 100 counties.
lvalues = list(values.items())
ordered = sorted(lvalues, key=lambda x: x[1], reverse=True)
top100 = ordered[0:100]
# NOTE(review): assumes at least 100 rows (top100[99]) and that
# highest != lowest — otherwise IndexError / ZeroDivisionError below.
highest = top100[0][1]
print(highest)
lowest = top100[99][1]
print(lowest)
# Spread the font sizes across 20-100 based on the count
# (actual range used below is smallsize..smallsize+bigsize, i.e. 15..65).
bigsize = 50
smallsize = 15
fhand = open('gword.js','w')
fhand.write("gword = [")
first = True
for k in top100:
    if not first : fhand.write( ",\n")
    first = False
    print(k)
    # Linearly rescale the value into [0, 1], then into the font-size range.
    size = k[1]
    size = (size - lowest) / float(highest - lowest)
    size = int((size * bigsize) + smallsize)
    fhand.write("{text: '"+k[0]+"', size: "+str(size)+"}")
fhand.write( "\n];\n")
fhand.close()
print("Output written to gword.js")
print("Open gword.htm in a browser to see the vizualization")
| SaltyHobo/Capstone-Python | gword.py | gword.py | py | 1,166 | python | en | code | 0 | github-code | 50 |
74627642396 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the flashcards app.

    Drops the User_History model (removing its two foreign keys first),
    adds Card.Categories, Deck.Accessed_Date and Deck.Public, and relaxes
    several Card fields to allow null/blank values.
    """

    dependencies = [
        ('flashcards', '0004_merge'),
    ]

    operations = [
        # User_History removal: drop its FK fields before deleting the model.
        migrations.RemoveField(
            model_name='user_history',
            name='Deck_ID',
        ),
        migrations.RemoveField(
            model_name='user_history',
            name='User_ID',
        ),
        migrations.DeleteModel(
            name='User_History',
        ),
        # New fields.
        migrations.AddField(
            model_name='card',
            name='Categories',
            field=models.TextField(null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='deck',
            name='Accessed_Date',
            field=models.DateTimeField(null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='deck',
            name='Public',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        # Relax existing Card fields to be optional.
        migrations.AlterField(
            model_name='card',
            name='Back_Img_ID',
            field=models.ForeignKey(related_name=b'Back_Image', blank=True, to='flashcards.Image', null=True),
        ),
        migrations.AlterField(
            model_name='card',
            name='Back_Text',
            field=models.TextField(null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='card',
            name='Difficulty',
            field=models.IntegerField(null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='card',
            name='Front_Img_ID',
            field=models.ForeignKey(related_name=b'Front_Image', blank=True, to='flashcards.Image', null=True),
        ),
        migrations.AlterField(
            model_name='card',
            name='Front_Text',
            field=models.TextField(null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='card',
            name='Last_Attempted',
            field=models.DateTimeField(null=True, blank=True),
        ),
    ]
| latreides/SE_Team3 | flashcards/migrations/0005_auto_20141006_0132.py | 0005_auto_20141006_0132.py | py | 2,205 | python | en | code | 0 | github-code | 50 |
4521912815 | """ miscellaneous utilities
"""
import itertools
from phydat import ptab
def is_odd_permutation(seq1, seq2):
    """ Determine whether a permutation of a sequence is odd.

    :param seq1: the first sequence
    :param seq2: the second sequence, which must be a permuation of the first
    :returns: True if the permutation is odd, False if it is even
    :rtype: bool
    """
    # (Docstring fixed: it previously described the return value of
    # is_even_permutation, with even/odd inverted.)
    return not is_even_permutation(seq1, seq2)


def is_even_permutation(seq1, seq2):
    """ Determine whether a permutation of a sequence is even or odd.

    Counts the transpositions needed to sort seq2's index permutation;
    an even count means an even permutation.

    :param seq1: the first sequence
    :param seq2: the second sequence, which must be a permuation of the first
    :returns: True if the permutation is even, False if it is odd
    :rtype: bool
    """
    size = len(seq1)
    # Elements must be unique, and seq2 must be a rearrangement of seq1.
    assert sorted(seq1) == sorted(seq2) and len(set(seq1)) == size

    perm = [seq2.index(val) for val in seq1]

    # Selection-sort the permutation; each swap flips the sign.
    sgn = 1
    for idx in range(size):
        if perm[idx] != idx:
            sgn *= -1
            swap_idx = perm.index(idx)
            perm[idx], perm[swap_idx] = perm[swap_idx], perm[idx]

    parity = (sgn == 1)

    return parity
def equivalence_partition(iterable, relation, perfect=False):
    """Partitions a set of objects into equivalence classes
    canned function taken from https://stackoverflow.com/a/38924631
    Args:
        iterable: collection of objects to be partitioned
        relation: equivalence relation. I.e. relation(o1,o2) evaluates to True
            if and only if o1 and o2 are equivalent
        perfect: is this a perfect equivalence relation, where a = c and b = c
            guarantees a = b? if not, an extra search is performed to make sure
            that a, b, and c still end up in the same class
    Returns: classes, partitions
        classes: A sequence of sets. Each one is an equivalence class
    """
    # 1. This part only works assuming it is a 'perfect' equivalence relation,
    # where a = c and b = c implies a = b
    classes = []
    for obj in iterable:  # for each object
        # find the class it is in (compare against one representative element)
        found = False
        for cls in classes:
            # is it equivalent to this class?
            if relation(next(iter(cls)), obj):
                cls.add(obj)
                found = True
                break
        if not found:  # it is in a new class
            classes.append(set([obj]))
    # 2. Now, account for the possibility of 'imperfect' equivalence relations,
    # where the relation gives a = c and b = c, but not a = b, and yet we still
    # want a, b, and c to end up in the same class
    if not perfect:
        new_classes = []
        while True:
            # NOTE: cls1 |= cls2 mutates sets shared between `classes` and
            # `new_classes`; the loop repeats until a pass merges nothing.
            new_classes = classes.copy()
            for cls1, cls2 in itertools.combinations(classes, r=2):
                if any(relation(o1, o2)
                       for o1, o2 in itertools.product(cls1, cls2)):
                    if cls2 in new_classes:
                        new_classes.remove(cls2)
                        cls1 |= cls2
            if classes == new_classes:
                break
            classes = new_classes
    return classes
# Useful functions on Python objects
def move_item_to_front(lst, item):
    """ Move an item to the front of a list.

    Only the first occurrence of `item` is moved.

    :param lst: the list
    :type lst: list or tuple
    :param item: the item, which must be in `lst`
    :returns: the list, with the item moved to front
    :rtype: tuple
    """
    items = list(lst)
    pos = items.index(item)
    reordered = [items[pos]] + items[:pos] + items[pos + 1:]
    return tuple(reordered)
def breakby(lst, elem):
    """ Break a list by element, dropping the element itself.

    Analogous to '<char>'.split('<string>') for strings, except that empty
    groups (from leading/trailing/consecutive separators) are dropped.
    """
    groups = []
    current = []
    for val in lst:
        if val == elem:
            if current:
                groups.append(tuple(current))
                current = []
        else:
            current.append(val)
    if current:
        groups.append(tuple(current))
    return tuple(groups)
def separate_negatives(lst):
    """ Seperate a list of numbers into negative and nonnegative (>= 0)
    """
    neg_vals = []
    nonneg_vals = []
    for val in lst:
        # Route each value to the matching bucket, preserving input order.
        (neg_vals if val < 0 else nonneg_vals).append(val)
    return tuple(neg_vals), tuple(nonneg_vals)
def value_similar_to(val, lst, thresh):
    """ Check if a value is close to some lst of values within some threshold
    """
    for ref_val in lst:
        if abs(val - ref_val) < thresh:
            return True
    return False
def scale_iterable(iterable, scale_factor):
    """ Scale some type of iterable of floats by a scale factor

    :param iterable: numbers to scale; lists stay lists, while tuples and
        any other iterable are returned as a tuple
    :param scale_factor: multiplier applied to every element
    :returns: the scaled values
    :rtype: list or tuple
    """
    scaled = (val * scale_factor for val in iterable)
    if isinstance(iterable, list):
        # Preserve the container type for lists ...
        scaled_iterable = list(scaled)
    else:
        # ... and fall back to a tuple otherwise. (The original raised
        # UnboundLocalError for anything that was not a list or tuple.)
        scaled_iterable = tuple(scaled)
    return scaled_iterable
def numpy_to_float(iterable):
    """ Convert a numpy array to a tuple of floats
    """
    converted = []
    for val in iterable:
        # .item() unboxes a numpy scalar into the native Python type.
        converted.append(val.item())
    return tuple(converted)
def remove_duplicates_with_order(lst):
    """ Remove all duplicates of a list while not reordering the list.

    Lists come back as lists, tuples as tuples; any other object is
    returned unchanged. Uses equality, so unhashable items are fine.
    """
    def _dedupe(seq):
        kept = []
        for val in seq:
            if val not in kept:
                kept.append(val)
        return kept

    if isinstance(lst, list):
        lst = _dedupe(lst)
    if isinstance(lst, tuple):
        lst = tuple(_dedupe(lst))
    return lst
def sort_by_list(lst, ref_lst, include_missing=True):
    """ Order the elements of the list by using the priorities given
        by some reference lst.

        if include_missing:
            a=[q, a, e, x, f, t], ref=[x, a, q, e] -> sort_a=[x, a, q, e, f, t]
        if not include_missing:
            a=[q, a, e, x, f], ref=[x, a, q, e] -> sort_a=[x, a, q, e]

        Elements absent from the reference list are either appended after
        the sorted part (default) or dropped.

        :param lst: list to sort
        :type lst: tuple
        :param ref_lst: list which sets the order of the previous list
        :type ref_lst: tuple
        :rtype: tuple
    """
    in_ref = [val for val in lst if val in ref_lst]
    missing = [val for val in lst if val not in ref_lst]

    # Order the known elements by their position in the reference list.
    in_ref.sort(key=ref_lst.index)

    sorted_vals = tuple(in_ref)
    if include_missing:
        sorted_vals = sorted_vals + tuple(missing)
    return sorted_vals
def formula_from_symbols(symbs):
    """ Build a molecular formula from a list of atomic symbols.

    (note: dummy atoms will be filtered out and cases will be standardized)

    :param symbs: atomic symbols
    :type symbs: tuple(str)
    :rtype: dict[str: int]
    """
    # Standardize casing via ptab.to_symbol, then drop anything without an
    # atomic number (dummy atoms). Returns a symbol -> count mapping.
    symbs = list(filter(ptab.to_number, map(ptab.to_symbol, symbs)))
    return _unique_item_counts(symbs)
def _unique_item_counts(iterable):
""" Build a dictionary giving the count of each unique item in a sequence.
:param iterable: sequence to obtain counts for
:type iterable: iterable object
:rtype: dict[obj: int]
"""
items = tuple(iterable)
return {item: items.count(item) for item in sorted(set(items))}
| Auto-Mech/autochem | automol/util/_util.py | _util.py | py | 6,901 | python | en | code | 3 | github-code | 50 |
17847099395 | #!/usr/bin/env python
#_*_coding:utf-8_*_
import argparse
import re
from collections import Counter
import numpy
import itertools
def readFasta(file):
    """Parse a FASTA file into a list of [name, sequence] pairs.

    Sequences are upper-cased and every character outside the 20 standard
    amino-acid one-letter codes (or '-') is replaced by '-'. Exits the
    process if the file contains no '>' record marker.

    :param file: path to the FASTA file
    :returns: list of [name, sequence] lists
    """
    import sys  # local import: the module never imports sys, so the
                # original error path raised NameError instead of exiting

    with open(file) as f:
        records = f.read()

    if re.search('>', records) is None:
        print('The input file seems not in fasta format.')
        sys.exit(1)

    records = records.split('>')[1:]
    myFasta = []
    for fasta in records:
        array = fasta.split('\n')
        # Name is the first whitespace-separated token of the header line.
        name = array[0].split()[0]
        sequence = re.sub('[^ARNDCQEGHILKMFPSTWYV-]', '-', ''.join(array[1:]).upper())
        myFasta.append([name, sequence])
    return myFasta
def Count(seq1, seq2):
    """Total number of occurrences in seq2 of each character of seq1.

    Characters repeated in seq1 are counted once per repetition.
    """
    return sum(seq2.count(aa) for aa in seq1)
def CTDC(fastas):
    """Compute CTD Composition descriptors for a list of FASTA records.

    For each of 13 physico-chemical properties, every amino acid belongs
    to one of three groups; the descriptor is the fraction of residues in
    each group (c3 = 1 - c1 - c2).

    :param fastas: list of [name, sequence] pairs (as from readFasta);
        '-' gap characters are stripped before computing fractions.
        (Parameter fixed: it was misspelled `fatas` while the body read
        `fastas`, which only worked by accident via the __main__ global.)
    :returns: list of rows; row 0 is the header, each following row is
        [name, c1, c2, c3, ...] with 3 values per property
    """
    group1 = {
        'hydrophobicity_PRAM900101': 'RKEDQN',
        'hydrophobicity_ARGP820101': 'QSTNGDE',
        'hydrophobicity_ZIMJ680101': 'QNGSWTDERA',
        'hydrophobicity_PONP930101': 'KPDESNQT',
        'hydrophobicity_CASG920101': 'KDEQPSRNTG',
        'hydrophobicity_ENGD860101': 'RDKENQHYP',
        'hydrophobicity_FASG890101': 'KERSQD',
        'normwaalsvolume': 'GASTPDC',
        'polarity': 'LIFWCMVY',
        'polarizability': 'GASDT',
        'charge': 'KR',
        'secondarystruct': 'EALMQKRH',
        'solventaccess': 'ALFCGIVW'
    }
    group2 = {
        'hydrophobicity_PRAM900101': 'GASTPHY',
        'hydrophobicity_ARGP820101': 'RAHCKMV',
        'hydrophobicity_ZIMJ680101': 'HMCKV',
        'hydrophobicity_PONP930101': 'GRHA',
        'hydrophobicity_CASG920101': 'AHYMLV',
        'hydrophobicity_ENGD860101': 'SGTAW',
        'hydrophobicity_FASG890101': 'NTPG',
        'normwaalsvolume': 'NVEQIL',
        'polarity': 'PATGS',
        'polarizability': 'CPNVEQIL',
        'charge': 'ANCQGHILMFPSTWYV',
        'secondarystruct': 'VIYCWFT',
        'solventaccess': 'RKQEND'
    }
    group3 = {
        'hydrophobicity_PRAM900101': 'CLVIMFW',
        'hydrophobicity_ARGP820101': 'LYPFIW',
        'hydrophobicity_ZIMJ680101': 'LPFYI',
        'hydrophobicity_PONP930101': 'YMFWLCVI',
        'hydrophobicity_CASG920101': 'FIWC',
        'hydrophobicity_ENGD860101': 'CVLIMF',
        'hydrophobicity_FASG890101': 'AYHWVMFLIC',
        'normwaalsvolume': 'MHKFRYW',
        'polarity': 'HQRKNED',
        'polarizability': 'KMHFRYW',
        'charge': 'DE',
        'secondarystruct': 'GNPSD',
        'solventaccess': 'MSPTHY'
    }

    groups = [group1, group2, group3]
    property = (
    'hydrophobicity_PRAM900101', 'hydrophobicity_ARGP820101', 'hydrophobicity_ZIMJ680101', 'hydrophobicity_PONP930101',
    'hydrophobicity_CASG920101', 'hydrophobicity_ENGD860101', 'hydrophobicity_FASG890101', 'normwaalsvolume',
    'polarity', 'polarizability', 'charge', 'secondarystruct', 'solventaccess')

    # Header row: '#' then '<property>.G1/.G2/.G3' for each property.
    encodings = []
    header = ['#']
    for p in property:
        for g in range(1, len(groups) + 1):
            header.append(p + '.G' + str(g))
    encodings.append(header)

    for i in fastas:
        name, sequence = i[0], re.sub('-', '', i[1])
        code = [name]
        for p in property:
            # Fractions of residues falling in groups 1 and 2; group 3 is
            # the remainder so the three always sum to 1.
            c1 = Count(group1[p], sequence) / len(sequence)
            c2 = Count(group2[p], sequence) / len(sequence)
            c3 = 1 - c1 - c2
            code = code + [c1, c2, c3]
        encodings.append(code)
    return encodings
def savetsv(encodings, file = 'encoding.tsv'):
    """Write the encoding matrix to a tab-separated file.

    Row 0 of `encodings` is the header; every later row starts with the
    sample name followed by float-formatted values. An `encodings` value
    of 0 marks a failed descriptor calculation.
    """
    with open(file, 'w') as f:
        if encodings == 0:
            f.write('Descriptor calculation failed.')
        else:
            f.write('\t'.join(encodings[0]) + '\n')
            for row in encodings[1:]:
                cells = [row[0]] + [str(float(v)) for v in row[1:-1]]
                cells.append(str(float(row[-1])))
                f.write('\t'.join(cells) + '\n')
    return None
import collections
def merge(lst1, lst2):
    """Append the last element of each row of lst2 to the matching row of lst1."""
    merged = []
    for i, row in enumerate(lst1):
        merged.append(row + [lst2[i][-1]])
    return merged
if __name__ == "__main__":
	# CLI entry point: --file is the input FASTA, --out the output TSV
	# (defaults to 'encoding.tsv').
	parser = argparse.ArgumentParser(description='Process fasta input with random forest virulence prediction model')
	parser.add_argument("--file", required=True, help="input fasta file")
	parser.add_argument("--out", dest='outFile',
						help="the generated descriptor file")
	args = parser.parse_args()
	fastas = readFasta(args.file)
	encodings = CTDC(fastas)
	outFile = args.outFile if args.outFile != None else 'encoding.tsv'
	savetsv(encodings, outFile)
| samnooij/PathoFact | scripts/CTDC.py | CTDC.py | py | 4,014 | python | en | code | 3 | github-code | 50 |
39455640742 | import socket
# Toy ARP/RARP client: sends an IP (ARP) or MAC (RARP) to a local server
# and prints the translated address it receives back.
s = socket.socket()
port = 8458
s.connect(('127.0.0.1', port))
print("\n Connected !!!")
opt = input("ARP or RARP : ")
# NOTE(review): any answer other than exactly 'ARP'/'RARP' leaves `inp`
# unbound and the sendall below raises NameError.
if opt == "ARP":
    inp = input("EnterIP : ")
elif opt == 'RARP':
    inp = input('Enter Mac : ')
s.sendall(bytes(inp,'utf-8'))
if opt == 'ARP':
    print("Mac ID : ",s.recv(1024).decode())
elif opt == 'RARP':
    print("IP : ",s.recv(1024).decode())
s.close() | cdaman123/BTech_6_Lab | dccn/arp_client.py | arp_client.py | py | 408 | python | en | code | 3 | github-code | 50 |
33120973307 | from os import path, remove
from importlib import import_module
from sys import executable, path as syspath, argv
from time import sleep
from .config import Config
from .plugin import Plugin
from . import utils
class cmdQ (object):
    """Command-queue manager.

    Owns the configuration, logger, database model, worker-process
    lifecycle (start/stop) and plugin dispatch.
    """

    def __init__ (self, configfile):
        """Load config, build the logger and model, locate the worker
        binary, read the current worker PIDs and load plugins."""
        self.config = Config(configfile)
        self.logger = utils.getLogger(logfile = self.config.log.file, loglevel = self.config.log.level, stream = True)
        # The model class is named '<database>Model' inside .models.<database>.
        modelname = self.config.general.database + 'Model'
        model = import_module('.models.%s' % self.config.general.database, __package__)
        self.model = getattr(model, modelname)(self)
        msgs = self.model.createTableIfNotExists()
        if msgs:
            for msg in msgs:
                self.logger.info (msg)
        # cqworker is expected to sit next to the launching script.
        self.workerbin = [path.join(path.dirname(path.realpath(argv[0])), 'cqworker'), configfile]
        self.workerpid, self.workers = self.model.getWorkerPid()
        self.plugins = self.getPlugins()
        Plugin.call(self.plugins, 'onQueueInit')

    def isWorkerAlive (self):
        """Return True when a worker PID is recorded and that process lives."""
        if self.workerpid is None:
            return False
        return utils.pidIsAlive(self.workerpid)

    def stopWorker (self):
        """Kill the subworkers, then the main worker, and clear the DB record."""
        if self.isWorkerAlive():
            self.logger.info('Stoping worker (PID %s) ...' % self.workerpid)
            for w in self.workers:
                r = utils.pidKill(w)
                if not r:
                    self.logger.warning ('  Failed to stop subworker (PID: %s).' % w)
                else:
                    self.logger.info ('  Subworker (PID: %s) stopped.' % w)
            r = utils.pidKill (self.workerpid)
            if not r:
                self.logger.warning ('  Failed to stop worker (PID: %s).' % self.workerpid)
            else:
                self.logger.info ('  Worker (PID: %s) stopped.' % self.workerpid)
            self.model.updateWorker(pid = None, workers = [])
            Plugin.call(self.plugins, 'onStopWorker')
        else:
            self.logger.info ('Worker (PID: %s) is not running.' % self.workerpid)

    def startWorker (self):
        """Spawn the cqworker process (unless already running) and re-read
        the worker/subworker PIDs it records in the database."""
        if self.isWorkerAlive():
            self.logger.warning ('Worker (PID: %s) is already running.' % self.workerpid)
        else:
            cmd = [executable]
            cmd.extend(self.workerbin)
            self.workerpid = utils.cmdStart(cmd)
            self.logger.info ('Try to start worker started at: %s' % self.workerpid)
            # let cqworker update worker in database
            sleep (.5)
            self.workerpid, self.workers = self.model.getWorkerPid()
            self.logger.info ('Worker started at: %s' % self.workerpid)
            self.logger.info ('  Subworkers started at: %s' % self.workers)
            Plugin.call(self.plugins, 'onStartWorker')

    def archive (self, outfile):
        """Dump completed/error jobs to a TSV file, then delete them."""
        doneJobs = self.model.getJobsByStatus([self.model.STATUS['complete'], self.model.STATUS['error']])
        # write header
        cols = self.model.TABLECOLS_JOB
        with open(outfile, 'w') as fout:
            fout.write("#" + "\t".join(cols) + "\n")
            for job in doneJobs:
                fout.write("\t".join(list(map(str, [job[key] for key in cols]))) + "\n")
        self.logger.info ('Completed/Error jobs archived to "%s".' % outfile)
        self.model.deleteCompletedJobs()
        self.logger.info ('Completed/Error jobs are removed from database.')

    def restore (self, infile):
        """Re-insert jobs from an archive file produced by archive()."""
        # read jobs from infile: '#'-prefixed line carries the column names
        cols = []
        doneJobs = []
        with open(infile) as fin:
            for line in fin:
                line = line.strip()
                if not line: continue
                parts = line.split("\t")
                if line.startswith('#'):
                    parts[0] = parts[0][1:]
                    cols = parts
                else:
                    doneJobs.append({cols[i]:part for i, part in enumerate(parts)})
        self.model.restoreJobs(doneJobs)
        self.logger.info ('Jobs from "%s" restored.' % infile)

    def reset (self):
        """Stop the worker, wipe the database and delete the log file."""
        self.stopWorker()
        self.model.reset()
        self.logger.info ('Database reset.')
        remove (self.config.log.file)
        self.logger.info ('Log file removed.')

    def workerInfo (self):
        """Return a human-readable summary of worker state and settings."""
        ret = ''
        if self.isWorkerAlive():
            ret += 'Worker is running at: %s\n' % self.workerpid
            ret += 'Subworkers are running at: %s\n' % (', '.join(list(map(str, self.workers))))
        else:
            ret += 'Worker is not running.\n'
        ret += 'Number of subworkers: %s.\n' % self.config.general.nworkers
        ret += 'Jobs are pulled every %s seconds.\n' % self.config.general.interval
        return ret

    def listJobs(self, status = []):
        """Fetch jobs with the given statuses and notify onListJobs plugins."""
        jobs = self.model.getJobsByStatus(status)
        Plugin.call(self.plugins, 'onListJobs', jobs = jobs)
        return jobs
def getPlugins(self):
ret = []
pluginnames = self.config.general.plugins
for name in pluginnames:
pname = name + 'Plugin'
if pname not in self.config._config:
pdir = path.join(path.dirname(path.realpath(__file__)), 'plugins')
elif 'dir' in self.config._config[pname]:
pdir = self.config._config[pname]['dir']
syspath.append(pdir)
pluginClass = getattr(import_module(name), pname)
plugin = pluginClass(self, name)
ret.append(plugin)
self.logger.info('Plugin loaded: %s' % name)
return sorted(ret, key = lambda x: x.priority) | pwwang/cmdQueue | cmdQueue/cmdq.py | cmdq.py | py | 4,701 | python | en | code | 5 | github-code | 50 |
40058706462 | import geopandas
import trim_fifteen
import load_data
from trim_fifteen import *
# Trim parcel geometries by a 15-unit envelope, derive the usable area
# (8-unit minimum), and write both layers out as shapefiles.
parcels = load_data.parcels
envelope_trim = trim(parcels, 15)
assert(isinstance(envelope_trim.dtype, geopandas.array.GeometryDtype))
usable = trim_fifteen.trim_unusable_space(envelope_trim, 8)
# Usable-space layer, with the computed area as an attribute column.
usable_file = geopandas.GeoDataFrame(
    {
        'A_USABLE' : usable.area,
        'geometry' : usable
    },
    crs=load_data.gdf.crs
)
# Shapefiles cannot mix geometry types, so keep only plain Polygons.
usable_file = usable_file.loc[usable_file.geometry.geom_type == "Polygon"]
usable_file.to_file("./out/usable.shp")
# Envelope layer, filtered the same way.
envelope_file = geopandas.GeoDataFrame(
    {
        'geometry': envelope_trim
    },
    crs=load_data.gdf.crs
)
envelope_file = envelope_file.loc[envelope_file.geometry.geom_type == "Polygon"]
envelope_file.to_file("./out/envelope.shp")
| wnavarre/medford_gis | script/script.py | script.py | py | 773 | python | en | code | 1 | github-code | 50 |
13860489804 | from const import puntos_botones, color_botones, bolsa
class Puntaje:
    """Scrabble-style score calculator: combines each letter's base value
    with the bonus/penalty of the board button it sits on."""

    def __init__(self):
        # Current difficulty key, used to index the board layout tables.
        self._dificultad = ''

    def _set_dificultad(self, dificultad):
        self._dificultad = dificultad

    def calcular_puntos(self, quien, palabra):
        '''Compute and return the score of the placed word, according to
        the colour of each button and the score of each letter.'''
        puntos, color = 0, ''
        for key in palabra:  # for each board position of the placed word
            for colores in color_botones[self._dificultad]:
                if key in color_botones[self._dificultad][colores]:
                    # Found the colour group this position belongs to.
                    # NOTE(review): if no group matches, `color` keeps the
                    # previous letter's colour — confirm that is intended.
                    color = colores[1]
                    break
            puntos += self._sumar(color, bolsa[palabra[key]]['puntaje'], quien)
        return puntos

    def _sumar(self, color, puntaje_letra, quien):
        '''Return the letter score combined with the colour bonus:
        red subtracts, green multiplies, blue/yellow/grey add.'''
        if color == 'Red':
            return puntaje_letra-int(puntos_botones[self._dificultad][quien]['puntos_rojo'][-1:])
        elif color == 'Green':
            return puntaje_letra*int(puntos_botones[self._dificultad][quien]['puntos_verde'][-1:])
        elif color == 'Blue':
            return puntaje_letra+int(puntos_botones[self._dificultad][quien]['puntos_azul'][-1:])
        elif color == 'Yellow':
            return puntaje_letra+int(puntos_botones[self._dificultad][quien]['puntos_amarillo'][-1:])
        else:
            return puntaje_letra+int(puntos_botones[self._dificultad][quien]['puntos_gris'][-1:])
| agustjn/Juego-Scrabble | mod_puntos.py | mod_puntos.py | py | 1,716 | python | es | code | 0 | github-code | 50 |
43504055769 | # -*- coding: utf-8 -*-
import cv2
from pytesseract import pytesseract #outil de reconnaissance de caractères (OCR)
from pytesseract import Output
from tkinter import *
import tkinter as tk # création d'interfaces graphiques.
from tkinter import filedialog
import pandas as pd # l'analyse des données
import sys
import os
import csv
# Object detection (image)
def detection_objet_image():
    """Ask for an image file, run SSD-MobileNet v3 COCO detection on it,
    and display the image with labelled bounding boxes."""
    try :
        filepath=filedialog.askopenfilename(filetypes=[
        ("image", ".jpeg"),
        ("image", ".png"),
        ("image", ".jpg"),
        ])
        # Pretrained SSD MobileNet v3 (COCO) graph + its config.
        config_file = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
        frozen_model = 'frozen_inference_graph.pb'
        model = cv2.dnn_DetectionModel (frozen_model,config_file)
        classLabels=[]
        file_name='Labels.txt'
        with open(file_name,'rt') as fpt:
            classLabels = fpt.read().rstrip('\n').split('\n')
        # Standard MobileNet preprocessing: 320x320, scaled to [-1, 1], RGB.
        model.setInputSize(320,320)
        model.setInputScale(1.0/127.5)
        model.setInputMean((127.5,127.5,127.5))
        model.setInputSwapRB(True)
        img=cv2.imread(filepath)
        ClassIndex, confidece, bbox = model.detect(img,confThreshold=0.5)
        font_scale = 3
        font = cv2.FONT_HERSHEY_PLAIN
        # Draw each detection box and its class label (indices are 1-based).
        for ClassInd , conf, boxes in zip(ClassIndex.flatten(),confidece.flatten(), bbox):
            cv2.rectangle(img,boxes,(255,0,0),2)
            cv2.putText(img,classLabels[ClassInd-1],(boxes[0]+10,boxes[1]+40),font, fontScale=font_scale,color=(0,255,0),thickness=2)
        cv2.imshow("window",img)
        cv2.waitKey(0)
    except Exception:
        pass
        # NOTE(review): any failure (cancelled dialog, missing model files)
        # is reported only with this generic message.
        print("vous devez ajouter votre fichier !\n")
# Object detection (video)
def detection_objet_video():
    """Ask for a video file and run SSD-MobileNet v3 COCO detection on
    each frame, drawing labelled boxes until ESC is pressed."""
    try:
        filepath=filedialog.askopenfilename(filetypes=[
        ("all video format", ".mp4"),
        ("all video format", ".flv"),
        ("all video format", ".avi"),
        ])
        config_file = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
        frozen_model = 'frozen_inference_graph.pb'
        model = cv2.dnn_DetectionModel (frozen_model,config_file)
        classLabels=[]
        file_name='Labels.txt'
        with open(file_name,'rt') as fpt:
            classLabels = fpt.read().rstrip('\n').split('\n')
        model.setInputSize(320,320)
        model.setInputScale(1.0/127.5)
        model.setInputMean((127.5,127.5,127.5))
        model.setInputSwapRB(True)
        cap=cv2.VideoCapture(filepath)
        # Check that the VideoCapture opened correctly; fall back to webcam.
        # NOTE(review): cv2.videoCapture (lowercase v) is a typo — this
        # fallback raises AttributeError instead of opening the webcam.
        if not cap.isOpened():
            cap= cv2.videoCapture(0)
            if not cap.isOpened():
                raise IOError("impossible d'ouvrir la vidéo")
        font_scale=3
        font = cv2.FONT_HERSHEY_PLAIN
        while cap.isOpened():
            ret,frame = cap.read()
            ClassIndex, confidece, bbox = model.detect(frame,confThreshold=0.55)
            if (len(ClassIndex)!=0):
                # Only the 80 COCO classes have labels in Labels.txt.
                for ClassInd ,conf, boxes in zip(ClassIndex.flatten(),confidece.flatten(), bbox):
                    if(ClassInd<=80):
                        cv2.rectangle(frame,boxes,(255,0,0),2)
                        cv2.putText(frame,classLabels[ClassInd-1],(boxes[0]+10,boxes[1]+40),font, fontScale=font_scale,color=(0,255,0),thickness=2)
            cv2.imshow("détection d'objet",frame)
            # ESC (27) quits the playback loop.
            if cv2.waitKey(2) & 0xFF == 27 :
                break
        cap.release()
    except Exception as e:
        # NOTE(review): this handler prints a SUCCESS message for any
        # exception (including the end-of-video read failure) and drops `e`.
        print("la reconnaissance des objets dans la vidéo a été effectuée avec succès\n")
# Text detection (image)
def detection_texte():
    """Ask for an image, run Tesseract OCR on it, show the words boxed on
    the image and dump the recognized text to result_text.txt."""
    try:
        # Windows path:
        #pytesseract.tesseract_cmd = "C:\\Program Files\\Tesseract-OCR\\tesseract.exe"
        # Linux path:
        pytesseract.tesseract_cmd = "/usr/bin/tesseract"
        filepath=filedialog.askopenfilename(filetypes=[
        ("image", ".jpeg"),
        ("image", ".png"),
        ("image", ".jpg"),
        ])
        img = cv2.imread(filepath)
        image_data = pytesseract.image_to_data(img, output_type=Output.DICT)
        # Draw a box and the recognized word above each non-empty detection.
        for i, word in enumerate(image_data['text']):
            if word !="":
                x,y,w,h = image_data['left'][i],image_data['top'][i],image_data['width'][i],image_data['height'][i]
                cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),1)
                cv2.putText(img, word,(x,y-16),cv2.FONT_HERSHEY_COMPLEX, 0.5,(0,255,0),1)
        cv2.imshow("window",img)
        cv2.waitKey(0)
        # Write the image's text content to a .txt file, grouping words
        # into lines separated by empty detections.
        parse_text = []
        word_list = []
        last_word = ''
        for word in image_data['text']:
            if word!='':
                word_list.append(word)
                last_word = word
            if (last_word!='' and word == '') or (word==image_data['text'][-1]):
                parse_text.append(word_list)
                word_list = []
        with open('result_text.txt', 'w', newline="") as file:
            csv.writer(file, delimiter=" ").writerows(parse_text)
    except Exception:
        pass
        print("vous devez ajouter votre fichier !\n")
# Shape detection (image)
def detection_forme():
    """Ask for an image, threshold it, find contours and label each one
    by the number of vertices of its polygonal approximation."""
    try:
        filepath=filedialog.askopenfilename(filetypes=[
        ("image", ".jpeg"),
        ("image", ".png"),
        ("image", ".jpg"),
        ])
        img = cv2.imread(filepath)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        _, threshold = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        i = 0
        for contour in contours:
            # Skip the first contour (the whole-image border).
            if i == 0:
                i = 1
                continue
            # Polygonal approximation: the vertex count identifies the shape.
            approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
            cv2.drawContours(img, [contour], 0, (0, 0, 255), 5)
            # Contour centroid from image moments (guard against m00 == 0).
            M = cv2.moments(contour)
            if M['m00'] != 0.0:
                x = int(M['m10']/M['m00'])
                y = int(M['m01']/M['m00'])
                # Put the shape's name at the centre of each shape.
                if len(approx) == 3:
                    cv2.putText(img, 'Triangle', (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                elif len(approx) == 4:
                    cv2.putText(img, 'Quadrilateral', (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                elif len(approx) == 5:
                    cv2.putText(img, 'Pentagon', (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                elif len(approx) == 6:
                    cv2.putText(img, 'Hexagon', (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                elif len(approx) == 7:
                    cv2.putText(img, 'heptagone', (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                elif len(approx) == 8:
                    cv2.putText(img, 'octogone', (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                else:
                    cv2.putText(img, 'circle', (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        cv2.imshow('shapes', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    except Exception:
        pass
        print("vous devez ajouter votre fichier !\n")
# Colour detection (image)
def detection_couleur():
    """Run the external colour-detection script in a child process."""
    import subprocess  # local import: keeps the module's import block untouched

    # subprocess.run with an argument list avoids the shell invocation and
    # return-status ambiguity of os.system("python3 couleur.py").
    subprocess.run(["python3", "couleur.py"])
# Create a window to choose the detection mode.
def window1():
    """Build the 520x300 Tk chooser window: one labelled 'open file'
    button per detection mode, plus a Close button. Blocks in mainloop."""
    window = tk.Tk()
    window.title("Reconnaissance de texte APP !")
    label=Label(window,text="choisissez votre mode de détection :")
    label.pack()
    label.place(x=170,y=1)
    # Fixed, non-resizable window.
    window.geometry('520x300')
    window.minsize(520,300)
    window.maxsize(520,300)
    T1=Label(window,text="Détection d'objets [image] ",font=("Arial Bold",9))
    T1.place(x=1,y=40)
    button = Button(window,text='ouvrir le fichier', command=detection_objet_image,bg='light grey')
    button.pack(side=tk.BOTTOM)
    button.place(x=240,y=38)
    T2=Label(window,text="Détection d'objets [vidéo] ",font=("Arial Bold",9))
    T2.place(x=1,y=80)
    button2= Button(window,text="ouvrir le fichier", command=detection_objet_video,bg='light grey')
    button2.pack(side=tk.BOTTOM)
    button2.place(x=240,y=78)
    T3=Label(window,text="Reconnaissance de texte [image] ",font=("Arial Bold",9))
    T3.place(x=1,y=120)
    button3 = Button(window,text='ouvrir le fichier', command=detection_texte,bg='light grey')
    button3.pack(side=tk.BOTTOM)
    button3.place(x=240,y=116)
    T4=Label(window,text="Reconnaissance de formes [image] ",font=("Arial Bold",9))
    T4.place(x=1,y=160)
    button4= Button(window,text="ouvrir le fichier", command=detection_forme,bg='light grey')
    button4.pack(side=tk.BOTTOM)
    button4.place(x=240,y=154)
    T5=Label(window,text="Reconnaissance de couleurs [image] ",font=("Arial Bold",9))
    T5.place(x=1,y=200)
    button5 = Button(window,text='ouvrir le fichier', command=detection_couleur,bg='light grey')
    button5.pack(side=tk.BOTTOM)
    button5.place(x=240,y=192)
    button6= Button(window,text="Close", command=window.destroy, bg='red')
    button6.pack(side=tk.BOTTOM)
    button6.place(x=263,y=240)
    window.mainloop()
#MAIN :
window1() | Le0Mast3r/Reconnaissance-Objet-Texte | projet.py | projet.py | py | 10,288 | python | en | code | 1 | github-code | 50 |
14437309270 | from sysdata.data_blob import dataBlob
from syscore.constants import arg_not_supplied
from syscore.interactive.menus import print_menu_of_values_and_get_response
from sysproduction.data.positions import diagPositions
from sysproduction.data.optimal_positions import dataOptimalPositions
from sysproduction.data.generic_production_data import productionDataLayerGeneric
class diagStrategiesConfig(productionDataLayerGeneric):
    ## doesn't use anything in data class just accessses config
    """Read-only access to strategy configuration (strategy_list and
    strategy_capital_allocation elements of the production config)."""
    def get_strategy_config_dict_for_specific_process(
        self, strategy_name: str, process_name: str
    ) -> dict:
        """Return the process-specific sub-dict for a strategy ({} if absent)."""
        this_strategy_dict = self.get_strategy_config_dict_for_strategy(strategy_name)
        process_dict = this_strategy_dict.get(process_name, {})
        return process_dict
    def get_strategy_config_dict_for_strategy(self, strategy_name: str) -> dict:
        """Return the config dict for one strategy (KeyError if unknown)."""
        strategy_dict = self.get_all_strategy_dict()
        this_strategy_dict = strategy_dict[strategy_name]
        return this_strategy_dict
    def get_list_of_strategies(self) -> list:
        """Return all strategy names declared in strategy_list."""
        strategy_dict = self.get_all_strategy_dict()
        list_of_strategies = list(strategy_dict.keys())
        return list_of_strategies
    def get_strategy_allocation_config_dict(self) -> dict:
        """Return the strategy_capital_allocation config element."""
        config = self.data.config
        strategy_allocation_dict = config.get_element("strategy_capital_allocation")
        return strategy_allocation_dict
    def get_all_strategy_dict(self) -> dict:
        """Return the full strategy_list config element."""
        strategy_dict = self.config.get_element("strategy_list")
        return strategy_dict
    @property
    def config(self):
        # Convenience accessor for the underlying dataBlob's config.
        return self.data.config
def get_list_of_strategies(data: dataBlob = arg_not_supplied, source="config") -> list:
    """Return strategy names from the requested source: config, positions, or optimal_positions."""
    dispatch = {
        "config": get_list_of_strategies_from_config,
        "positions": get_list_of_strategies_from_positions,
        "optimal_positions": get_list_of_strategies_from_optimal_positions,
    }
    if source not in dispatch:
        raise Exception("Source %s not recognised!" % source)
    return dispatch[source](data)
def get_list_of_strategies_from_config(data: dataBlob = arg_not_supplied) -> list:
    """Strategy names as declared in the backtest configuration."""
    return diagStrategiesConfig(data).get_list_of_strategies()
def get_list_of_strategies_from_positions(data: dataBlob = arg_not_supplied) -> list:
    """Strategy names that currently have positions recorded."""
    return diagPositions(data).get_list_of_strategies_with_positions()
def get_list_of_strategies_from_optimal_positions(
    data: dataBlob = arg_not_supplied,
) -> list:
    """Strategy names that currently have optimal positions recorded."""
    optimal_positions = dataOptimalPositions(data)
    return optimal_positions.get_list_of_strategies_with_optimal_position()
def get_valid_strategy_name_from_user(
    data: dataBlob = arg_not_supplied,
    allow_all: bool = False,
    all_code: str = "ALL",
    source: str = "config",
):
    """Show a menu of strategy names and return the user's choice.

    The default is the ALL code when allow_all is set, otherwise the first
    strategy from the chosen source.
    """
    all_strategies = get_list_of_strategies(data=data, source=source)
    default_strategy = all_code if allow_all else all_strategies[0]
    return print_menu_of_values_and_get_response(
        all_strategies, default_str=default_strategy
    )
| robcarver17/pysystemtrade | sysproduction/data/strategies.py | strategies.py | py | 3,364 | python | en | code | 2,180 | github-code | 50 |
27907094259 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 10:58:20 2019
@author: KainingSheng
2D CNN for MRI zone prostate detection
This work is a 2D conversion of the model architecture found in:
Aldoj, N., Lukas, S., Dewey, M., Penzkofer, T., 2019.
Semi-automatic classification of prostate cancer on multi-parametric MR imaging using a multi-channel 3D convolutional neural network.
Eur. Radiol. https://doi.org/10.1007/s00330-019-06417-z
"""
import h5py
import keras
import tensorflow as tf
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Concatenate, concatenate
from keras.layers import Dropout, Input, BatchNormalization
from keras.activations import relu
from keras.models import Model
from keras.losses import binary_crossentropy
from keras.optimizers import Adam
from keras.regularizers import l2
def getModel():
    """Build the 2D CNN for prostate-zone MRI classification.

    2D port of the densely-connected architecture of Aldoj et al. (2019).
    Input: one (96, 96, 1) image; output: 2-class softmax.

    BUG FIX: block 4 previously pooled its raw batch-norm output instead of
    `concat_4` (which was computed but never used), silently dropping the
    dense skip connection that every other block applies.
    """

    def dense_block(inputs, filters):
        # conv-BN twice, concatenate with the block input (dense skip),
        # then 2x2 max-pool -- the repeated pattern of blocks 2-6
        x = Conv2D(filters=filters, kernel_size=(3, 3), strides=(1, 1),
                   padding='same', activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = Conv2D(filters=filters, kernel_size=(3, 3), strides=(1, 1),
                   padding='same', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Concatenate(axis=-1)([inputs, x])
        return MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(x)

    ## input layer
    input_layer = Input((96, 96, 1))

    ## Block 1: single conv + BN + pool
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=(1, 1),
               padding='same', activation='relu')(input_layer)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(x)

    ## Blocks 2-6: densely connected conv blocks with doubling filter counts
    for filters in (32, 64, 128, 256, 512):
        x = dense_block(x, filters)

    ## Block 7: final conv + BN + pool before the classifier head
    x = Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1),
               padding='same', activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(x)
    x = Flatten()(x)

    ## MLP head 2048 -> 512 -> 2 with dropout + L2 regularisation
    x = Dense(units=2048, activation='relu', kernel_regularizer=l2(1e-4),
              kernel_initializer='random_uniform')(x)
    x = Dropout(0.5)(x)
    x = Dense(units=512, activation='relu', kernel_regularizer=l2(1e-4),
              kernel_initializer='random_uniform')(x)
    x = Dropout(0.5)(x)
    output_layer = Dense(units=2, activation='softmax')(x)

    ## define the model with input layer and output layer
    return Model(inputs=input_layer, outputs=output_layer)
| KainingSheng/mr-prostate-zone-classifier | 2DCNN.py | 2DCNN.py | py | 5,241 | python | en | code | 1 | github-code | 50 |
40092509120 | import FWCore.ParameterSet.Config as cms
# Parameter set for the muon-system-map alignment monitor.
# NOTE(review): value semantics below inferred from parameter names only --
# confirm against the C++ AlignmentMonitorMuonSystemMap1D plugin.
AlignmentMonitorMuonSystemMap1D = cms.untracked.PSet(
    # input collections
    muonCollectionTag = cms.InputTag(""),  # empty tag -- presumably muons come from the refit chain; confirm
    beamSpotTag = cms.untracked.InputTag("offlineBeamSpot"),
    # track kinematic cuts
    minTrackPt = cms.double(100.),
    maxTrackPt = cms.double(200.),
    minTrackP = cms.double(0.),
    maxTrackP = cms.double(99999.),
    maxDxy = cms.double(100.),
    # tracker-quality cuts
    minTrackerHits = cms.int32(15),
    maxTrackerRedChi2 = cms.double(10.),
    allowTIDTEC = cms.bool(True),
    # muon-chamber hit requirements
    minNCrossedChambers = cms.int32(3),
    minDT13Hits = cms.int32(8),
    minDT2Hits = cms.int32(4),
    minCSCHits = cms.int32(6),
    # which subsystems to monitor and output options
    doDT = cms.bool(True),
    doCSC = cms.bool(True),
    useStubPosition = cms.bool(False),
    createNtuple = cms.bool(False)
)
| cms-sw/cmssw | Alignment/CommonAlignmentMonitor/python/AlignmentMonitorMuonSystemMap1D_cfi.py | AlignmentMonitorMuonSystemMap1D_cfi.py | py | 744 | python | en | code | 985 | github-code | 50 |
472951717 | #!/usr/bin/env python3
import json
import logging
import os
import subprocess
import shutil
import sys
from tarfile import TarFile
from time import sleep
import yaml
import docker
# Log destination for this entrypoint; file is truncated on every run (mode="w").
log_filename = "ar_inference_entry.log"
logging.basicConfig(
    # mirror every record to the log file and to the console
    handlers=[logging.FileHandler(log_filename, mode="w"), logging.StreamHandler()],
    format="%(asctime)s %(levelname)s:%(message)s",
    datefmt="%m/%d/%Y %I:%M:%S %p",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    # Entrypoint: read the YAML job config, pull the model data image, unpack
    # the model files, then exec the inference script as a subprocess and
    # propagate its return code.
    try:
        # job configuration comes entirely from environment + YAML config file
        config_file = os.getenv("CONFIG_FILE")
        work_dir = os.getenv("TATOR_WORK_DIR")
        if not os.path.isfile(config_file):
            raise ValueError(f"Could not find config file '{config_file}', exiting")
        with open(config_file, "r") as fp:
            config = yaml.safe_load(fp)
        # required config keys -- KeyError here falls through to the catch-all below
        state_type_id = config["state_type_id"]
        sample_size = config["sample_size"]
        attribute_name = config["attribute_name"]
        upload_version = config["upload_version"]
        video_order = config["video_order"]
        # Copy model files
        # The model weights/config ship inside a docker image; pull it and
        # stream its /data directory out as a tar archive.
        # NOTE(review): the created container is never removed -- confirm
        # whether cleanup is handled by the surrounding workflow.
        data_image = config["data_image"]
        client = docker.from_env()
        client.images.pull(data_image)
        container = client.containers.create(data_image)
        bits, _ = container.get_archive("/data")
        data_tar = os.path.join(work_dir, "data.tar")
        with open(data_tar, "wb") as fp:
            for chunk in bits:
                fp.write(chunk)
        # extract only the two files the inference script needs
        with TarFile(data_tar) as fp:
            fp.extract("data/model_config.yaml", work_dir)
            fp.extract("data/deploy_model.pth", work_dir)
        # build the inference command line from env + config values
        args = [
            "python3",
            "/scripts/ar_inference.py",
            "--host",
            os.getenv("TATOR_API_SERVICE").replace("/rest", ""),
            "--token",
            os.getenv("TATOR_AUTH_TOKEN"),
            "--access-key",
            os.getenv("OBJECT_STORAGE_ACCESS_KEY"),
            "--secret-key",
            os.getenv("OBJECT_STORAGE_SECRET_KEY"),
            "--s3-bucket",
            os.getenv("S3_BUCKET"),
            "--endpoint-url",
            os.getenv("ENDPOINT_URL"),
            "--work-dir",
            work_dir,
            "--project-id",
            os.getenv("TATOR_PROJECT_ID"),
            "--attribute-name",
            attribute_name,
            "--upload-version",
            str(upload_version),
            "--multiview-ids",
            *os.getenv("TATOR_MEDIA_IDS").split(","),
            "--state-type",
            str(state_type_id),
            "--model-config-file",
            os.path.join(work_dir, "data", "model_config.yaml"),
            "--sample-size",
            str(sample_size),
            "--video-order",
            *[str(vid) for vid in video_order],
        ]
        cmd = " ".join(args)
        logger.info(f"Feature Extraction Command = '{cmd}'")
        # run inference and mirror its exit status as our own
        p = subprocess.Popen(args)
        p.wait()
        sys.exit(p.returncode)
    except SystemExit:
        # sys.exit() raises a SystemExit exception, so just reraise it here for a happy exit
        raise
    except:
        # Some other exception was raised, sleep to keep the container alive for inspection
        logger.error("Failed with exception", exc_info=True)
        sleep(600)
| openem-team/openem | scripts/tator/ar_inference_entry.py | ar_inference_entry.py | py | 3,281 | python | en | code | 11 | github-code | 50 |
24693899978 | import numpy as np #for feature extraction
import pandas as pd #for storing data
import matplotlib.pyplot as plt #for visualizing data
from datetime import datetime, timedelta #for timestamp storage
from scipy.fftpack import fft #for FFT
from scipy.signal import find_peaks #for peak detection
from scipy.signal import welch #for PSD
from sklearn.ensemble import RandomForestClassifier #for model creation
from joblib import dump, load #for model storage
from scipy.signal import butter,filtfilt #for low pass filter
#import the raw data for accelerometer, gyroscope, and labels
accelData = pd.read_csv('C:\\Users\\MichaelK\\Documents\\SeniorDesign\\accelerometer_data.txt')
gyroData = pd.read_csv('C:\\Users\\MichaelK\\Documents\\SeniorDesign\\gyroscope_data.txt')
labelData = pd.read_csv('C:\\Users\\MichaelK\\Documents\\SeniorDesign\\activity_data.txt', keep_default_na=False)
#Low pass filter at variable frequency
#inputs - df (dataframe) with 'Time' and 'X' columns, freq (number) desired cutoff frequency in Hz
#outputs - numpy array with the low-pass-filtered 'X' signal
def lpf(df, freq):
    """Zero-phase low-pass filter of the 'X' column of *df*.

    The sample rate is estimated from the first and last 'Time' stamps.

    BUG FIX: the sample count previously used ``df.size`` which counts every
    cell (rows * columns), doubling the estimated rate for a two-column frame;
    ``len(df)`` counts rows. A leftover debug print of the normalised cutoff
    was also removed.
    """
    samples = len(df)
    startTime = datetime.strptime(df['Time'][0], '%m/%d/%y %H:%M:%S.%f')
    endTime = datetime.strptime(df['Time'][df.index[-1]], '%m/%d/%y %H:%M:%S.%f')
    duration = (endTime - startTime).total_seconds()
    sampleFreq = samples / duration
    nyq = sampleFreq * 0.5
    # butter() expects the cutoff normalised to the Nyquist frequency
    normal_cutoff = freq / nyq
    order = 2
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    # filtfilt applies the filter forward and backward (zero phase shift)
    y = filtfilt(b, a, df['X'])
    return y
#combines all data into one file, correlating accel and gyro time values and applying a label to every entry
#inputs - 3 dataframes containing info from accelerometer, gyroscope, and activity
#output - dataframe containing combined info with correlated datapoints
# !!! This function can take hours to run depending on the system being used !!!
#save the resulting dataframe to a file after running so it doesnt need to be used twice on the same data
def combine_data(accelData, gyroData, labelData):
    """Merge accelerometer, gyroscope and activity-label frames into one frame.

    For each accelerometer row, picks the nearest-in-time gyroscope row
    (forward-only scan) and the most recent activity label.  All three inputs
    must have a 'Time' column; accel/gyro times use '%m/%d/%y %H:%M:%S.%f',
    labels use '%m/%d/%y %H:%M:%S'.

    NOTE(review): per-row DataFrame.append makes this quadratic and the method
    was removed in pandas 2.0 -- collecting rows in a list would fix both.
    """
    outData = pd.DataFrame(columns = ['Time','aX','aY','aZ','gX','gY','gZ','Activity'])
    #instantiating variables
    currentLabel = 'Null'
    gyroIndex = 0
    labelIndex = 0
    closeness = timedelta(seconds = 0.05)  # NOTE(review): never used below -- confirm intent
    #Finding the current activity and saving it
    for index, row in accelData.iterrows():
        accelTime = datetime.strptime(row['Time'], '%m/%d/%y %H:%M:%S.%f') #using datetime objects rather than strings or floats
        oldLabel = currentLabel
        # scan forward through the labels until one is later than this sample
        for i in range(labelIndex,labelData['Time'].count()-1):
            labelTime = datetime.strptime(labelData['Time'][i], '%m/%d/%y %H:%M:%S')
            #print(labelTime, accelTime)
            if labelTime < accelTime: #This works due to the format of the activity data file
                labelIndex = i
                currentLabel = labelData['Activity'][i]
                #According to Ali, NULL and unanswered can be interpreted as whatever the most recent label is
                if currentLabel == 'NULL' or currentLabel == 'Unanswered' or currentLabel == 'No Change':
                    currentLabel = oldLabel
            else:
                break
        #test print -- uncomment this to ensure the function is working if it takes too long
        if oldLabel != currentLabel:
            print('Current Label: ' + currentLabel)
        oldLabel = currentLabel #used when NULL or Unanswered is encountered
        #Saving the current gyroscope timestamp as a datetime object
        gyroTime = datetime.strptime(gyroData['Time'][gyroIndex], '%m/%d/%y %H:%M:%S.%f')
        if gyroIndex < gyroData['Time'].count()-1: #Preventing out of bounds
            nextGyroTime = datetime.strptime(gyroData['Time'][gyroIndex + 1], '%m/%d/%y %H:%M:%S.%f')
            # advance while the next gyro sample is closer to this accel sample
            while abs(accelTime-gyroTime) > abs(accelTime-nextGyroTime): #location in gyroscope data never needs to decrement
                gyroIndex += 1
                if gyroIndex == gyroData['Time'].count()-1: #if we get to the last datapoint in gyro
                    break
                gyroTime = datetime.strptime(gyroData['Time'][gyroIndex], '%m/%d/%y %H:%M:%S.%f')
                nextGyroTime = datetime.strptime(gyroData['Time'][gyroIndex + 1], '%m/%d/%y %H:%M:%S.%f')
        #putting all of the values together and appending them to the output dataframe
        outInfo = {'Time':accelTime, 'aX':row['X'],'aY':row['Y'],'aZ':row['Z'],'gX':gyroData['X'][gyroIndex],'gY':gyroData['Y'][gyroIndex],'gZ':gyroData['Z'][gyroIndex],'Activity':currentLabel}
        outData = outData.append(outInfo,ignore_index = True)
    return outData
#splits a dataframe into chunkSize-entry dataframes with 50% overlap
#inputs - df (dataframe), chunkSize (int)
#outputs - chunkList (list) of (dataframe)
def chunk_data(df, chunkSize):
    """Split *df* into chunkSize-row chunks with 50% overlap.

    BUG FIXES: the offset-window slicing previously used ``0.5*chunkSize`` /
    ``chunkSize*1.5`` which are floats and raise TypeError inside ``iloc``;
    the last full adjacent chunk was also dropped (``range(chunkCount-1)``),
    contradicting the documented behaviour.
    """
    chunkCount = len(df.index) // chunkSize
    half = chunkSize // 2  # integer half-window for the 50% overlap
    chunkList = []
    if chunkCount == 1:
        # too short for overlapping windows: return the first and last windows
        chunk1 = df.iloc[0:chunkSize].reset_index(drop=True)
        chunk2 = df.iloc[-chunkSize:].reset_index(drop=True)
        return [chunk1, chunk2]
    for i in range(chunkCount):
        start = i * chunkSize
        tempChunk = df.iloc[start:start + chunkSize]  # adjacent chunk
        chunkList.append(tempChunk.reset_index(drop=True))
        if start + chunkSize + half <= len(df.index):
            offsetChunk = df.iloc[start + half:start + half + chunkSize]  # offset chunk
            chunkList.append(offsetChunk.reset_index(drop=True))
    return chunkList
#removes rows from a dataframe containing outlying values in the specified column
#inputs - df (dataframe), col (string), devs (int)
#outputs - modified input dataframe
def remove_outliers_col(df, col, devs):
    """Drop rows whose *col* value lies beyond devs standard deviations.

    NOTE(review): the threshold uses the std computed once on the original
    data and is centred on zero, not on the column mean -- preserved as-is.
    A dead ``df.head()`` call was removed.
    """
    std = df[col].std()  # find standard deviation (fixed for the whole loop)
    while df[col].max() > devs * std:  # removing values above devs standard deviations
        df = df.drop(df[col].idxmax())
    while df[col].min() < -devs * std:  # removing values below -devs standard deviations
        df = df.drop(df[col].idxmin())
    return df  # returns the modified dataframe
#This function uses remove_outliers_col remove outliers from every column in a dataframe
#!!! do not use this on the entire data, use it on individual activities !!!
#inputs - df (dataframe) - devs (int) - devs is the amount of standard deviations to allow before a datapoint is removed
#outputs - modified input dataframe
def remove_outliers_df(df, devs):
    """Remove outliers from every sensor column of an activity DataFrame.

    Skips 'Time' and 'Activity'; prints how many rows were dropped.
    !!! do not use this on the entire data, use it on individual activities !!!

    BUG FIX: ``DataFrame.iteritems`` was removed in pandas 2.0; ``items()``
    is the drop-in replacement.
    """
    oldCount = len(df.index)  # for determining percentage removed
    for key, _ in df.items():  # iterating over the original frame's columns
        if key != 'Time' and key != 'Activity':
            df = remove_outliers_col(df, key, devs)  # calling remove outliers
    df = df.reset_index(drop=True)  # fixing indexing, which has gaps after rows are removed
    print(df['Activity'].iloc[0],'datapoints removed:', oldCount-len(df.index)) #outputting removal statistics for each activity
    print(df['Activity'].iloc[0],'percentage removed: '+ str(round((oldCount-len(df.index))*100/oldCount,2))+'%\n')
    return df
#defining variables for feature extraction
#these values may be modified to improve FFT generation
t_n = 0.1        # assumed total window duration in seconds -- TODO confirm against true sample rate
N = 1000         # samples per chunk (matches the 1000-row chunks used elsewhere)
T = t_n / N      # sample period
f_s = 1/T        # sample frequency
#this function finds frequency from time for a chunk of data
#input - chunk (dataFrame)
#output - outData (dataFrame) containing frequency and fft values for all sensors and axis
#code from http://ataspinar.com/2018/04/04/machine-learning-with-signal-processing-techniques/
def get_fft_values(chunk):
f_values = np.linspace(0.0, 1.0/(2.0*T), N//2)
fft_ax_ = fft(chunk['aX'])
fft_ay_ = fft(chunk['aY'])
fft_az_ = fft(chunk['aZ'])
fft_ax = 2.0/N * np.abs(fft_ax_[0:N//2])
fft_ay = 2.0/N * np.abs(fft_ay_[0:N//2])
fft_az = 2.0/N * np.abs(fft_az_[0:N//2])
fft_gx_ = fft(chunk['gX'])
fft_gy_ = fft(chunk['gY'])
fft_gz_ = fft(chunk['gZ'])
fft_gx = 2.0/N * np.abs(fft_gx_[0:N//2])
fft_gy = 2.0/N * np.abs(fft_gy_[0:N//2])
fft_gz = 2.0/N * np.abs(fft_gz_[0:N//2])
outInfo = {'Frequency':f_values,'aX':fft_ax,'aY':fft_ay,'aZ':fft_az,'gX':fft_gx,'gY':fft_gy,'gZ':fft_gz,'Activity':chunk['Activity'][0:500]}
outData = pd.DataFrame(outInfo)
outData = outData.reset_index(drop=True)
return outData
#plots data against time for a column in a dataset
#inputs - df (dataFrame), col (String)
#output - plot
def plot_axis(df, col):
    """Plot amplitude against frequency for one sensor/axis column of *df*."""
    # derive human-readable labels from the two-character column name (e.g. 'gX')
    sensor = 'gyroscope ' if col[0] == 'g' else 'accelerometer '
    if col[1] == 'X':
        axis = 'X axis'
    elif col[1] == 'Y':
        axis = 'Y axis'
    else:
        axis = 'Z axis'
    #plot the amplitude against the frequency
    plt.plot(df['Frequency'], df[col], linestyle='-', color='blue')
    plt.xlabel('Frequency [Hz]', fontsize=16)
    plt.ylabel('Amplitude', fontsize=16)
    plt.title("Frequency domain of the " + sensor + axis, fontsize=16)
    plt.show()
    return
#runs auto correlation on a column of data
#inputs - col (list)
#outputs - correlation resutl
def autocorr(col):
    """Full autocorrelation of *col*, keeping only the non-negative lags."""
    full = np.correlate(col, col, mode='full')
    midpoint = len(full) // 2
    return full[midpoint:]
#uses autocorr function to produce array of correlation values
#derived from a function by Ahmet Taspinar
#http://ataspinar.com/2018/04/04/machine-learning-with-signal-processing-techniques/
def get_autocorr_values(df):
    """Autocorrelation of every sensor column of *df*, with a time axis in 'x_values'.

    BUG FIX: ``DataFrame.iteritems`` was removed in pandas 2.0; ``items()``
    is the drop-in replacement.
    """
    t_n = 0.1
    N = 1000
    T = t_n / N
    f_s = 1/T
    # lag axis in seconds, assuming N samples at period T
    x_values = np.array([T * jj for jj in range(0, N)])
    outData = pd.DataFrame(columns = ['x_values','aX','aY','aZ','gX','gY','gZ'])
    outData['x_values'] = x_values
    for key, value in df.items():
        if key != 'Time' and key != 'Activity':
            outData[key] = autocorr(df[key])
    return outData
#function produced as a modification of code written by basj on stack overflow
#https://stackoverflow.com/questions/1713335/peak-finding-algorithm-for-python-scipy
#inputs - df (dataFrame), col (String), cnt (Int) - cnt is the number of peaks to identify, plt (Boolean)
#output - plot
#function produced as a modification of code written by basj on stack overflow
#https://stackoverflow.com/questions/1713335/peak-finding-algorithm-for-python-scipy
#inputs - df (dataFrame), col (String), cnt (Int) - cnt is the number of peaks to identify, plot (Boolean)
#output - Series of peak values (and optionally a plot)
def peak_detection(df, col, cnt, dist, plot):
    """Find at least *cnt* peaks in df[col], relaxing prominence until enough appear."""
    # derive human-readable labels from the two-character column name (e.g. 'gX')
    sensor = 'gyroscope ' if col[0] == 'g' else 'accelerometer '
    if col[1] == 'X':
        axis = 'X axis'
    elif col[1] == 'Y':
        axis = 'Y axis'
    else:
        axis = 'Z axis'
    series = df[col]
    prominence = 10  # start strict and relax until cnt peaks are found
    peaks, _ = find_peaks(series, prominence=prominence, distance=dist)
    while len(peaks) < cnt:
        prominence -= 0.00025
        peaks, _ = find_peaks(series, prominence=prominence, distance=dist)
    if plot:  # if the user wants to plot the results
        plt.plot(peaks, series[peaks], "ob"); plt.plot(series); plt.legend(['peaks'])
        plt.title("Peak Detection for the " + sensor + axis, fontsize=16)
        plt.show()
    return series[peaks]
#splits a main dataFrame into subset based on the activity label
#input - df (dataFrame)
#output - outList (list) of (dataFrame)
def split_activities(df):
    """Split *df* into one DataFrame per distinct Activity label, in first-seen order."""
    # dict.fromkeys preserves insertion order, giving labels in order of first occurrence
    labels = list(dict.fromkeys(df['Activity']))
    return [df[df['Activity'] == label].reset_index(drop=True) for label in labels]
#Extracts the power spectral density from each column in a given chunk
#derived from a function by Ahmet Taspinar
#http://ataspinar.com/2018/04/04/machine-learning-with-signal-processing-techniques/
#Extracts the power spectral density from each column in a given chunk
#derived from a function by Ahmet Taspinar
#http://ataspinar.com/2018/04/04/machine-learning-with-signal-processing-techniques/
def get_psd_values(chunk):
    """Welch power spectral density for every sensor axis of *chunk*."""
    t_n = 0.1
    N = 1000
    T = t_n / N
    f_s = 1/T
    spectra = {}
    for axis in ('aX', 'aY', 'aZ', 'gX', 'gY', 'gZ'):
        f_values, spectra[axis] = welch(chunk[axis], fs=f_s)
    outInfo = {'Frequency': f_values}
    outInfo.update(spectra)
    return pd.DataFrame(outInfo)
#finds the change between consecutive samples of a column
#inputs - col (list or array)
#outputs - ndarray of first differences, length len(col) - 1
def jerk_col(col):
    """Discrete first difference of *col* (jerk = change in acceleration per sample).

    BUG FIX: the original called the undefined name ``diff`` (NameError);
    ``np.diff`` is the intended function.
    """
    jerkOut = np.diff(col)
    return jerkOut
#uses jerk_col function to find the jerk values for 6 sub chunks of each column in a chunk
#inputs - chunk (dataFrame)
#outputs - outData (dataFrame)
def find_jerk(chunk):
    """Jerk values (via jerk_col) for each sensor column of *chunk*.

    BUG FIX: ``DataFrame.iteritems`` was removed in pandas 2.0; ``items()``
    is the drop-in replacement.

    NOTE(review): outData is built with exactly 6 rows, but jerk_col returns
    len(col) - 1 values; for 1000-sample chunks this assignment mismatches --
    confirm the intended per-subsection reduction.
    """
    outData = pd.DataFrame(columns = ['count','aX','aY','aZ','gX','gY','gZ'])
    outData['count'] = [1,2,3,4,5,6]  # outputs 6 jerk values for each sensor axis
    for key, value in chunk.items():  # iterating over the chunk's columns
        if key != 'Time' and key != 'Activity':
            outData[key] = jerk_col(value)  # finding the jerk of each column
    return outData
#finds the amount of zero crossing points for a column of data
#inputs - col (list)
#outputs - timesCrossed (int)
def zero_cross_col(col):
    """Count sign changes in *col*, returned as a one-element list.

    A transition through an exact zero is not counted (the zero resets the
    previous-sample sign), matching the original behaviour.
    """
    crossings = 0
    previous = 0
    for current in col:
        if (current < 0 < previous) or (previous < 0 < current):
            crossings += 1
        previous = current
    return [crossings]
#uses zero_cross_col to find zero crossing points for each column in a data chunk
#inputs - chunk (dataFrame)
#outputs - outData (dataFrame)
def find_zero_cross(chunk):
    """Zero-crossing count (via zero_cross_col) for each sensor column of *chunk*.

    BUG FIX: ``DataFrame.iteritems`` was removed in pandas 2.0; ``items()``
    is the drop-in replacement.
    """
    outData = pd.DataFrame(columns = ['aX','aY','aZ','gX','gY','gZ'])  # dataframe will only have depth of 1
    for key, value in chunk.items():  # iterating over the chunk's columns
        if key != 'Time' and key != 'Activity':
            outData[key] = zero_cross_col(value)
    return outData
#finds the mean of 6 equal contiguous subsections of a column of data
#inputs - col (array-like)
#outputs - meanOut (ndarray) of 6 means
def mean_col(col):
    """Mean of each of six equal contiguous subsections of *col*.

    BUG FIX: the original (marked "INCOMPLETE") used cumulative
    ``if i < k*1000/6`` tests with no elif, so every bucket also absorbed all
    earlier samples, and the length was hard-coded to 1000.  Each bucket is
    now a distinct contiguous segment and any input length is accepted.
    """
    values = np.asarray(col, dtype=float)
    # array_split tolerates lengths not divisible by 6
    return np.array([segment.mean() for segment in np.array_split(values, 6)])
#Uses mean_col function to find the mean values for a data chunk
#inputs - chunk (dataFrame)
#outputs - outData (dataFrame)
def find_mean(chunk):
    """Per-subsection means (via mean_col) for each sensor column of *chunk*.

    BUG FIX: ``DataFrame.iteritems`` was removed in pandas 2.0; ``items()``
    is the drop-in replacement.
    """
    outData = pd.DataFrame(columns = ['count','aX','aY','aZ','gX','gY','gZ'])
    outData['count'] = [1,2,3,4,5,6]  # outputs 6 averages for each sensor axis
    for key, value in chunk.items():  # iterating over the chunk's columns
        if key != 'Time' and key != 'Activity':
            outData[key] = mean_col(value)
    return outData
#this function takes a list of dataFrames divided by activity and extracts their features
#input - dfList (List) of (DataFrame)
#output - outData (DataFrame) with a row of features for each chunk
def extract_features(dfList):
    """Build one row of FFT + PSD peak-amplitude features per 1000-sample chunk.

    For each of the six sensor axes, the top 6 FFT peaks and top 6 PSD peaks
    are interleaved (72 features total), plus the chunk's activity label.

    BUG FIXES: chunk_data was called without its required chunkSize argument
    (TypeError); per-row ``DataFrame.append`` (removed in pandas 2.0, and
    quadratic) is replaced by collecting dict rows and building the frame once.
    """
    columnList = ['Feature ' + str(i) for i in range(72)]
    columnList.append('Activity')
    rows = []
    for df in dfList:
        # 1000 samples per chunk matches the FFT window N used by get_fft_values
        for chunk in chunk_data(df, 1000):
            fftData = get_fft_values(chunk)
            psdData = get_psd_values(chunk)
            outInfo = {}
            featNum = 0
            for key in fftData.columns:
                if key != 'Frequency' and key != 'Activity':
                    fftPks = peak_detection(fftData, key, 6, 1, False)[0:6]
                    psdPks = peak_detection(psdData, key, 6, 1, False)[0:6]
                    for j in range(6):
                        outInfo['Feature ' + str(featNum)] = fftPks.iloc[j]
                        outInfo['Feature ' + str(featNum + 1)] = psdPks.iloc[j]
                        featNum += 2
            outInfo['Activity'] = chunk['Activity'].iloc[0]
            rows.append(outInfo)
    return pd.DataFrame(rows, columns=columnList)
def feature_selection(df):
    """Interactively select feature families, then extract them chunk-by-chunk.

    Prompts the user (Y/N) for each feature family, sizes the output table
    accordingly, and returns one row of peak-amplitude features per chunk.

    BUG FIXES: the original depended on an undefined ``check_valid`` helper,
    called chunk_data without its required chunkSize argument, and read the
    fft/psd tables even when deselected (NameError); all repaired here.
    NOTE(review): Mean and Zero Crossing Rate are counted toward featureCount
    but, as in the original, are not yet extracted.
    """
    def _ask_yes_no(question):
        # re-prompt until the user answers Y or N (case-insensitive)
        while True:
            answer = input(question).capitalize()
            if answer in ('Y', 'N'):
                return answer == 'Y'

    #getting info from user
    print('Answer the following questions to determine \nthe features extracted (Y/N):')
    fft = _ask_yes_no('\nFrequency from Time? ')
    psd = _ask_yes_no('\nPower Spectral Density? ')
    jrk = _ask_yes_no('\nJerk? ')
    mean = _ask_yes_no('\nMean? ')
    zcr = _ask_yes_no('\nZero Crossing Rate? ')

    # 36 peak features per selected spectral family, 1 for zero-crossing rate
    featureCount = 36 * sum([fft, psd, jrk, mean]) + (1 if zcr else 0)
    columnList = ['Feature ' + str(i) for i in range(featureCount)]
    columnList.append('Activity')

    rows = []
    for chunk in chunk_data(df, 1000):  # 1000 samples matches the FFT window N
        # build only the tables the user asked for
        tables = []
        if fft:
            tables.append(get_fft_values(chunk))
        if psd:
            tables.append(get_psd_values(chunk))
        if jrk:
            tables.append(find_jerk(chunk))
        outInfo = {}
        featNum = 0
        for table in tables:
            for key in table.columns:
                if key not in ('Frequency', 'count', 'Activity'):
                    peaks = peak_detection(table, key, 6, 1, False)[0:6]
                    for j in range(6):
                        outInfo['Feature ' + str(featNum)] = peaks.iloc[j]
                        featNum += 1
        outInfo['Activity'] = chunk['Activity'].iloc[0]
        rows.append(outInfo)
    return pd.DataFrame(rows, columns=columnList)
###############################################################################
# Load the merged dataset from cache if present; otherwise build and cache it.
try:
    #importing saved compiled data to avoid running combine_data
    allData = pd.read_csv('allData.csv')
    allData = allData.drop(columns = 'Unnamed: 0')  # drop the saved index column
    allData.describe()  # NOTE(review): result is discarded -- presumably a leftover sanity check
    #only use once per input data set - correlates the data
except: #if there is not an allData file saved
    allData = combine_data(accelData,gyroData,labelData)
    allData.to_csv('allData.csv')
#seperates the main data by activity
activityDfList = split_activities(allData)
#removes outlying datapoints from each activity, skipping unusable labels
prunedDataList = []
for df in activityDfList:
    if df['Activity'].iloc[1] != 'Work In Lab' and df['Activity'].iloc[1] != 'Not in List':
        prunedDataList.append(remove_outliers_df(df, 4))
#creating the model
clf = RandomForestClassifier(n_estimators=1000, max_depth=3,random_state=0)
#extracting features for training (first three activities only)
trainingData = extract_features(prunedDataList[0:3])
#training the model on the feature columns, with 'Activity' as the target
noLabelData = trainingData.drop(['Activity'],axis = 1)
clf.fit(noLabelData, trainingData['Activity'])
#saving the model to a file
dump(clf,'testmodel.joblib')
#import the test data for accelerometer, gyroscope, and labels
testAccelData = pd.read_csv('C:\\Users\\Micha\\Documents\\SeniorDesign\\testdata\\accelerometer_data (1).txt')
testGyroData = pd.read_csv('C:\\Users\\Micha\\Documents\\SeniorDesign\\testdata\\gyroscope_data (1).txt')
testLabelData = pd.read_csv('C:\\Users\\Micha\\Documents\\SeniorDesign\\testdata\\activity_data (1).txt', keep_default_na=False)
rawTestData = combine_data(testAccelData,testGyroData,testLabelData)
testDfList = split_activities(rawTestData)
#removes outlying datapoints from each activity
prunedTestData = []
for df in testDfList:
    if df['Activity'].iloc[1] != 'Work In Lab' and df['Activity'].iloc[1] != 'Not in List':
        # BUG FIX: previously appended to the undefined name `prunedTestList`
        prunedTestData.append(remove_outliers_df(df, 4))
| mekroesche/Senior-Design | Main.py | Main.py | py | 21,847 | python | en | code | 0 | github-code | 50 |
25680441335 | import unittest
from abc import ABC
from dataclasses import dataclass, is_dataclass
from __seedwork.domain.entities import Entity
from __seedwork.domain.value_objects import UniqueEntityId
@dataclass(frozen=True, kw_only=True)
class StubEntity(Entity):
    # minimal concrete Entity used to exercise the abstract base in the tests below
    prop1: str
    prop2: str
class TestEntityUnit(unittest.TestCase):
    """Unit tests for the abstract Entity base class."""

    def test_if_is_a_dataclass(self):
        self.assertTrue(is_dataclass(Entity))

    def test_if_is_a_abstract_class(self):
        self.assertIsInstance(Entity(), ABC)

    def test_set_id_and_props(self):
        stub = StubEntity(prop1="value1", prop2="value2")
        self.assertEqual(stub.prop1, "value1")
        self.assertEqual(stub.prop2, "value2")
        # a UniqueEntityId is generated and mirrored on the `id` shortcut
        self.assertIsInstance(stub.unique_entity_id, UniqueEntityId)
        self.assertEqual(stub.unique_entity_id.id, stub.id)

    def test_accept_a_valid_uuid(self):
        uuid_value = "9f2ec4aa-010b-4282-addb-6d738cc27676"
        stub = StubEntity(
            unique_entity_id=UniqueEntityId(uuid_value),
            prop1="value1",
            prop2="value2",
        )
        self.assertEqual(stub.id, uuid_value)

    def test_to_dict_method(self):
        uuid_value = "9f2ec4aa-010b-4282-addb-6d738cc27676"
        stub = StubEntity(
            unique_entity_id=UniqueEntityId(uuid_value),
            prop1="value1",
            prop2="value2",
        )
        self.assertDictEqual(stub.to_dict(), {
            "id": uuid_value,
            "prop1": "value1",
            "prop2": "value2",
        })

    # pylint: disable=protected-access
    def test_set_attributes(self):
        stub = StubEntity(prop1="val1", prop2="val2")
        stub._set("prop1", "new_val")
        self.assertEqual(stub.prop1, "new_val")
| andremagui/micro-admin-videos-python | src/__seedwork/tests/unit/domain/test_unit_entities.py | test_unit_entities.py | py | 1,766 | python | en | code | 0 | github-code | 50 |
26379844030 | # https://www.acmicpc.net/problem/7512
'''
1. 아이디어 :
1) (시간초과)에라토스테네스의 체로 소수를 구한다.
해시맵을 만들어서 연속돼는 n의 합을 슬라이딩 윈도우로 구하고,
소수인지 확인한다음, 해시맵에 넣는다. value가 m인 첫번째 key를 출력한다.
2) (틀림) 에라토스테네스의 체를 너무 많이 연산했더니 시간초과가 난다. 각 n에 대해 1000가지만 셋에 저장해봤다.
3) 1000이상으로 가면 시간초과, 그 이하는 답이 안나온다.
다른 방법으로 에라토스테네스의 체로 소수를 10^4까지 구하고, 연속되는 n에 대해 set을 10^4개 만들어서 저장한다.
슬라이딩 윈도우로, 연속되는 n의 합의 모든 수를 소수이면 set[n]에 다 넣는다.
입력되는 n들에 대해, set들의 교집합을 구한다
2. 시간복잡도 :
1) O(10**7) * O(10**7) + O(t) * O(m) * O(10**7) = max( O(10**14), O(t) * O(m) * O(10**7) )
2) O(10**4) * O(10**4) + O(10**4) * O(10**4) + O(t) * O(m) = O(10**8)
- 에라토스테네스의 체 + 소수들 * 슬라이딩 윈도우 + 테스트케이스 * m
3. 자료구조 :
1) 에라토스테네스의 체, 해시맵
1) 에라토스테네스의 체, 배열, set
'''
def prime_list(start, end):
    """Return all primes p with start <= p < end, via a sieve of Eratosthenes.

    BUG FIX: sieve entries 0 and 1 were never cleared and only start <= 0 was
    clamped, so prime_list(1, n) wrongly included 1.
    """
    # sieve[i] is True while i is still presumed prime
    sieve = [True] * end
    for i in range(min(2, end)):  # 0 and 1 are not prime
        sieve[i] = False
    if start < 2:
        start = 2
    # factors beyond sqrt(end) pair with smaller ones, so stop at sqrt(end)
    m = int(end ** 0.5)
    for i in range(2, m + 1):
        if sieve[i]:  # i is prime: mark its multiples composite
            for j in range(i + i, end, i):
                sieve[j] = False
    # collect surviving indices in the requested range
    return [i for i in range(start, end) if sieve[i]]
import sys
input = sys.stdin.readline
# Upper bound for both the sieved primes and any windowed sum we keep.
n = 10 ** 7
primes = prime_list(0, n)
primes_set = set(primes)  # O(1) membership tests for "is this sum prime?"
# n_primes[k] holds every prime expressible as the sum of k consecutive primes.
# NOTE: the loop below rebinds the module-level `n` as its index variable.
n_primes = [set() for _ in range(10 ** 4 + 1)]
for n in range(1, 10 ** 4 + 1):
    # Sliding window of width n over the prime list, starting with the first n.
    n_prime = sum(primes[:n])
    if n_prime in primes_set:
        n_primes[n].add(n_prime)
    for i in range(n, len(primes)):
        # Slide right by one: add the new prime, drop the oldest.
        n_prime += primes[i] - primes[i - n]
        if n_prime >= 10 ** 7:
            break  # sums only grow, so nothing further can fit the bound
        if n_prime in primes_set:
            n_primes[n].add(n_prime)
t = int(sys.stdin.readline())
for tests in range(1, t + 1):
    m = int(input())
    nums = [int(x) for x in input().split()]
    # Intersect the candidate sets for every requested window length.
    answer = primes_set.copy()
    for j in range(m):
        answer &= n_primes[nums[j]]
    print("Scenario %d:" % tests)
    print(min(answer))
    print()
# 1)
# import sys
# alist = [1,2,3,4]
# blist = [2,4]
# input = sys.stdin.readline
# cases = int(input())
# prime_table = prime_list(0, 10 ** 7)
# temp_table = prime_table.copy()
# for _ in range(cases):
# n = int(input())
# lengths = list(map(int, input().split()))
#
# for j in range(n):
# temp_list=[]
# length = lengths[j]
# temp_total = 0
# for i in range(length):
# temp_total += prime_table[i]
# lp = 0
# rp = lp + length
# while rp < len(prime_table) and temp_total<= 10 **4:
# if temp_total in prime_table:
# temp_list.append(temp_total)
# temp_total -= prime_table[lp]
# lp += 1
# temp_total += prime_table[rp]
# rp += 1
# # print(sorted(temp_list))
# temp_table = list(set(temp_table) & set(temp_list))
# print("Scenario {}:".format(_+1))
# #if len(temp_table)==0:
# # assert False
# print(sorted(temp_table)[0]) | 724thomas/CodingChallenge_Python | baekjoon/7512.py | 7512.py | py | 3,638 | python | ko | code | 0 | github-code | 50 |
74639911515 | """
To run flow:
python timeslice_cluster_network_flow.py run --lib_network_name "G_library.pickle" --lib_time "G_timeslices.pickle" --time_interval 10 --min_time 1945 --n_top 3
"""
from metaflow import FlowSpec, step, project, Parameter, S3, pip
import json
import pickle
@project(name="pec_library")
class TimesliceClusterNetwork(FlowSpec):
    """Generates library network subgraphs based on time intervals.
    "Sanitises" clusters by propagating cluster labels across time
    intervals based on Jaccard similarity at time interval t and
    time interval t+1. Clusters, names and colors subgraph clusters.

    Attributes:
        library_network_name: library network file name (input, in s3).
        library_timeslices: output file name for the dictionary of
            timesliced, clustered subgraphs (in s3).
        timeslice_interval: the time interval to timeslice the library network
            into subgraphs.
        min_timeslice: minimum year to start timeslicing from.
        n_top: top n tf-idf words to name clusters.
    """

    # Type hints for the Metaflow Parameters declared below.
    library_network_name: str
    library_timeslices: str
    timeslice_interval: int
    min_timeslice: int
    n_top: int

    library_network_name = Parameter(
        "lib_network_name",
        help="file name to store subject pair cooccurance network in s3.",
        type=str,
        default="G_library.pickle",
    )

    library_timeslices = Parameter(
        "lib_time",
        help="file name to store a dictionary of timesliced, clustered subgraphs in s3.",
        type=str,
        default="G_timeslices.pickle",
    )

    timeslice_interval = Parameter(
        "time_interval",
        help="the time interval to timeslice the library network into subgraphs.",
        type=int,
        default=10,
    )

    min_timeslice = Parameter(
        "min_time",
        help="time minimum year to timeslice library network into subgraphs.",
        type=int,
        default=1965, # parameter defined by distribution of years across network
    )

    n_top = Parameter(
        "n_top", help="top n tf-idf words to name clusters.", type=int, default=3
    )

    @step
    def start(self):
        """Load the pickled library network from s3."""
        from pec_library import bucket_name

        with S3(s3root="s3://" + bucket_name + "/outputs/") as s3:
            library_network_obj = s3.get(self.library_network_name)
            self.library_network = pickle.loads(library_network_obj.blob)
            print(
                f"successfully loaded library data from {'s3://' + bucket_name + '/outputs/' + self.library_network_name}"
            )

        self.next(self.cluster_timeslice_network)

    @step
    def cluster_timeslice_network(self):
        """Slice network into subgraphs based on t time interval.
        Cluster every subgraph using the Leiden algorithm."""
        from pec_library.pipeline.timeslice_cluster_network_utils import (
            timeslice_subject_pair_coo_graph,
            cluster_timeslice_subject_pair_coo_graph,
        )

        self.timeslices = timeslice_subject_pair_coo_graph(
            self.library_network, self.timeslice_interval, self.min_timeslice
        )
        self.subgraph_communities = cluster_timeslice_subject_pair_coo_graph(
            self.timeslices
        )
        print("timesliced network and clustered subgraphs!")
        self.next(self.sanitise_clusters)

    @step
    def sanitise_clusters(self):
        """Propagate cluster labels greedily across time intervals
        based on Jaccard similarity at timeslice t and timeslice t + 1."""
        from pec_library.pipeline.timeslice_cluster_network_utils import (
            sanitise_clusters,
        )

        # Propagate labels pairwise, walking the slices in chronological order.
        for i in range(len(self.subgraph_communities) - 1):
            timeslice_x = "G_timeslice_" + str(i)
            timeslice_y = "G_timeslice_" + str(i + 1)
            sanitise_clusters(
                self.subgraph_communities[timeslice_x],
                self.subgraph_communities[timeslice_y],
            )
        print("sanitised clusters!")
        self.next(self.color_name_clusters)

    @step
    def color_name_clusters(self):
        """Generate cluster name and color using tf-idf at latest time interval
        per cluster. Propagate cluster name and color across time intervals."""
        from pec_library.pipeline.timeslice_cluster_network_utils import (
            add_cluster_colors,
            add_cluster_names,
        )

        self.subgraph_communities = add_cluster_colors(self.subgraph_communities)
        self.subgraph_communities = add_cluster_names(
            self.subgraph_communities, self.n_top
        )
        print("added cluster name and cluster color as node attributes!")
        self.next(self.end)

    @step
    def end(self):
        """Saves dictionary of clustered, named subgraphs based on timeslices
        to s3."""
        from pec_library import bucket_name

        with S3(s3root="s3://" + bucket_name + "/outputs/") as s3:
            timeslice_byte_obj = pickle.dumps(self.subgraph_communities)
            s3.put(self.library_timeslices, timeslice_byte_obj)
            print(
                f"successfully saved library data to {'s3://' + bucket_name + '/outputs/' + self.library_timeslices}"
            )
if __name__ == "__main__":
    # Standard Metaflow entry point: instantiating the FlowSpec subclass
    # lets the Metaflow CLI drive the run (see module docstring for usage).
    TimesliceClusterNetwork()
| nestauk/pec-library | pec_library/pipeline/timeslice_cluster_network_flow.py | timeslice_cluster_network_flow.py | py | 5,120 | python | en | code | 0 | github-code | 50 |
10410594388 | import os
import sys
import shutil
import tempfile
import argparse
from rgitools import funcs
def run(input_dir, output_file):
    """Zips an RGI directory and makes it look like a real one.

    Each sub-directory of ``input_dir`` is archived individually, plain
    files are copied verbatim, and the whole working directory is then
    bundled into ``<output_file>.zip``.

    Parameters
    ----------
    input_dir : str
        path to the RGI directory
    output_file : str
        path to the output file (without zip ending!)
    """
    bname = os.path.basename(input_dir)
    tmpdir = tempfile.mkdtemp()
    # Guarantee the scratch directory is removed even when copying or
    # archiving fails (the original leaked tmpdir on any exception).
    try:
        workdir = os.path.join(tmpdir, bname)
        funcs.mkdir(workdir, reset=True)
        # Zip the sub-directories in place; copy regular files as they are.
        for fname in os.listdir(input_dir):
            abs_p = os.path.join(input_dir, fname)
            out_f = os.path.join(workdir, fname)
            if os.path.isfile(abs_p):
                shutil.copy(abs_p, out_f)
            else:
                shutil.make_archive(out_f, 'zip', abs_p)
        # Compress the working directory into the final archive.
        shutil.make_archive(output_file, 'zip', workdir)
    finally:
        # Delete our working dir in all cases.
        shutil.rmtree(tmpdir)
def parse_args(args):
    """Check input arguments and return them as a keyword dict for run().

    Raises ValueError when either mandatory option is missing.
    """
    parser = argparse.ArgumentParser(
        description='Computes the intersects for an entire RGI directory.')
    parser.add_argument('--input-dir', type=str,
                        help='the rgi directory to process.')
    parser.add_argument('--output-file', type=str,
                        help='path to the output file (without zip ending!)')
    ns = parser.parse_args(args)

    # Both options are mandatory even though argparse treats them as optional.
    for attr in ('input_dir', 'output_file'):
        if not getattr(ns, attr):
            raise ValueError('--%s is required!' % attr.replace('_', '-'))

    return dict(input_dir=ns.input_dir, output_file=ns.output_file)
def main():
    """Script entry point: parse sys.argv and hand the result to run()."""
    run(**parse_args(sys.argv[1:]))
| GLIMS-RGI/rgitools | rgitools/cli/zip_rgi_dir.py | zip_rgi_dir.py | py | 1,800 | python | en | code | 14 | github-code | 50 |
23216568117 | from typing import List, Mapping
from pyspark import sql
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.types import StringType, StructField, StructType
def get_null_perc(spark: SparkSession, df: sql.DataFrame, null_cols: List[str]) -> sql.DataFrame:
    """Get null/empty percentage for columns

    Args:
        spark: SparkSession object
        df: dataframe to perform null/empty analysis on
        null_cols: list of columns that need to be considered for analysis

    Returns:
        DataFrame: dataframe with null check analysis, one
        (Column, NullPercentage) row per analysed column

    Note: raises ZeroDivisionError for an empty input dataframe
    (unchanged from the previous behavior).
    """
    schema = StructType(
        [
            StructField("Column", StringType(), True),
            StructField("NullPercentage", StringType(), True),
        ]
    )
    emptyRDD = spark.sparkContext.emptyRDD()
    resultdf = spark.createDataFrame(emptyRDD, schema=schema)
    # Count rows once up front; the previous version re-ran the full
    # df.count() scan inside the loop for every column.
    total_count = df.count()
    for x in null_cols:
        # A value counts as "null" when it is NULL or the empty string.
        df_null_count = df.select(F.col(x)).filter(
            F.col(x).isNull() | (F.col(x) == "")).count()
        df_null = spark.createDataFrame(
            [[x, str(df_null_count * 100.0 / total_count) + "%"]], schema=schema
        )
        resultdf = resultdf.union(df_null)
    return resultdf
def get_summary_numeric(df: sql.DataFrame, numeric_cols: List[str]) -> sql.DataFrame:
    """Compute Spark summary statistics for the chosen numeric columns.

    Args:
        df: dataframe to perform analysis on
        numeric_cols: list of columns that need to be considered for analysis

    Returns:
        DataFrame: the result of DataFrame.summary() on the selected columns
    """
    selected = df.select(numeric_cols)
    return selected.summary()
def get_distinct_counts(
    spark: SparkSession, df: sql.DataFrame, aggregate_cols: List[str]
) -> sql.DataFrame:
    """Count distinct values for each requested column.

    Args:
        spark: SparkSession object
        df: dataframe to perform distinct count analysis on
        aggregate_cols: list of columns that need to be considered for analysis

    Returns:
        DataFrame: one (Column, DistinctCount) row per analysed column
    """
    schema = StructType(
        [
            StructField("Column", StringType(), True),
            StructField("DistinctCount", StringType(), True),
        ]
    )
    # Start from an empty frame and union in one row per column.
    result = spark.createDataFrame(spark.sparkContext.emptyRDD(), schema=schema)
    for col_name in aggregate_cols:
        distinct_total = df.select(F.col(col_name)).distinct().count()
        row = spark.createDataFrame(
            [[col_name, str(distinct_total)]], schema=schema)
        result = result.union(row)
    return result
def get_distribution_counts(
    spark: SparkSession, df: sql.DataFrame, aggregate_cols: List[str]
) -> List[sql.DataFrame]:
    """Value-frequency distribution for each requested column.

    Args:
        spark: SparkSession object
        df: dataframe to perform the distribution analysis on
        aggregate_cols: list of columns that need to be considered for analysis

    Returns:
        One DataFrame per column, grouped by value and sorted by
        descending occurrence count.
    """
    return [
        df.groupby(F.col(col_name)).count().sort(F.col("count").desc())
        for col_name in aggregate_cols
    ]
def get_mismatch_perc(
    spark: SparkSession, df: sql.DataFrame, data_quality_cols_regex: Mapping[str, str]
) -> sql.DataFrame:
    """Get Mismatch Percentage for columns

    Args:
        spark: SparkSession object
        df: dataframe to perform null/empty analysis on
        data_quality_cols_regex: Dictionary of columns/regex-expression for data quality analysis

    Returns:
        DataFrame: one (Column, MismatchPercentage) row per analysed column,
        where a row mismatches when it does NOT match the column's regex

    Note: raises ZeroDivisionError for an empty input dataframe
    (unchanged from the previous behavior).
    """
    schema = StructType(
        [
            StructField("Column", StringType(), True),
            StructField("MismatchPercentage", StringType(), True),
        ]
    )
    emptyRDD = spark.sparkContext.emptyRDD()
    resultdf = spark.createDataFrame(emptyRDD, schema=schema)
    # Count rows once up front; the previous version re-ran the full
    # df.count() scan inside the loop for every column.
    total_count = df.count()
    for key, value in data_quality_cols_regex.items():
        df_regex_not_like_count = df.select(F.col(key)).filter(
            ~F.col(key).rlike(value)).count()
        df_regex_not_like = spark.createDataFrame(
            [[key, str(df_regex_not_like_count * 100.0 / total_count) + "%"]],
            schema=schema
        )
        resultdf = resultdf.union(df_regex_not_like)
    return resultdf
| vsocrates/ed-pipeline | src/ed_pipeline/qc/quality_checks.py | quality_checks.py | py | 4,349 | python | en | code | 1 | github-code | 50 |
def solution(commands):
    """Simulate a 50x50 mergeable spreadsheet (cells flattened to 2500 slots).

    Merged cells all point at a shared root slot via `parent`; reading or
    writing a cell therefore goes through its root.
    """
    answers = []
    cells = ["EMPTY"] * 2500
    parent = list(range(2500))

    for command in commands:
        op, *args = command.split()
        if op == "UPDATE" and len(args) == 3:
            # Positional update: write through to the cell's root slot,
            # since the cell may belong to a merged group.
            row, col, value = args
            cells[parent[(int(row) - 1) * 50 + (int(col) - 1)]] = value
        elif op == "UPDATE":
            # Value update: replace every occurrence of args[0] with args[1].
            cells = updateArr(cells, args[0], args[1])
        elif op == "PRINT":
            row, col = args
            answers.append(cells[parent[(int(row) - 1) * 50 + (int(col) - 1)]])
        elif op == "MERGE":
            r1, c1, r2, c2 = args
            # Merging a cell with itself is a no-op.
            if (r1, c1) != (r2, c2):
                parent = union_parent(parent, cells,
                                      int(r1) - 1, int(c1) - 1,
                                      int(r2) - 1, int(c2) - 1)
        elif op == "UNMERGE":
            row, col = args
            root = parent[(int(row) - 1) * 50 + (int(col) - 1)]
            kept = cells[root]
            # Dissolve the whole group, then restore the value into the
            # cell that was named in the command.
            cells, parent = unmerge(parent, cells, root)
            cells[(int(row) - 1) * 50 + (int(col) - 1)] = kept
    return answers
# Dissolve a merged group: every slot rooted at v becomes its own root
# again, with its value reset to EMPTY.
def unmerge(parent, arr, v):
    for i in range(2500):
        if parent[i] != v:
            continue
        parent[i] = i
        arr[i] = "EMPTY"
    return [arr, parent]
# Replace every occurrence of value1 in the 2500-slot grid with value2,
# in place. Works purely on stored values; merged cells need no special
# handling here because only root slots ever hold a real value.
def updateArr(arr, value1, value2):
    for i, current in enumerate(arr[:2500]):
        if current == value1:
            arr[i] = value2
    return arr
# Re-point every slot currently rooted at v1 so it is rooted at v2.
# The grid is only 2500 slots, so a single linear sweep is fine and keeps
# every member of the group pointing directly at the new root.
def match_parent(parent, v1, v2):
    i = 0
    while i < 2500:
        if parent[i] == v1:
            parent[i] = v2
        i += 1
    return parent
# Merge the group of (r2, c2) into the group of (r1, c1), by root slot.
# When only (r2, c2)'s group holds a value, the merge direction flips so
# that the value survives; when both hold values, (r1, c1)'s value wins.
def union_parent(parent, arr, r1, c1, r2, c2):
    root_a = parent[r1 * 50 + c1]
    root_b = parent[r2 * 50 + c2]
    if arr[root_a] == "EMPTY" and arr[root_b] != "EMPTY":
        return match_parent(parent, root_a, root_b)
    if root_a == root_b:
        # Already the same group: nothing to do.
        return parent
    return match_parent(parent, root_b, root_a)
# 이 문제는 parent리스트를 얼마나 잘 활용하냐에 따라 달렸었다.
# 참조하는 값을 생각해야 하다보니 print를 하거나 update를 할 때도 결국 대부분 parent를 기준으로 비교하고 업데이트 했다.
# union_parent는 기존에 우리가 알던 방식보다는 쉬운 편이었다.
# 왜냐면 원래는 재귀를 이용해서 한 칸 한 칸 올라가는 방식이었지만, 이 방식은 경로를 압축해서 바로 루트노드를 바라보도록 설계했기 때문이다.
# 그래서 원래는 find_parent라는 함수도 있었어야 했지만, 여기는 바로 루트노드를 바라보기 때문에 parent[index]로 바로 루트노드를 꺼냈다. | JH-TT/Coding_Practice | Programmers/Implementation_P/150366.py | 150366.py | py | 3,676 | python | ko | code | 0 | github-code | 50 |
40629548752 | #coding = 'utf-8'
'''
模块功能:爬虫主模块,实现爬虫配置功能,爬虫GUI
作者:Li Yu
创建时间:2019/05/02
创建地点:武汉大学,湖北,武汉
作者邮箱:2014301610173@whu.edu.cn
'''
from tkinter import *
from tkinter import ttk,Listbox
from tkinter.ttk import *
from dbopr import *
from spider import *
from visualization import *
import os
from datetime import *
# Configuration holder: paths, table name and the saved QQ account.
class Config(object):
    """Runtime configuration for the spider GUI.

    Holds the working directory, the phantomjs executable path, the
    sqlite table name and the last-used QQ account loaded from disk.
    """
    # Initialise defaults, then load persisted values from the db/ folder.
    def __init__(self):
        super(Config, self).__init__()
        self.curdir = os.getcwd() # current working directory
        self.tablename = 'qqzoneinfo'
        self.phfilepath = ''
        self.account = ''
        self.password = ''
        self.refresh()
    # Reload configuration from disk.
    def refresh(self):
        """Ensure db/ exists, then reload the phantomjs path and account."""
        folder = os.path.exists(self.curdir+'/db/')
        if not folder: # create the db/ folder on first run
            os.makedirs(self.curdir+'/db/')
        # phfile = os.path.exists(self.curdir+'/db/phpos.txt') # stores the phantomjs.exe path
        # if not phfile:
        #     self.phfilepath = ''
        # else:
        #     with open(self.curdir+'/db/phpos.txt','r') as f:
        #         self.phfilepath = f.readline()
        # Fixed phantomjs location shipped alongside the program.
        self.phfilepath = self.curdir+'/phantomjs-2.1.1-windows/bin/phantomjs.exe'
        accountfile = os.path.exists(self.curdir+'/db/account.txt')
        if not accountfile:
            self.account = ''
        else:
            # account.txt holds the last-used QQ account number.
            with open(self.curdir+'/db/account.txt','r') as f:
                self.account = f.readline()
# Main GUI class: a three-tab Tk window (database / spider / visualization).
class tkGUI(object):
    """Builds the Tk window and wires the database, spider and
    visualization tabs to their backend helpers."""
    config = None # shared Config instance
    Spider = None # shared spider instance (QQzoneSpider)
    PaintChart = None # shared visualization instance
    # Build the window, the notebook tabs and enter the Tk main loop.
    def __init__(self,conf):
        super(tkGUI, self).__init__()
        tkGUI.config = conf
        window = Tk()
        window.geometry('650x400')
        window.title('qqzoneSpider | 注意:请合理使用爬虫,以防账号被封。“盗”亦有道 >_<')
        self.tab_control = ttk.Notebook(window)
        self.db_tab = ttk.Frame(self.tab_control)
        self.spider_tab = ttk.Frame(self.tab_control)
        self.visul_tab = ttk.Frame(self.tab_control)
        self.tab_control.add(self.db_tab, text='数据库')
        self.tab_control.add(self.spider_tab, text='爬虫')
        self.tab_control.add(self.visul_tab, text='可视化')
        # Collect the available database files. os.path.splitext is safe
        # for names without a dot; the previous split('.')[1] raised
        # IndexError on such names and misclassified multi-dot names.
        self.dbs = []
        for filename in os.listdir(tkGUI.config.curdir+'/db/'):
            if os.path.splitext(filename)[1] == '.db':
                self.dbs.append(filename)
        self.db_init()
        self.spider_init()
        self.visul_init()
        self.tab_control.pack(expand=1, fill='both')
        window.mainloop()
    # Build the database tab: create/browse/clear databases and a
    # tree view showing the stored posts.
    def db_init(self):
        self.dbname_lb = Label(self.db_tab, text= '数据库名',padx=10,pady=10,font=('微软雅黑'))
        self.dbname_entry = Entry(self.db_tab,width=30)
        self.dbname_btn = Button(self.db_tab,text='创建',command=lambda : createDB(self,self.dbname_entry.get(),tkGUI.config.curdir),bg="#BAE0E8",padx=10,)
        self.dbchoose_lb = Label(self.db_tab, text= '浏览数据库',padx=10,pady=10,font=('微软雅黑'))
        # self.dbchoose_entry = Entry(self.db_tab,width=30,state='disabled')
        column = ('id','comment','cmtnum','likenum','tid','createtime')
        self.dbtable_tree = ttk.Treeview(self.db_tab,columns = column,show='headings')
        self.dbtable_tree.column('id',width=30)
        self.dbtable_tree.column('comment',width=200)
        self.dbtable_tree.column('cmtnum',width=50)
        self.dbtable_tree.column('likenum',width=50)
        self.dbtable_tree.column('tid',width=100)
        self.dbtable_tree.column('createtime',width=100)
        self.dbtable_tree.heading('id',text='id')
        self.dbtable_tree.heading('comment',text='内容')
        self.dbtable_tree.heading('cmtnum',text='评论数')
        self.dbtable_tree.heading('likenum',text='点赞数')
        self.dbtable_tree.heading('tid',text='tid')
        self.dbtable_tree.heading('createtime',text='发表时间')
        self.dbchoose_cb = Combobox(self.db_tab,values=self.dbs)
        if(len(self.dbs) > 0):
            self.dbchoose_cb.current(0)
        # self.dbchoose_btn = Button(self.db_tab,text='选择',command=lambda : chooseDB(self,tkGUI.config.curdir),bg="#BAE0E8",padx=10,)
        self.dbwatch_btn = Button(self.db_tab,text='查看',command=lambda : getDBData(self,self.dbchoose_cb.get(),tkGUI.config.tablename,tkGUI.config.curdir),bg="#BAE0E8",padx=10,)
        self.dbdelete_btn = Button(self.db_tab,text='清空',command=lambda : deleteAll(self,self.dbchoose_cb.get(),tkGUI.config.tablename),bg='#BAE0E8',padx=10)
        self.dbname_lb.grid(column=0, row=0,sticky=W)
        self.dbname_entry.grid(column=1,row=0,sticky=W+E)
        self.dbname_btn.grid(column=2,row=0)
        self.dbchoose_lb.grid(column=0,row=1,sticky=W)
        self.dbchoose_cb.grid(column=1,row=1,sticky=W+E)
        # self.dbchoose_btn.grid(column=2,row=1,padx=10)
        self.dbwatch_btn.grid(column=2,row=1,padx=10)
        self.dbdelete_btn.grid(column=3,row=1,padx=10)
        self.dbtable_tree.grid(column=0,row=2,columnspan=4,sticky=E+W,padx=10)
    # Build the spider tab: account/password/target inputs plus the
    # crawl buttons and a progress bar.
    def spider_init(self):
        tkGUI.Spider = QQzoneSpider()
        ph = StringVar()
        ph.set(tkGUI.config.phfilepath)
        ac = StringVar()
        ac.set(tkGUI.config.account)
        # self.phantomjs_lb = Label(self.spider_tab, text= 'phantomjs位置',padx=10,pady=10,font=('微软雅黑'))
        # self.phantomjs_entry = Entry(self.spider_tab,width=30,state='disabled',textvariable=ph)
        # self.phantomjs_btn = Button(self.spider_tab,text='选择',command=lambda : choosePhantom(self,tkGUI.config),bg="#BAE0E8",padx=10,)
        self.choosedb_lb = Label(self.spider_tab,text="选择数据库",padx=10,pady=10,font=('微软雅黑'))
        self.choosedb_cb = Combobox(self.spider_tab,values=self.dbs)
        if(len(self.dbs) > 0):
            self.choosedb_cb.current(0)
        self.account_lb = Label(self.spider_tab, text= '你的账号',padx=10,pady=10,font=('微软雅黑'))
        self.account_entry = Entry(self.spider_tab,width=30,textvariable=ac)
        self.password_lb = Label(self.spider_tab, text= '你的密码',padx=10,pady=10,font=('微软雅黑'))
        self.password_entry = Entry(self.spider_tab,width=30,show="*")
        self.target_lb = Label(self.spider_tab,text='好友账号',padx=10,pady=10,font=('微软雅黑'))
        self.target_entry = Entry(self.spider_tab,width=30)
        self.spider_btn = Button(self.spider_tab,text="S1爬取说说",command=lambda : tkGUI.Spider.startSpider(self,tkGUI.config,self.account_entry.get(),self.password_entry.get(),self.choosedb_cb.get(),self.target_entry.get()),bg="#BAE0E8",padx=10)
        self.spider_like_btn = Button(self.spider_tab,text="S2爬取点赞数",command=lambda : tkGUI.Spider.likeNumSpider(self,tkGUI.config,self.account_entry.get(),self.password_entry.get(),self.choosedb_cb.get(),self.target_entry.get()),bg="#BAE0E8",padx=10)
        self.spider_pb = Progressbar(self.spider_tab)
        self.spider_load = Label(self.spider_tab,text = '未开始',padx=10,pady=10)
        # self.spider_like_btn['state'] = 'normal'
        # self.phantomjs_lb.grid(column=0, row=0,sticky=W)
        # self.phantomjs_entry.grid(column=1,row=0,padx=10)
        # self.phantomjs_btn.grid(column=2,row=0,padx=10,sticky=W)
        self.choosedb_lb.grid(column=0, row=1,sticky=W)
        self.choosedb_cb.grid(column=1,row=1,sticky=W+E,padx=10)
        self.account_lb.grid(column=0,row=2,sticky=W)
        self.account_entry.grid(column=1,row=2)
        self.password_lb.grid(column=0,row=3,sticky=W)
        self.password_entry.grid(column=1,row=3,padx=10)
        self.target_lb.grid(column=0,row=4,sticky=W)
        self.target_entry.grid(column=1,row=4,padx=10)
        self.spider_btn.grid(column=2,row=4,sticky=E,padx=10)
        self.spider_like_btn.grid(column=3,row=4,sticky=E,padx=10)
        self.spider_pb.grid(column=0,row=5,columnspan=2,sticky=E+W,pady=0,padx=10)
        self.spider_load.grid(column=2,row=5,sticky=W)
    # Build the visualization tab: one image button per chart type,
    # backed by PaintChart.Draw with a chart-type index.
    def visul_init(self):
        self.img_scatter = PhotoImage(file = 'img/fsux_scatter.png')
        self.img_bar = PhotoImage(file = 'img/fsux_bar.png')
        self.img_pie = PhotoImage(file = 'img/fsux_pie.png')
        self.img_heatmap = PhotoImage(file = 'img/fsux_heatmap.png')
        self.img_wordcloud = PhotoImage(file = 'img/fsux_wordcloud.png')
        self.img_regression = PhotoImage(file = 'img/fsux_regression.png')
        tkGUI.PaintChart = PaintChart()
        self.dbc_lb = Label(self.visul_tab,text='选择数据源',padx=10,pady=10,font=('微软雅黑'))
        self.dbc_cb = Combobox(self.visul_tab,values=self.dbs)
        if(len(self.dbs) > 0):
            self.dbc_cb.current(0)
        self.intro_lb = Label(self.visul_tab,text='点击以下按钮,获得分析图。热力图在右侧输入年份',padx=10,pady=10,font=('微软雅黑'))
        self.year_entry = Entry(self.visul_tab)
        self.btn_bar = Label(self.visul_tab)
        self.scatter_btn = Button(self.btn_bar,command=lambda:tkGUI.PaintChart.Draw(self,tkGUI.config.curdir,self.dbc_cb.get(),tkGUI.config.tablename,1),image = self.img_scatter,padx=10,pady=10,width=75,height=75)
        self.scatter3d_btn = Button(self.btn_bar,command=lambda:tkGUI.PaintChart.Draw(self,tkGUI.config.curdir,self.dbc_cb.get(),tkGUI.config.tablename,0),image = self.img_scatter,padx=10,pady=10,width=75,height=75)
        self.bar_btn = Button(self.btn_bar,command=lambda:tkGUI.PaintChart.Draw(self,tkGUI.config.curdir,self.dbc_cb.get(),tkGUI.config.tablename,3),image = self.img_bar,padx=10,pady=10,width=75,height=75)
        self.pie_btn = Button(self.btn_bar,command=lambda:tkGUI.PaintChart.Draw(self,tkGUI.config.curdir,self.dbc_cb.get(),tkGUI.config.tablename,4),image = self.img_pie,padx=10,pady=10,width=75,height=75)
        self.heatmap_btn = Button(self.btn_bar,command=lambda:tkGUI.PaintChart.Draw(self,tkGUI.config.curdir,self.dbc_cb.get(),tkGUI.config.tablename,5),image = self.img_heatmap,padx=10,pady=10,width=75,height=75)
        self.wordcloud_btn = Button(self.btn_bar,command=lambda:tkGUI.PaintChart.Draw(self,tkGUI.config.curdir,self.dbc_cb.get(),tkGUI.config.tablename,6),image = self.img_wordcloud,padx=10,pady=10,width=75,height=75)
        self.regression_btn = Button(self.btn_bar,command=lambda:tkGUI.PaintChart.Draw(self,tkGUI.config.curdir,self.dbc_cb.get(),tkGUI.config.tablename,2),image = self.img_regression,padx=10,pady=10,width=75,height=75)
        self.scatter_lb = Label(self.btn_bar,text='散点图(二维)')
        self.scatter3d_lb = Label(self.btn_bar,text='散点图(三维)')
        self.bar_lb = Label(self.btn_bar,text='柱状图')
        self.pie_lb = Label(self.btn_bar,text='南丁玫瑰图')
        self.heatmap_lb = Label(self.btn_bar,text='热力图')
        self.wordcloud_lb = Label(self.btn_bar,text='词云')
        self.regression_lb = Label(self.btn_bar,text='回归曲线')
        self.analysis_pb = Progressbar(self.visul_tab)
        self.analysis_lb = Label(self.visul_tab,text='未开始')
        self.dbc_lb.grid(column=0,row=0,sticky=W)
        self.dbc_cb.grid(column=1,row=0,sticky=W+E,padx=10)
        self.intro_lb.grid(column=0,row=1,sticky=W)
        self.year_entry.grid(column=1,row=1,sticky=E+W,padx=10)
        self.btn_bar.grid(column=0,columnspan=2,row=2)
        self.scatter_btn.grid(column=0,row=0,padx=5,pady=10)
        self.scatter3d_btn.grid(column=1,row=0,padx=5,pady=10)
        self.bar_btn.grid(column=2,row=0,padx=5,pady=10)
        self.pie_btn.grid(column=3,row=0,padx=5,pady=10)
        self.heatmap_btn.grid(column=4,row=0,padx=5,pady=10)
        self.wordcloud_btn.grid(column=5,row=0,padx=5,pady=10)
        self.regression_btn.grid(column=0,row=2,padx=5,pady=10)
        self.scatter_lb.grid(column=0,row=1)
        self.scatter3d_lb.grid(column=1,row=1)
        self.bar_lb.grid(column=2,row=1)
        self.pie_lb.grid(column=3,row=1)
        self.heatmap_lb.grid(column=4,row=1)
        self.wordcloud_lb.grid(column=5,row=1)
        self.regression_lb.grid(column=0,row=3)
        self.analysis_pb.grid(column=0,row=3,padx=10,pady=0,sticky=E+W)
        self.analysis_lb.grid(column=1,row=3,padx=10,pady=0,sticky=W)
# Program entry point
if __name__ == '__main__':
    conf = Config() # load configuration from disk
    app = tkGUI(conf) # build the GUI and enter the Tk main loop
| ly15927086342/qqzoneSpider | __init__.py | __init__.py | py | 11,459 | python | en | code | 6 | github-code | 50 |
14602816900 | """Add deleted flag to materials
Revision ID: 023d07fbeaf2
Revises: 9a69c04ab912
Create Date: 2019-12-15 00:54:19.931299
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '023d07fbeaf2'  # this migration
down_revision = '9a69c04ab912'  # migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add a nullable integer `deleted` flag to Course_Material."""
    deleted_flag = sa.Column('deleted', sa.Integer(), nullable=True)
    op.add_column('Course_Material', deleted_flag)
def downgrade():
    """Revert: drop the `deleted` flag from Course_Material."""
    op.drop_column('Course_Material', 'deleted')
| AnvarGaliullin/LSP | migrations/versions/023d07fbeaf2_add_deleted_flag_to_materials.py | 023d07fbeaf2_add_deleted_flag_to_materials.py | py | 688 | python | en | code | 0 | github-code | 50 |
70523627035 | import gettext
import logging
import os
import sys
import osol_install.auto_install.ai_smf_service as aismf
import osol_install.auto_install.create_client as create_client
import osol_install.auto_install.client_control as clientctrl
import osol_install.auto_install.service as svc
import osol_install.auto_install.service_config as config
from optparse import OptionParser
from bootmgmt import BootmgmtError
from osol_install.auto_install.installadm_common import _, \
validate_service_name, cli_wrap as cw
from solaris_install import check_auth_and_euid, SERVICE_AUTH, \
UnauthorizedUserError
# Property names accepted by the -o|--option flag of set-service.
SERVICE_PROPS = [config.PROP_DEFAULT_MANIFEST, 'aliasof', 'imagepath']
def get_usage():
    """
    get usage for set-service

    Returns the localized usage string shown in help and error output.
    """
    usage = _(
        'set-service\t-o|--option <prop>=<value> <svcname>\n'
        '\t\tprop=value can be:\n'
        '\t\t\taliasof=<existing_service>\n'
        '\t\t\tdefault-manifest=<manifest/script name>\n'
        '\t\t\timagepath=<newpath>')
    return(usage)
def parse_options(cmd_options=None):
    """
    Parse and validate options

    Args: Optional cmd_options, used for unit testing. Otherwise, cmd line
          options handled by OptionParser

    Returns: an options object augmented with svcname, prop and value
             attributes. Exits via parser.error() on any invalid input.
    """
    usage = '\n' + get_usage()
    parser = OptionParser(usage=usage)
    parser.add_option("-o", "--option", dest="propval",
                      default=None, help=_("Set property of a service:"
                      "<-o|--option <property>=<value>"))

    options, args = parser.parse_args(cmd_options)

    # Check for correct number of args; exactly one positional <svcname>
    # is expected. parser.error() prints the message and exits.
    if len(args) != 1:
        if not len(args):
            err_msg = _("Missing required argument, <svcname>")
        else:
            err_msg = _("Unexpected argument(s): %s") % args[1:]
        parser.error(err_msg)

    # Check that we have a property/value
    if not options.propval:
        parser.error(_("Missing required option, -o|--option."))
    options.svcname = args[0]

    # Split <property>=<value>; malformed input becomes a usage error.
    try:
        prop, value = get_prop_val(options.propval)
    except ValueError as err:
        parser.error(err)

    options.prop = prop
    options.value = value
    return options
def get_prop_val(propval):
    '''
    Split a user-supplied <property>=<value> string.

    Returns: tuple consisting of property, value
    Raises: ValueError on malformed name=value string in propval.
    '''
    name, sep, value = propval.partition('=')
    # Guard clauses: reject each malformed shape with its specific message.
    if not sep:
        raise ValueError(_("Option must be of the form <property>=<value>\n"))
    if not name:
        raise ValueError(_("Missing property name in '%s'\n") % propval)
    if name.lower() not in SERVICE_PROPS:
        raise ValueError(_("Unknown property: '%s'\n") % name)
    if not value:
        raise ValueError(_("Missing value for property '%s'\n") % name)
    return name, value
def do_set_service_default_manifest(options):
    '''
    Handle default_manifest property processing.

    Exits (SystemExit) with the underlying message when the service
    rejects the manifest/script name.
    '''
    service = svc.AIService(options.svcname)

    try:
        service.set_default_manifest(options.value)
    except ValueError as error:
        raise SystemExit(error)
def do_update_basesvc(aliassvc, basesvcname):
    '''Updates the baseservice of an alias

    Clients are removed before the switch and re-created afterwards so
    they end up attached to the updated alias.

    Input:
        aliassvc - service object of alias
        basesvcname - name of baseservice
    Returns:
        failures - list of errs encountered

    '''
    logging.debug('do_update_basesvc: alias=%s, basename=%s',
                  aliassvc.name, basesvcname)

    # Remove clients of alias
    clients = config.get_clients(aliassvc.name)
    for clientid in clients:
        logging.debug('removing client %s', clientid)
        clientctrl.remove_client(clientid, suppress_dhcp_msgs=True)

    # Collect errors but keep going, so clients are always re-added below.
    failures = list()
    try:
        aliassvc.update_basesvc(basesvcname)
    except (OSError, config.ServiceCfgError, BootmgmtError) as err:
        print >> sys.stderr, \
            (_("Failed to set %(aliasname)s as alias of: %(bname)s") %
            {'aliasname': aliassvc.name, 'bname': basesvcname})
        print >> sys.stderr, err
        failures.append(err)
    except svc.MultipleUnmountError as err:
        print >> sys.stderr, _("Failed to disable alias")
        print >> sys.stderr, err
        failures.append(err)
    except svc.MountError as err:
        print >> sys.stderr, _("Failed to enable alias")
        print >> sys.stderr, err
        failures.append(err)
    except svc.UnsupportedAliasError as err:
        print >> sys.stderr, err
        failures.append(err)

    # Re-add clients to updated alias
    arch = aliassvc.arch
    for clientid in clients.keys():
        # strip off leading '01'
        client = clientid[2:]
        bootargs = None
        if config.BOOTARGS in clients[clientid]:
            bootargs = clients[clientid][config.BOOTARGS]
        logging.debug('re-adding clientid=%s, bootargs=%s', clientid, bootargs)

        # Don't suppress messages, because user may need to update
        # DHCP configuration
        try:
            create_client.create_new_client(arch, aliassvc, client,
                bootargs=bootargs, suppress_dhcp_msgs=False)
        except BootmgmtError as err:
            failures.append(err)
            print >> sys.stderr, (_('\nError: Unable to recreate client, '
                                    '%(client)s:\n%(error)s') %
                                  {'client': client, 'error': err})
    return failures
def set_aliasof(options):
    '''Change a service's base service

    Validates that the base exists, differs from the alias, matches its
    architecture and would not create a circular alias chain, then
    delegates the switch to do_update_basesvc().

    Returns 1 if any step of the update failed, 0 on success.
    '''
    logging.debug("set alias %s's basesvc to %s",
                  options.svcname, options.value)
    basesvcname = options.value
    aliasname = options.svcname
    if not config.is_service(basesvcname):
        raise SystemExit(_('\nError: Service does not exist: %s\n') %
                         basesvcname)
    if aliasname == basesvcname:
        raise SystemExit(_('\nError: Alias name same as service name: %s\n') %
                         aliasname)
    aliassvc = svc.AIService(aliasname)
    if not aliassvc.is_alias():
        raise SystemExit(_('\nError: Service exists, but is not an '
                           'alias: %s\n') % aliasname)
    # An alias must share the architecture of its base service.
    basesvc_arch = svc.AIService(basesvcname).arch
    aliassvc_arch = aliassvc.arch
    if basesvc_arch != aliassvc_arch:
        raise SystemExit(_("\nError: Architectures of service and alias "
                           "are different.\n"))
    if aliassvc.is_aliasof(basesvcname):
        raise SystemExit(_("\nError: %(aliasname)s is already an alias "
                           "of %(svcname)s\n") % {'aliasname': aliasname,
                           'svcname': basesvcname})
    if svc.AIService(basesvcname).is_alias():
        raise SystemExit(_("\nError: Cannot alias to another alias.\n"))

    # Make sure we aren't creating inter dependencies
    all_aliases = config.get_aliased_services(aliasname, recurse=True)
    if basesvcname in all_aliases:
        raise SystemExit(cw(_("\nError: %(aliasname)s can not be made an "
                              "alias of %(svcname)s because %(svcname)s is "
                              "dependent on %(aliasname)s\n") %
                              {'aliasname': aliasname,
                              'svcname': basesvcname}))

    failures = do_update_basesvc(aliassvc, basesvcname)
    if failures:
        return 1
    return 0
def set_imagepath(options):
    '''Change the location of a service's image

    The new path must be absolute, must not already exist, and must
    not be a symlink; aliases have no image of their own to move.
    Exits via SystemExit on validation or relocation failure.
    '''
    logging.debug("set %s imagepath to %s",
                  options.svcname, options.value)

    new_imagepath = options.value.strip()
    service = svc.AIService(options.svcname)
    if service.is_alias():
        raise SystemExit(cw(_('\nError: Can not change the imagepath of an '
                              'alias.')))
    if not os.path.isabs(new_imagepath):
        raise SystemExit(_("\nError: A full pathname is required for the "
                           "imagepath.\n"))
    if os.path.exists(new_imagepath):
        raise SystemExit(_("\nError: The imagepath already exists: %s\n") %
                         new_imagepath)
    if os.path.islink(new_imagepath):
        raise SystemExit(_("\nError: The imagepath may not be a symlink.\n"))
    # Normalize away any trailing slash before handing off.
    new_imagepath = new_imagepath.rstrip('/')
    try:
        service.relocate_imagedir(new_imagepath)
    except (svc.MountError, aismf.ServicesError, BootmgmtError) as error:
        raise SystemExit(error)
def do_set_service(cmd_options=None):
    '''
    Set a property of a service

    Checks authorization, validates the service name, then dispatches
    to the handler for the requested property.
    '''
    # check for authorization and euid
    try:
        check_auth_and_euid(SERVICE_AUTH)
    except UnauthorizedUserError as err:
        raise SystemExit(err)

    options = parse_options(cmd_options)

    # validate service name
    try:
        validate_service_name(options.svcname)
    except ValueError as err:
        raise SystemExit(str(err))

    logging.debug("options %s", options)

    if not config.is_service(options.svcname):
        raise SystemExit(_('\nError: Service does not exist: %s\n') %
                         options.svcname)

    # Dispatch on the property being set (see SERVICE_PROPS).
    if options.prop == "default-manifest":
        do_set_service_default_manifest(options)
    elif options.prop == "aliasof":
        return set_aliasof(options)
    elif options.prop == "imagepath":
        return set_imagepath(options)
    # Future set-service options can go here in an "else" clause...
if __name__ == '__main__':
    # Install the _() translation function into builtins for this run.
    gettext.install("solaris_install_installadm", "/usr/share/locale")

    # If invoked from the shell directly, mostly for testing,
    # attempt to perform the action.
    do_set_service()
| aszeszo/caiman | usr/src/cmd/installadm/set_service.py | set_service.py | py | 9,634 | python | en | code | 3 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.