gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributional MPO learner implementation."""
import time
from typing import List, Optional
import acme
from acme import types
from acme.tf import losses
from acme.tf import networks
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
class DistributionalMPOLearner(acme.Learner):
  """Distributional MPO learner.

  Maintains online and target policy/critic networks (plus optional shared
  observation networks), and performs MPO policy updates against a
  distributional (categorical) critic trained with the categorical TD loss.
  """

  def __init__(
      self,
      policy_network: snt.Module,
      critic_network: snt.Module,
      target_policy_network: snt.Module,
      target_critic_network: snt.Module,
      discount: float,
      num_samples: int,
      target_policy_update_period: int,
      target_critic_update_period: int,
      dataset: tf.data.Dataset,
      observation_network: types.TensorTransformation = tf.identity,
      target_observation_network: types.TensorTransformation = tf.identity,
      policy_loss_module: Optional[snt.Module] = None,
      policy_optimizer: Optional[snt.Optimizer] = None,
      critic_optimizer: Optional[snt.Optimizer] = None,
      dual_optimizer: Optional[snt.Optimizer] = None,
      clipping: bool = True,
      counter: Optional[counting.Counter] = None,
      logger: Optional[loggers.Logger] = None,
      checkpoint: bool = True,
  ):
    """Initializes the learner.

    Args:
      policy_network: the online (optimized) policy.
      critic_network: the online critic; must produce a distributional value
        (an object exposing `.logits`, `.values` and `.mean()`).
      target_policy_network: the target policy, which lags the online policy.
      target_critic_network: the target critic.
      discount: additional discount applied to the environment discount.
      num_samples: number of action samples (N) used for policy evaluation.
      target_policy_update_period: number of learner steps between copying the
        online policy variables into the target policy.
      target_critic_update_period: number of learner steps between copying the
        online critic (and observation network) variables into the targets.
      dataset: dataset of transitions to learn from; an iterator is created
        over it, so it should already be batched.
      observation_network: optional network applied to observations before the
        policy and critic (shared between them; trained by the critic loss).
      target_observation_network: target version of `observation_network`.
      policy_loss_module: MPO loss; defaults to `losses.MPO` with the epsilons
        configured below.
      policy_optimizer: optimizer for the policy; defaults to Adam(1e-4).
      critic_optimizer: optimizer for the critic; defaults to Adam(1e-4).
      dual_optimizer: optimizer for the MPO dual variables; defaults to
        Adam(1e-2).
      clipping: whether to clip policy/critic gradients by global norm (40).
      counter: counter object used to keep track of steps.
      logger: logger object used to write out training metrics.
      checkpoint: whether to create a checkpointer and snapshotter.
    """
    # Store online and target networks.
    self._policy_network = policy_network
    self._critic_network = critic_network
    self._target_policy_network = target_policy_network
    self._target_critic_network = target_critic_network
    # Make sure observation networks are snt.Module's so they have variables.
    self._observation_network = tf2_utils.to_sonnet_module(observation_network)
    self._target_observation_network = tf2_utils.to_sonnet_module(
        target_observation_network)
    # General learner book-keeping and loggers.
    self._counter = counter or counting.Counter()
    self._logger = logger or loggers.make_default_logger('learner')
    # Other learner parameters.
    self._discount = discount
    self._num_samples = num_samples
    self._clipping = clipping
    # Necessary to track when to update target networks.
    self._num_steps = tf.Variable(0, dtype=tf.int32)
    self._target_policy_update_period = target_policy_update_period
    self._target_critic_update_period = target_critic_update_period
    # Batch dataset and create iterator.
    # TODO(b/155086959): Fix type stubs and remove.
    self._iterator = iter(dataset)  # pytype: disable=wrong-arg-types
    self._policy_loss_module = policy_loss_module or losses.MPO(
        epsilon=1e-1,
        epsilon_penalty=1e-3,
        epsilon_mean=1e-3,
        epsilon_stddev=1e-6,
        init_log_temperature=1.,
        init_log_alpha_mean=1.,
        init_log_alpha_stddev=10.)
    # Create the optimizers.
    self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)
    self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)
    self._dual_optimizer = dual_optimizer or snt.optimizers.Adam(1e-2)
    # Expose the variables.
    policy_network_to_expose = snt.Sequential(
        [self._target_observation_network, self._target_policy_network])
    self._variables = {
        'critic': self._target_critic_network.variables,
        'policy': policy_network_to_expose.variables,
    }
    # Create a checkpointer and snapshotter object.
    self._checkpointer = None
    self._snapshotter = None
    if checkpoint:
      self._checkpointer = tf2_savers.Checkpointer(
          subdirectory='dmpo_learner',
          objects_to_save={
              'counter': self._counter,
              'policy': self._policy_network,
              'critic': self._critic_network,
              'observation': self._observation_network,
              'target_policy': self._target_policy_network,
              'target_critic': self._target_critic_network,
              'target_observation': self._target_observation_network,
              'policy_optimizer': self._policy_optimizer,
              'critic_optimizer': self._critic_optimizer,
              'dual_optimizer': self._dual_optimizer,
              'policy_loss_module': self._policy_loss_module,
              'num_steps': self._num_steps,
          })
      self._snapshotter = tf2_savers.Snapshotter(
          objects_to_save={
              'policy':
                  snt.Sequential([
                      self._target_observation_network,
                      self._target_policy_network
                  ]),
          })
    # Do not record timestamps until after the first learning step is done.
    # This is to avoid including the time it takes for actors to come online and
    # fill the replay buffer.
    self._timestamp = None

  @tf.function
  def _step(self) -> types.NestedTensor:
    """Performs one SGD step on a sampled batch of transitions.

    Copies online variables into the targets when the step counter hits the
    configured periods, then computes the categorical critic loss and the MPO
    policy/dual losses, and applies the three optimizers.

    Returns:
      A dict of scalar tensors (critic/policy losses plus MPO stats) to log.
    """
    # Update target network.
    online_policy_variables = self._policy_network.variables
    target_policy_variables = self._target_policy_network.variables
    online_critic_variables = (
        *self._observation_network.variables,
        *self._critic_network.variables,
    )
    target_critic_variables = (
        *self._target_observation_network.variables,
        *self._target_critic_network.variables,
    )
    # Make online policy -> target policy network update ops.
    if tf.math.mod(self._num_steps, self._target_policy_update_period) == 0:
      for src, dest in zip(online_policy_variables, target_policy_variables):
        dest.assign(src)
    # Make online critic -> target critic network update ops.
    if tf.math.mod(self._num_steps, self._target_critic_update_period) == 0:
      for src, dest in zip(online_critic_variables, target_critic_variables):
        dest.assign(src)
    self._num_steps.assign_add(1)
    # Get data from replay (dropping extras if any). Note there is no
    # extra data here because we do not insert any into Reverb.
    inputs = next(self._iterator)
    transitions: types.Transition = inputs.data
    # Get batch size and scalar dtype.
    batch_size = transitions.reward.shape[0]
    # Cast the additional discount to match the environment discount dtype.
    discount = tf.cast(self._discount, dtype=transitions.discount.dtype)
    # persistent=True because the same tape is used for both the critic and
    # the policy/dual gradients below.
    with tf.GradientTape(persistent=True) as tape:
      # Maybe transform the observation before feeding into policy and critic.
      # Transforming the observations this way at the start of the learning
      # step effectively means that the policy and critic share observation
      # network weights.
      o_tm1 = self._observation_network(transitions.observation)
      # This stop_gradient prevents gradients to propagate into the target
      # observation network. In addition, since the online policy network is
      # evaluated at o_t, this also means the policy loss does not influence
      # the observation network training.
      o_t = tf.stop_gradient(
          self._target_observation_network(transitions.next_observation))
      # Get online and target action distributions from policy networks.
      online_action_distribution = self._policy_network(o_t)
      target_action_distribution = self._target_policy_network(o_t)
      # Sample actions to evaluate policy; of size [N, B, ...].
      sampled_actions = target_action_distribution.sample(self._num_samples)
      # Tile embedded observations to feed into the target critic network.
      # Note: this is more efficient than tiling before the embedding layer.
      tiled_o_t = tf2_utils.tile_tensor(o_t, self._num_samples)  # [N, B, ...]
      # Compute target-estimated distributional value of sampled actions at o_t.
      sampled_q_t_distributions = self._target_critic_network(
          # Merge batch dimensions; to shape [N*B, ...].
          snt.merge_leading_dims(tiled_o_t, num_dims=2),
          snt.merge_leading_dims(sampled_actions, num_dims=2))
      # Compute average logits by first reshaping them and normalizing them
      # across atoms.
      new_shape = [self._num_samples, batch_size, -1]  # [N, B, A]
      sampled_logits = tf.reshape(sampled_q_t_distributions.logits, new_shape)
      sampled_logprobs = tf.math.log_softmax(sampled_logits, axis=-1)
      # logsumexp over the N sample axis averages the N distributions (up to a
      # constant shift, which the categorical loss is invariant to).
      averaged_logits = tf.reduce_logsumexp(sampled_logprobs, axis=0)
      # Construct the expected distributional value for bootstrapping.
      q_t_distribution = networks.DiscreteValuedDistribution(
          values=sampled_q_t_distributions.values, logits=averaged_logits)
      # Compute online critic value distribution of a_tm1 in state o_tm1.
      q_tm1_distribution = self._critic_network(o_tm1, transitions.action)
      # Compute critic distributional loss.
      critic_loss = losses.categorical(q_tm1_distribution, transitions.reward,
                                       discount * transitions.discount,
                                       q_t_distribution)
      critic_loss = tf.reduce_mean(critic_loss)
      # Compute Q-values of sampled actions and reshape to [N, B].
      sampled_q_values = sampled_q_t_distributions.mean()
      sampled_q_values = tf.reshape(sampled_q_values, (self._num_samples, -1))
      # Compute MPO policy loss.
      policy_loss, policy_stats = self._policy_loss_module(
          online_action_distribution=online_action_distribution,
          target_action_distribution=target_action_distribution,
          actions=sampled_actions,
          q_values=sampled_q_values)
    # For clarity, explicitly define which variables are trained by which loss.
    critic_trainable_variables = (
        # In this agent, the critic loss trains the observation network.
        self._observation_network.trainable_variables +
        self._critic_network.trainable_variables)
    policy_trainable_variables = self._policy_network.trainable_variables
    # The following are the MPO dual variables, stored in the loss module.
    dual_trainable_variables = self._policy_loss_module.trainable_variables
    # Compute gradients.
    critic_gradients = tape.gradient(critic_loss, critic_trainable_variables)
    policy_gradients, dual_gradients = tape.gradient(
        policy_loss, (policy_trainable_variables, dual_trainable_variables))
    # Delete the tape manually because of the persistent=True flag.
    del tape
    # Maybe clip gradients.
    if self._clipping:
      policy_gradients = tuple(tf.clip_by_global_norm(policy_gradients, 40.)[0])
      critic_gradients = tuple(tf.clip_by_global_norm(critic_gradients, 40.)[0])
    # Apply gradients.
    self._critic_optimizer.apply(critic_gradients, critic_trainable_variables)
    self._policy_optimizer.apply(policy_gradients, policy_trainable_variables)
    self._dual_optimizer.apply(dual_gradients, dual_trainable_variables)
    # Losses to track.
    fetches = {
        'critic_loss': critic_loss,
        'policy_loss': policy_loss,
    }
    fetches.update(policy_stats)  # Log MPO stats.
    return fetches

  def step(self):
    """Runs one learning step and handles counting, checkpointing and logging."""
    # Run the learning step.
    fetches = self._step()
    # Compute elapsed time.
    timestamp = time.time()
    # First call reports 0 walltime because self._timestamp starts as None.
    elapsed_time = timestamp - self._timestamp if self._timestamp else 0
    self._timestamp = timestamp
    # Update our counts and record it.
    counts = self._counter.increment(steps=1, walltime=elapsed_time)
    fetches.update(counts)
    # Checkpoint and attempt to write the logs.
    if self._checkpointer is not None:
      self._checkpointer.save()
    if self._snapshotter is not None:
      self._snapshotter.save()
    self._logger.write(fetches)

  def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
    """Returns the (target) network variables requested by name as numpy arrays."""
    return [tf2_utils.to_numpy(self._variables[name]) for name in names]
| |
# -*- coding: utf-8 -*-
{
'!langcode!': 'id',
'!langname!': 'Indonesian',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%d days ago': '%d hari yang lalu',
'%d hours ago': '%d jam yang lalu',
'%d minutes ago': '%d menit yang lalu',
'%d months ago': '%d bulan yang lalu',
'%d seconds ago': '%d detik yang lalu',
'%d seconds from now': '%d detik dari sekarang',
'%d weeks ago': '%d minggu yang lalu',
'%d years ago': '%d tahun yang lalu',
'%s %%{row} deleted': '%s %%{row} dihapus',
'%s %%{row} updated': '%s %%{row} diperbarui',
'%s selected': '%s dipilih',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'(requires internet access, experimental)': '(membutuhkan akses internet, eksperimental)',
'(something like "it-it")': '(sesuatu seperti "it-it")',
'1 day ago': '1 hari yang lalu',
'1 hour ago': '1 jam yang lalu',
'1 minute ago': '1 menit yang lalu',
'1 month ago': '1 bulan yang lalu',
'1 second ago': '1 detik yang lalu',
'1 week ago': '1 minggu yang lalu',
'1 year ago': '1 tahun yang lalu',
'< Previous': '< Sebelumnya',
'?': '?',
'About': 'Tentang',
'About application': 'Tentang Aplikasi',
'Access Control': 'Access Control',
'Add': 'Tambah',
'Additional code for your application': 'Tambahan kode untuk aplikasi Anda',
'Address': 'Alamat',
'admin': 'admin',
'Admin language': 'Bahasa Admin',
'administrative interface': 'antarmuka administrative',
'Administrator Password:': 'Administrator Kata Sandi:',
'Ajax Recipes': 'Resep Ajax',
'An error occured, please %s the page': 'Terjadi kesalahan, silakan %s halaman',
'And': 'Dan',
'and rename it:': 'dan memberi nama baru itu:',
'Answer': 'Jawaban',
'appadmin is disabled because insecure channel': 'AppAdmin dinonaktifkan karena kanal tidak aman',
'application "%s" uninstalled': 'aplikasi "%s" dihapus',
'application compiled': 'aplikasi dikompilasi',
'Application name:': 'Nama Aplikasi:',
'are not used yet': 'belum digunakan',
'Are you sure you want to delete this object?': 'Apakah Anda yakin ingin menghapus ini?',
'Are you sure you want to uninstall application "%s"?': 'Apakah Anda yakin ingin menghapus aplikasi "%s"?',
'Available Databases and Tables': 'Database dan Tabel yang tersedia',
'Back': 'Kembali',
'Buy this book': 'Beli buku ini',
"Buy web2py's book": "Buy web2py's book",
'Cache': 'Cache',
'cache': 'cache',
'Cache Cleared': 'Cache Cleared',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, kesalahan dan sesi dibersihkan',
'can be a git repo': 'bisa menjadi repo git',
'Cancel': 'Batalkan',
'Cannot be empty': 'Tidak boleh kosong',
'Change admin password': 'Ubah kata sandi admin',
'Change password': 'Ubah kata sandi',
'Check for upgrades': 'Periksa upgrade',
'Check to delete': 'Centang untuk menghapus',
'Checking for upgrades...': 'Memeriksa untuk upgrade...',
'Clean': 'Bersih',
'Clear': 'Hapus',
'Clear CACHE?': 'Hapus CACHE?',
'Clear DISK': 'Hapus DISK',
'Clear RAM': 'Hapus RAM',
'Click row to expand traceback': 'Klik baris untuk memperluas traceback',
'Close': 'Tutup',
'collapse/expand all': 'kempis / memperluas semua',
'Community': 'Komunitas',
'Compile': 'Kompilasi',
'compiled application removed': 'aplikasi yang dikompilasi dihapus',
'Components and Plugins': 'Komponen dan Plugin',
'Config.ini': 'Config.ini',
'contains': 'mengandung',
'Controller': 'Controller',
'Controllers': 'Kontrolir',
'controllers': 'kontrolir',
'Copyright': 'Hak Cipta',
'Count': 'Hitung',
'Create': 'Buat',
'create file with filename:': 'buat file dengan nama:',
'created by': 'dibuat oleh',
'CSV (hidden cols)': 'CSV (kolom tersembunyi)',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'currently running': 'sedang berjalan',
'data uploaded': 'data diunggah',
'Database': 'Database',
'Database %s select': 'Memilih Database %s',
'database administration': 'administrasi database',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'db',
'DB Model': 'DB Model',
'defines tables': 'mendefinisikan tabel',
'Delete': 'Hapus',
'delete all checked': 'menghapus semua yang di centang',
'Delete this file (you will be asked to confirm deletion)': 'Hapus file ini (Anda akan diminta untuk mengkonfirmasi penghapusan)',
'Delete:': 'Hapus:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Keterangan',
'design': 'disain',
'Design': 'Design',
'direction: ltr': 'petunjuk: ltr',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Dihapus',
'Documentation': 'Dokumentasi',
"Don't know what to do?": 'Tidak tahu apa yang harus dilakukan?',
'done!': 'selesai!',
'Download': 'Unduh',
'Download .w2p': 'Unduh .w2p',
'download layouts': 'unduh layouts',
'download plugins': 'unduh plugins',
'Duration': 'Durasi',
'Edit': 'Mengedit',
'Edit application': 'Mengedit Aplikasi',
'Edit current record': 'Edit current record',
'Email and SMS': 'Email and SMS',
'Email sent': 'Email dikirim',
'enter a valid email address': 'masukkan alamat email yang benar',
'enter a valid URL': 'masukkan URL yang benar',
'enter a value': 'masukkan data',
'Error': 'Kesalahan',
'Error logs for "%(app)s"': 'Catatan kesalahan untuk "%(app)s"',
'Errors': 'Kesalahan',
'export as csv file': 'ekspor sebagai file csv',
'Export:': 'Ekspor:',
'exposes': 'menghadapkan',
'extends': 'meluaskan',
'FAQ': 'FAQ',
'filter': 'menyaring',
'First Name': 'Nama Depan',
'Forgot username?': 'Lupa nama pengguna?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Aplikasi Gratis',
'Gender': 'Jenis Kelamin',
'Gift Day': 'Gift Day',
'Graph Model': 'Graph Model',
'Group %(group_id)s created': 'Grup %(group_id)s dibuat',
'Group uniquely assigned to user %(id)s': 'Grup unik yang diberikan kepada pengguna %(id)s',
'Groups': 'Grup',
'Guest': 'Tamu',
'Hello World': 'Halo Dunia',
'Help': 'Bantuan',
'Helping web2py': 'Helping web2py',
'Home': 'Halaman Utama',
'How did you get here?': 'Bagaimana kamu bisa di sini?',
'Image': 'Gambar',
'import': 'impor',
'Import/Export': 'Impor/Ekspor',
'includes': 'termasuk',
'Install': 'Memasang',
'Installation': 'Instalasi',
'Installed applications': 'Aplikasi yang diinstal',
'Internal State': 'Internal State',
'Introduction': 'Pengenalan',
'Invalid email': 'Email tidak benar',
'Invalid Query': 'Invalid Query',
'invalid request': 'invalid request',
'Key': 'Key',
'Language': 'Bahasa',
'languages': 'bahasa',
'Languages': 'Bahasa',
'Last Name': 'Nama Belakang',
'Layout': 'Layout',
'License for': 'Lisensi untuk',
'Live Chat': 'Live Chat',
'loading...': 'sedang memuat...',
'Log In': 'Log In',
'Logged in': 'Masuk',
'Logged out': 'Keluar',
'Login': 'Masuk',
'Login to the Administrative Interface': 'Masuk ke antarmuka Administrasi',
'Logout': 'Keluar',
'Lost Password': 'Lupa Kata Sandi',
'Lost password?': 'Lupa kata sandi?',
'Maintenance': 'Pemeliharaan',
'Manage': 'Mengelola',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Mengelola Cache',
'Max Value (R$)': 'Max Value (R$)',
'Memberships': 'Memberships',
'Menu Model': 'Menu Model',
'Models': 'Model',
'models': 'model',
'Modules': 'Modul',
'modules': 'modul',
'My Sites': 'Situs Saya',
'Name': 'Name',
'New': 'Baru',
'new application "%s" created': 'aplikasi baru "%s" dibuat',
'New password': 'Kata sandi baru',
'New Record': 'New Record',
'new record inserted': 'new record inserted',
'New simple application': 'Aplikasi baru sederhana',
'News': 'Berita',
'next %s rows': 'next %s rows',
'next 100 rows': '100 baris berikutnya',
'Next >': 'Berikutnya >',
'Next Page': 'Halaman Berikutnya',
'No databases in this application': 'Tidak ada database dalam aplikasi ini',
'No ticket_storage.txt found under /private folder': 'Tidak ditemukan ticket_storage.txt dalam folder /private',
'not a Zip Code': 'bukan Kode Pos',
'Note': 'Catatan',
'Old password': 'Kata sandi lama',
'Online book': 'Online book',
'Online examples': 'Contoh Online',
'Or': 'Atau',
'or alternatively': 'atau alternatif',
'Or Get from URL:': 'Atau Dapatkan dari URL:',
'or import from csv file': 'atau impor dari file csv',
'Other Plugins': 'Plugin Lainnya',
'Other Recipes': 'Resep Lainnya',
'Overview': 'Ikhtisar',
'Overwrite installed app': 'Ikhtisar app yang terinstall',
'Pack all': 'Pak semua',
'Pack compiled': 'Pak yang telah dikompilasi',
'Pack custom': 'Pak secara kustomisasi',
'Password': 'Kata sandi',
'Password changed': 'Kata sandi berubah',
"Password fields don't match": 'Kata sandi tidak sama',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': 'silahkan masukan kata sandi anda lagi',
'plugins': 'plugin',
'Plugins': 'Plugin',
'Plural-Forms:': 'Bentuk-Jamak:',
'Powered by': 'Didukung oleh',
'Preface': 'Pendahuluan',
'previous %s rows': 'previous %s rows',
'previous 100 rows': '100 baris sebelumnya',
'Previous Page': 'Halaman Sebelumnya',
'Private files': 'File pribadi',
'private files': 'file pribadi',
'Profile': 'Profil',
'Profile updated': 'Profil diperbarui',
'Project Progress': 'Perkembangan Proyek',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Query:',
'Quick Examples': 'Contoh Cepat',
'Raffle Date': 'Raffle Date',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Dihapus',
'Recipes': 'Resep',
'Record': 'Record',
'record does not exist': 'record does not exist',
'Record id': 'Record id',
'Register': 'Daftar',
'Registration successful': 'Pendaftaran berhasil',
'reload': 'memuat kembali',
'Reload routes': 'Memuat rute kembali',
'Remember me (for 30 days)': 'Ingat saya (selama 30 hari)',
'Remove compiled': 'Hapus Kompilasi',
'Request reset password': 'Meminta reset kata sandi',
'Role': 'Role',
'Roles': 'Roles',
'Rows in Table': 'Baris dalam Tabel',
'Rows selected': 'Baris dipilih',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Jalankan tes di file ini (untuk menjalankan semua file, Anda juga dapat menggunakan tombol berlabel 'test')",
'Running on %s': 'Berjalan di %s',
'Save model as...': 'Simpan model sebagai ...',
'Save profile': 'Simpan profil',
'Search': 'Cari',
'Select Files to Package': 'Pilih Berkas untuk Paket',
'Send Email': 'Kirim Email',
'Service': 'Layanan',
'Services': 'Services',
'Sign Up': 'Sign Up',
'Site': 'Situs',
'Size of cache:': 'Ukuran cache:',
'starts with': 'dimulai dengan',
'state': 'state',
'Static': 'Statis',
'static': 'statis',
'Statistics': 'Statistik',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Support': 'Mendukung',
'Table': 'Tabel',
'test': 'tes',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'Logika aplikasi, setiap jalur URL dipetakan dalam satu fungsi terpapar di kontrolir',
'The Core': 'The Core',
'The data representation, define database tables and sets': 'Representasi data, mendefinisikan tabel database dan set',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'There are no plugins': 'Tidak ada plugin',
'There are no private files': 'Tidak ada file pribadi',
'These files are not served, they are only available from within your app': 'File-file ini tidak dilayani, mereka hanya tersedia dari dalam aplikasi Anda',
'These files are served without processing, your images go here': 'File-file ini disajikan tanpa pengolahan, gambar Anda di sini',
'This App': 'App Ini',
'Time in Cache (h:m:s)': 'Waktu di Cache (h: m: s)',
'To create a plugin, name a file/folder plugin_[name]': 'Untuk membuat sebuah plugin, nama file / folder plugin_ [nama]',
'too short': 'terlalu pendek',
'Traceback': 'Traceback',
'Translation strings for the application': 'Terjemahan string untuk aplikasi',
'Try the mobile interface': 'Coba antarmuka ponsel',
'Twitter': 'Twitter',
'Unable to download because:': 'Tidak dapat mengunduh karena:',
'unable to parse csv file': 'tidak mampu mengurai file csv',
'update all languages': 'memperbarui semua bahasa',
'Update:': 'Perbarui:',
'Upload': 'Unggah',
'Upload a package:': 'Unggah sebuah paket:',
'Upload and install packed application': 'Upload dan pasang aplikasi yang dikemas',
'upload file:': 'unggah file:',
'upload plugin file:': 'unggah file plugin:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'User',
'User %(id)s Logged-in': 'Pengguna %(id)s Masuk',
'User %(id)s Logged-out': 'Pengguna %(id)s Keluar',
'User %(id)s Password changed': 'Pengguna %(id)s Kata Sandi berubah',
'User %(id)s Password reset': 'Pengguna %(id)s Kata Sandi telah direset',
'User %(id)s Profile updated': 'Pengguna %(id)s Profil diperbarui',
'User %(id)s Registered': 'Pengguna %(id)s Terdaftar',
'Users': 'Users',
'value already in database or empty': 'data sudah ada dalam database atau kosong',
'value not allowed': 'data tidak benar',
'value not in database': 'data tidak ada dalam database',
'Verify Password': 'Verifikasi Kata Sandi',
'Version': 'Versi',
'Videos': 'Videos',
'View': 'Lihat',
'Views': 'Lihat',
'views': 'lihat',
'Web Framework': 'Kerangka Web',
'web2py is up to date': 'web2py terbaru',
'web2py Recent Tweets': 'Tweet web2py terbaru',
'Website': 'Situs Web',
'Welcome': 'Selamat Datang',
'Welcome to web2py!': 'Selamat Datang di web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'Wish': 'Wish',
'Wish Image': 'Wish Image',
'Working...': 'Working...',
'You are successfully running web2py': 'Anda berhasil menjalankan web2py',
'You can modify this application and adapt it to your needs': 'Anda dapat memodifikasi aplikasi ini dan menyesuaikan dengan kebutuhan Anda',
'You visited the url %s': 'Anda mengunjungi url %s',
}
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_concurrency import processutils as putils
from oslo_utils import timeutils
from cinder import context
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.targets import lio
class TestLioAdmDriver(test.TestCase):
    """Unit tests for the LIO iSCSI target helper (cinder.volume.targets.lio)."""

    def setUp(self):
        super(TestLioAdmDriver, self).setUp()
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.safe_get = mock.Mock(side_effect=self.fake_safe_get)
        self.configuration.iscsi_ip_address = '10.9.8.7'
        self.fake_volumes_dir = '/tmp/tmpfile'
        self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
        self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_volume_id = '83c2e877-feed-46be-8435-77884fe55b45'
        # _verify_rtstool shells out to cinder-rtstool; patch it so LioAdm
        # can be constructed without the tool being installed.
        with mock.patch.object(lio.LioAdm, '_verify_rtstool'):
            self.target = lio.LioAdm(root_helper=utils.get_root_helper(),
                                     configuration=self.configuration)
        self.fake_iscsi_scan = ('iqn.2010-10.org.openstack:'
                                'volume-83c2e877-feed-46be-8435-77884fe55b45')
        # Stub the db so CHAP lookups always yield the ('foo', 'bar') pair.
        self.target.db = mock.MagicMock(
            volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'})
        self.testvol = {
            'project_id': self.fake_project_id,
            'name': 'volume-%s' % self.fake_volume_id,
            'size': 1,
            'id': self.fake_volume_id,
            'volume_type_id': None,
            'provider_location': '10.9.8.7:3260 '
                                 'iqn.2010-10.org.openstack:'
                                 'volume-%s 0' % self.fake_volume_id,
            'provider_auth': 'CHAP c76370d66b 2FE0CQ8J196R',
            'provider_geometry': '512 512',
            'created_at': timeutils.utcnow(),
            'host': 'fake_host@lvm#lvm'}

    def fake_safe_get(self, value):
        """Stand-in for Configuration.safe_get used by the target driver.

        Returns None implicitly for any key not handled below, mirroring
        safe_get's tolerant behavior for unset options.
        """
        if value == 'volumes_dir':
            return self.fake_volumes_dir
        elif value == 'iscsi_protocol':
            return self.configuration.iscsi_protocol
        elif value == 'iscsi_target_prefix':
            return self.iscsi_target_prefix

    def test_get_target(self):
        """_get_target should return the matching IQN from the scan output."""
        def _fake_execute(*args, **kwargs):
            return self.fake_iscsi_scan, None
        self.stubs.Set(utils,
                       'execute',
                       _fake_execute)
        self.assertEqual('iqn.2010-10.org.openstack:'
                         'volume-83c2e877-feed-46be-8435-77884fe55b45',
                         self.target._get_target('iqn.2010-10.org.openstack:'
                                                 'volume-83c2e877-feed-46be-'
                                                 '8435-77884fe55b45'))

    def test_get_iscsi_target(self):
        """LIO does not track per-volume target IDs, so this is always 0."""
        ctxt = context.get_admin_context()
        expected = 0
        self.assertEqual(expected,
                         self.target._get_iscsi_target(ctxt,
                                                       self.testvol['id']))

    def test_get_target_and_lun(self):
        """Target ID and LUN are both fixed at 0 for the LIO driver."""
        lun = 0
        iscsi_target = 0
        ctxt = context.get_admin_context()
        expected = (iscsi_target, lun)
        self.assertEqual(expected,
                         self.target._get_target_and_lun(ctxt, self.testvol))

    def test_get_target_chap_auth(self):
        """CHAP credentials come from the stubbed db provider_auth field."""
        ctxt = context.get_admin_context()
        test_vol = 'iqn.2010-10.org.openstack:'\
                   'volume-83c2e877-feed-46be-8435-77884fe55b45'
        self.assertEqual(('foo', 'bar'),
                         self.target._get_target_chap_auth(ctxt, test_vol))

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(lio.LioAdm, '_get_target')
    def test_create_iscsi_target(self, mget_target, mexecute):
        """create_iscsi_target returns whatever target id _get_target finds."""
        mget_target.return_value = 1
        test_vol = 'iqn.2010-10.org.openstack:'\
                   'volume-83c2e877-feed-46be-8435-77884fe55b45'
        self.assertEqual(
            1,
            self.target.create_iscsi_target(
                test_vol,
                1,
                0,
                self.fake_volumes_dir))

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(lio.LioAdm, '_get_target')
    def test_create_iscsi_target_already_exists(self, mget_target, mexecute):
        """An rtstool failure surfaces as ISCSITargetCreateFailed."""
        mexecute.side_effect = putils.ProcessExecutionError
        test_vol = 'iqn.2010-10.org.openstack:'\
                   'volume-83c2e877-feed-46be-8435-77884fe55b45'
        chap_auth = ('foo', 'bar')
        self.assertRaises(exception.ISCSITargetCreateFailed,
                          self.target.create_iscsi_target,
                          test_vol,
                          1,
                          0,
                          self.fake_volumes_dir,
                          chap_auth)

    @mock.patch.object(utils, 'execute')
    def test_remove_iscsi_target(self, mexecute):
        """Removal issues a single 'cinder-rtstool delete' as root."""
        test_vol = 'iqn.2010-10.org.openstack:'\
                   'volume-83c2e877-feed-46be-8435-77884fe55b45'
        # Test the normal case
        self.target.remove_iscsi_target(0,
                                        0,
                                        self.testvol['id'],
                                        self.testvol['name'])
        mexecute.assert_called_once_with('cinder-rtstool',
                                         'delete',
                                         test_vol,
                                         run_as_root=True)
        # Test the failure case: putils.ProcessExecutionError
        mexecute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetRemoveFailed,
                          self.target.remove_iscsi_target,
                          0,
                          0,
                          self.testvol['id'],
                          self.testvol['name'])

    @mock.patch.object(lio.LioAdm, '_get_target_chap_auth')
    @mock.patch.object(lio.LioAdm, 'create_iscsi_target')
    def test_ensure_export(self, _mock_create, mock_get_chap):
        """ensure_export recreates the target with the existing CHAP auth."""
        ctxt = context.get_admin_context()
        mock_get_chap.return_value = ('foo', 'bar')
        self.target.ensure_export(ctxt,
                                  self.testvol,
                                  self.fake_volumes_dir)
        test_vol = 'iqn.2010-10.org.openstack:'\
                   'volume-83c2e877-feed-46be-8435-77884fe55b45'
        _mock_create.assert_called_once_with(
            test_vol,
            0, 0, self.fake_volumes_dir, ('foo', 'bar'),
            check_exit_code=False,
            old_name=None)

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(lio.LioAdm, '_get_iscsi_properties')
    def test_initialize_connection(self, mock_get_iscsi, mock_execute):
        """Attaching registers the initiator via 'cinder-rtstool add-initiator'."""
        connector = {'initiator': 'fake_init'}
        # Test the normal case
        mock_get_iscsi.return_value = 'foo bar'
        expected_return = {'driver_volume_type': 'iscsi',
                           'data': 'foo bar'}
        self.assertEqual(expected_return,
                         self.target.initialize_connection(self.testvol,
                                                           connector))
        mock_execute.assert_called_once_with(
            'cinder-rtstool', 'add-initiator',
            'iqn.2010-10.org.openstack:'
            'volume-83c2e877-feed-46be-8435-77884fe55b45',
            'c76370d66b', '2FE0CQ8J196R',
            connector['initiator'],
            run_as_root=True)
        # Test the failure case: putils.ProcessExecutionError
        mock_execute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetAttachFailed,
                          self.target.initialize_connection,
                          self.testvol,
                          connector)

    @mock.patch.object(utils, 'execute')
    def test_terminate_connection(self, _mock_execute):
        """Detaching removes the initiator via 'cinder-rtstool delete-initiator'."""
        connector = {'initiator': 'fake_init'}
        self.target.terminate_connection(self.testvol,
                                         connector)
        _mock_execute.assert_called_once_with(
            'cinder-rtstool', 'delete-initiator',
            'iqn.2010-10.org.openstack:'
            'volume-83c2e877-feed-46be-8435-77884fe55b45',
            connector['initiator'],
            run_as_root=True)

    @mock.patch.object(utils, 'execute')
    def test_terminate_connection_fail(self, _mock_execute):
        """An rtstool failure on detach surfaces as ISCSITargetDetachFailed."""
        _mock_execute.side_effect = putils.ProcessExecutionError
        connector = {'initiator': 'fake_init'}
        self.assertRaises(exception.ISCSITargetDetachFailed,
                          self.target.terminate_connection,
                          self.testvol,
                          connector)

    def test_iscsi_protocol(self):
        """The LIO driver always reports the plain 'iscsi' protocol."""
        # Fixed: expected value goes first, matching the (expected, actual)
        # assertEqual ordering used throughout this test class.
        self.assertEqual('iscsi', self.target.iscsi_protocol)
| |
#! /usr/bin/env python
import copy
import numbers
import random
from itertools import permutations

import numpy
class graph:
    '''A graph data structure.

    The data structure consists of a dictionary for which each element is a
    node. Each node stores a dictionary of all other nodes to which the node
    is connected along with the length of the edge.
    '''
    def __init__(self, name=None):
        '''Initialize the graph.

        Parameters:

        name: None, dict, or any hashable type
            None creates an empty graph. A dict of adjacency dicts is copied
            into the new graph. Anything else becomes the name of a single
            initial node.
        '''
        if name is None:
            self.g = {}
        elif type(name) is dict:
            for elem in name:
                if type(name[elem]) is not dict:
                    raise ValueError('graph.__init__(): cast to graph failed!')
            self.g = {}
            for elem in name:
                self.g[elem] = name[elem]
        else:
            self.g = {name : {}}
    def __getitem__(self, i):
        '''Return the edges connected to a specific node.'''
        if i not in self.g:
            raise ValueError('node not in graph!')
        return self.g[i]
    def add_node(self, name, edges=None):
        '''Add a node to the graph.

        Parameters:

        name: any hashable type
            The name of the node. This should usually be a short string or an
            integer.
        edges: dict, optional
            The nodes this new node connects to. The key for each element of
            this dictionary is the name of an adjacent node and the associated
            value is the length of the corresponding edge.

        Raises ValueError if the node already exists or an edge points to a
        node that is not in the graph, TypeError for malformed edges.
        '''
        # Make sure that the node is not already in the graph.
        if name in self.g:
            raise ValueError('graph.add_node(): node already in graph!')
        if edges is None:
            self.g[name] = {}
            return
        # Make sure that the edges are a dictionary.
        if type(edges) is not dict:
            raise TypeError('graph.add_node(): edges must be a dictionary!')
        for elem in edges:
            # numbers.Real covers int, float (and Python 2's long), so the
            # check works on both Python versions.
            if not isinstance(edges[elem], numbers.Real):
                raise TypeError('graph.add_node(): ' + str(elem) +
                                ': edge length is not a number!')
            # Check that the edges are to nodes already in the graph.
            if elem not in self.g:
                raise ValueError('graph.add_node(): ' + str(elem) +
                                 ': edge connects to nonexistent node!')
        # Store a copy so later mutation of the caller's dict cannot silently
        # corrupt the graph (the original stored the dict by reference).
        self.g[name] = dict(edges)
        # Now update all the adjacent nodes.
        for adj_node in edges:
            self.g[adj_node][name] = edges[adj_node]
    def add_edge(self, x, y, length):
        '''Add an edge between nodes x and y with given length. If an edge
        already connects x and y, this function changes the length.

        Parameters:

        x: node
        y: node
        length: a real number
        '''
        # First check that the nodes are in the graph. (The error messages
        # previously named add_node; they now name this function.)
        if x not in self.g:
            raise ValueError('graph.add_edge(): ' + str(x) + ' not in graph!')
        if y not in self.g:
            raise ValueError('graph.add_edge(): ' + str(y) + ' not in graph!')
        # Check that the length is a number.
        if not isinstance(length, numbers.Real):
            raise ValueError('graph.add_edge(): length must be a number!')
        self.g[x][y] = length
        self.g[y][x] = length
    def neighbors(self, x):
        '''Returns all nodes with an edge connecting them to x.

        Parameters:

        x: node
            A node in the graph.

        Returns:

        list
            A list of all nodes connected to the given node.
        '''
        if x not in self.g:
            raise ValueError('graph.neighbors():' + str(x) + ' not in graph!')
        # list() so callers always get a real list (Python 3 keys() is a view).
        return list(self.g[x].keys())
    def delete_node(self, x):
        '''Delete a node from the graph.

        Parameters:

        x: node
            The node to be deleted
        '''
        if x not in self.g:
            raise ValueError('delete_node(): node not in graph!')
        # Delete the edges connecting the node.
        for node in self.neighbors(x):
            del self.g[node][x]
        # Delete the node itself.
        del self.g[x]
    def get_edge(self, x, y):
        '''Returns the value of the edge connecting x and y.

        Parameters:

        x: node
            A node in the graph
        y: node
            Another node in the graph

        Returns:

        number
            The length of the edge connecting x and y
        '''
        # First check that there exists an edge connecting x and y.
        if y not in self.g[x]:
            raise ValueError('graph.get_edge(): no edge connecting the nodes!')
        return self.g[x][y]
    def count_nodes(self):
        '''Return the number of nodes in the graph.'''
        return len(self.g)
    def nodes(self):
        '''Return the nodes in the graph as a list.'''
        return list(self.g.keys())
def shortest_path(g, a, b):
    '''Calculate the shortest path from a to b in a graph g by brute force.

    Parameters:

    g: graph
        The graph on which to find the shortest route
    a: node
        Starting node
    b: node
        Ending node

    Returns:

    shortest_path_length: number
        The length of the shortest path (numpy.inf if b is unreachable)
    shortest_path_nodes: list
        The nodes along the shortest path (empty if b is unreachable)
    '''
    if a == b:
        # Trivial case of starting and ending on the same node.
        return (0, [b])
    if len(g.neighbors(a)) == 0:
        # Dead end: b cannot be reached from here.
        return (numpy.inf, [])
    shortest_path_length = numpy.inf
    shortest_path_nodes = []
    for adj_node in g[a]:
        edge_length = g[a][adj_node]
        # Recurse on a copy of the graph with 'a' removed so no path can
        # revisit it.
        h = copy.deepcopy(g)
        h.delete_node(a)
        sub_path_length, sub_path_nodes = shortest_path(h, adj_node, b)
        if edge_length + sub_path_length < shortest_path_length:
            shortest_path_length = edge_length + sub_path_length
            shortest_path_nodes = sub_path_nodes
    # BUG FIX: only prepend 'a' when a path was actually found; the original
    # unconditionally inserted it and returned (inf, [a]) for an unreachable
    # target instead of (inf, []).
    if shortest_path_nodes:
        shortest_path_nodes.insert(0, a)
    return (shortest_path_length, shortest_path_nodes)
def salesman_brute(g):
    '''Calculate the shortest tour that visits every node of the graph, g, and
    returns to the starting node.

    Parameters:

    g: graph
        The graph on which to find the shortest tour

    Returns:

    shortest_tour_length: number
        The length of the shortest tour
    shortest_tour_nodes: list
        The nodes along the shortest tour (start node first and last)
    '''
    # list() keeps this working when nodes() returns a Python-3 dict view
    # (views have no remove() and cannot feed random.choice()).
    nodes = list(g.nodes())
    # Randomly pick a node to start; the tour is a cycle, so the choice does
    # not affect the optimal length.
    start_node = random.choice(nodes)
    permute_nodes = [node for node in nodes if node != start_node]
    # BUG FIX: initialize the result; the original left 'shortest_tour'
    # unbound (NameError) for a single-node graph and also crashed on the
    # unbound loop variable 'node' after an empty permutation.
    min_length = numpy.inf
    shortest_tour = [start_node, start_node]
    if not permute_nodes:
        return (0, shortest_tour)
    for path in permutations(permute_nodes):
        prev_node = start_node
        path_length = 0
        for node in path:
            path_length += shortest_path(g, prev_node, node)[0]
            prev_node = node
        # Close the cycle back to the start.
        path_length += shortest_path(g, prev_node, start_node)[0]
        if path_length < min_length:
            min_length = path_length
            shortest_tour = [start_node] + list(path) + [start_node]
    return (min_length, shortest_tour)
if __name__ == '__main__':
    # Set up a default graph.
    G = graph()
    G.add_node('A')
    G.add_node('B')
    G.add_node('C')
    G.add_node('D')
    G.add_node('E')
    G.add_node('F')
    G.add_edge('A', 'B', 10)
    G.add_edge('A', 'C', 20)
    G.add_edge('A', 'E', 50)
    G.add_edge('B', 'D', 24)
    G.add_edge('B', 'C', 15)
    G.add_edge('B', 'E', 30)
    G.add_edge('C', 'E', 6)
    G.add_edge('C', 'F', 12)
    G.add_edge('D', 'E', 15)
    G.add_edge('D', 'F', 40)
    G.add_edge('E', 'F', 18)
    TOUR = salesman_brute(G)
    # Single-argument print() produces identical output on Python 2 and 3;
    # the original 'print x, y' statements were Python-2-only syntax.
    print("Optimum tour: %s" % (TOUR[1],))
    print("Tour length: %s" % (TOUR[0],))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
##############################################################################
import scapy.config
import scapy.fields
import scapy.layers.dot11
import scapy.packet
import scapy.utils
##############################################################################
from pcapdevice import PcapDevice
##############################################################################
# Suppress useless warnings from scapy... (40 == logging.ERROR, so only
# errors and worse get through).
scapy.config.conf.logLevel = 40
##############################################################################
def str2hex(string):
    """Convert a string to its hexadecimal representation."""
    return ''.join('%02x' % ord(char) for char in string)
# Register pcap linktype 119 so captures recorded with the Prism
# monitor-mode header are dissected via scapy's PrismHeader layer.
scapy.config.Conf.l2types.register_num2layer(
    119,
    scapy.layers.dot11.PrismHeader
)
def isFlagSet(self, name, value):
    """Return True if the given field 'includes' the given value.

    Exact behaviour of this function is specific to the field-type:
    EnumFields compare the symbolic name of the current value, FlagsFields
    test the bit whose position matches the flag name.
    """
    field, val = self.getfield_and_val(name)
    if isinstance(field, scapy.fields.EnumField):
        if val not in field.i2s:
            return False
        return field.i2s[val] == value
    else:
        # NOTE(review): 'field.names.index([value])' looks suspicious -- if
        # 'names' is a list of strings this raises ValueError; it only works
        # for scapy versions storing FlagsField names as nested lists.
        # Confirm against the scapy version in use before changing.
        return (1 << field.names.index([value])) & self.__getattr__(name) != 0
# Install as a method on every scapy Packet, then drop the module-level name.
scapy.packet.Packet.isFlagSet = isFlagSet
del isFlagSet
def areFlagsSet(self, name, values):
    """Return True if the given field 'includes' all of the given values."""
    for value in values:
        if not self.isFlagSet(name, value):
            return False
    return True
# Install as a method on every scapy Packet, then drop the module-level name.
scapy.packet.Packet.areFlagsSet = areFlagsSet
del areFlagsSet
def areFlagsNotSet(self, name, values):
    """Return True if the given field 'includes' none of the given values."""
    return not any(self.isFlagSet(name, value) for value in values)
# Install as a method on every scapy Packet, then drop the module-level name.
scapy.packet.Packet.areFlagsNotSet = areFlagsNotSet
del areFlagsNotSet
def iterSubPackets(self, cls):
    """Iterate over all layers of the given type in packet 'self'."""
    try:
        if cls not in self:
            return
        elt = self[cls]
        while elt:
            yield elt
            # 'elt[cls:2]' asks scapy for the *second* occurrence of cls
            # relative to elt, i.e. the next deeper layer of that type.
            elt = elt[cls:2]
    except IndexError:
        # Raised by scapy when no further layer of this type exists.
        return
# Install as a method on every scapy Packet, then drop the module-level name.
scapy.packet.Packet.iterSubPackets = iterSubPackets
del iterSubPackets
class XStrFixedLenField(scapy.fields.StrFixedLenField):
    """String-Field with nice repr() for hexdecimal strings"""
    def i2repr(self, pkt, x):
        # Render the machine value as hex instead of a raw byte-string.
        return str2hex(scapy.fields.StrFixedLenField.i2m(self, pkt, x))
class XStrLenField(scapy.fields.StrLenField):
    """String-Field of variable size with nice repr() for hexdecimal strings"""
    def i2repr(self, pkt, x):
        # Render the machine value as hex instead of a raw byte-string.
        return str2hex(scapy.fields.StrLenField.i2m(self, pkt, x))
class EAPOL_Key(scapy.packet.Packet):
    """EAPOL Key frame"""
    name = "EAPOL Key"
    # DescType selects the concrete key descriptor; the sub-layers are bound
    # on it below (254 -> WPA, 2 -> RSN).
    fields_desc = [scapy.fields.ByteEnumField(
        "DescType", 254, {2: "RSN Key", 254: "WPA Key"}
    )]
# EAPOL packet-type 3 carries EAPOL-Key frames. scapy.layers.l2 is reachable
# here through the scapy.layers.dot11 import above.
scapy.packet.bind_layers(scapy.layers.l2.EAPOL, EAPOL_Key, type=3)
class EAPOL_AbstractEAPOLKey(scapy.packet.Packet):
    """Base-class for EAPOL WPA/RSN-Key frames"""
    # Field order mirrors the EAPOL-Key wire layout; do not reorder.
    fields_desc = [
        # 16 flag bits describing cipher-suite, key-type and handshake state.
        scapy.fields.FlagsField(
            "KeyInfo", 0, 16, [
                "HMAC_MD5_RC4", "HMAC_SHA1_AES", "undefined",
                "pairwise", "idx1", "idx2", "install",
                "ack", "mic", "secure", "error", "request", "encrypted"
            ]
        ),
        scapy.fields.ShortField("KeyLength", 0),
        scapy.fields.LongField("ReplayCounter", 0),
        XStrFixedLenField("Nonce", '\x00' * 32, 32),
        XStrFixedLenField("KeyIV", '\x00' * 16, 16),
        XStrFixedLenField("WPAKeyRSC", '\x00' * 8, 8),
        XStrFixedLenField("WPAKeyID", '\x00' * 8, 8),
        XStrFixedLenField("WPAKeyMIC", '\x00' * 16, 16),
        scapy.fields.ShortField("WPAKeyLength", 0),
        # The key-data blob is only present when WPAKeyLength > 0.
        scapy.fields.ConditionalField(
            XStrLenField("WPAKey", None,
                         length_from=lambda pkt: pkt.WPAKeyLength),
            lambda pkt: pkt.WPAKeyLength > 0
        )]
class EAPOL_WPAKey(EAPOL_AbstractEAPOLKey):
    # WPA key descriptor; its nominal key-derivation scheme.
    name = "EAPOL WPA Key"
    keyscheme = 'HMAC_MD5_RC4'
# DescType 254 marks WPA key frames.
scapy.packet.bind_layers(EAPOL_Key, EAPOL_WPAKey, DescType=254)
class EAPOL_RSNKey(EAPOL_AbstractEAPOLKey):
    # RSN key descriptor; its nominal key-derivation scheme.
    name = "EAPOL RSN Key"
    keyscheme = 'HMAC_SHA1_AES'
# DescType 2 marks RSN key frames.
scapy.packet.bind_layers(EAPOL_Key, EAPOL_RSNKey, DescType=2)
class AccessPoint(object):
    """A single access point, keyed by its MAC, holding the stations that
    were seen talking to it (station-MAC -> Station).
    """

    def __init__(self, mac):
        self.mac = mac
        self.essidframe = None  # the frame the ESSID was learned from
        self.essid = None
        self.stations = {}

    def __iter__(self):
        # values() is equivalent to the former .values().__iter__() and, unlike
        # itervalues(), also works on Python 3.
        return iter(self.stations.values())

    def __str__(self):
        return self.mac

    def __contains__(self, mac):
        return mac in self.stations

    def __getitem__(self, mac):
        return self.stations[mac]

    def __setitem__(self, mac, station):
        self.stations[mac] = station

    def __len__(self):
        return len(self.stations)

    def getCompletedAuthentications(self):
        """Return list of completed Authentications of all stations."""
        auths = []
        # BUG FIX (portability): itervalues() is Python-2-only; values() is
        # behaviorally identical here and works on both versions.
        for station in self.stations.values():
            auths.extend(station.getAuthentications())
        return auths

    def isCompleted(self):
        """Returns True if this instance includes at least one valid
        authentication.
        """
        return any(station.isCompleted() for station in self)
class Station(object):
    """A station (client) of one AccessPoint; collects captured
    EAPOL-handshake frames and reconstructs authentications from them.
    """

    def __init__(self, mac, ap):
        self.ap = ap
        self.mac = mac
        # ReplayCounter -> ({challenge}, {response}, {confirmation}); one
        # dict of captured frames per handshake phase.
        self.frames = {}

    def __str__(self):
        return self.mac

    def __iter__(self):
        return self.getAuthentications().__iter__()

    def __len__(self):
        # BUG FIX: previously returned len(self.auths), but no attribute
        # 'auths' is ever assigned in this class (guaranteed AttributeError).
        return len(self.getAuthentications())

    def addAuthenticationFrame(self, idx, pckt_idx, pckt):
        """Dispatch a key-frame to the handler for handshake-phase idx
        (0 = challenge, 1 = response, 2 = confirmation).
        """
        if idx == 0:
            return self.addChallengeFrame(pckt_idx, pckt)
        elif idx == 1:
            return self.addResponseFrame(pckt_idx, pckt)
        elif idx == 2:
            return self.addConfirmationFrame(pckt_idx, pckt)
        else:
            raise IndexError("Invalid authentication-phase.")

    def addChallengeFrame(self, pckt_idx, pckt):
        """Store a packet that contains the EAPOL-challenge"""
        frames = self.frames.setdefault(pckt.ReplayCounter, ({}, {}, {}))
        if pckt.Nonce not in frames[0]:
            frames[0][pckt.Nonce] = (pckt_idx, pckt)
            return self._buildAuthentications(
                {pckt.Nonce: (pckt_idx, pckt)},
                frames[1], frames[2]
            )

    def addResponseFrame(self, pckt_idx, pckt):
        """Store a packet that contains the EAPOL-response"""
        frames = self.frames.setdefault(pckt.ReplayCounter, ({}, {}, {}))
        if EAPOL_WPAKey in pckt:
            keypckt = pckt[EAPOL_WPAKey]
        elif EAPOL_RSNKey in pckt:
            keypckt = pckt[EAPOL_RSNKey]
        else:
            raise TypeError("No key-frame in packet")
        # WPAKeys 'should' set HMAC_MD5_RC4, RSNKeys HMAC_SHA1_AES
        # However we've seen cases where a WPAKey-packet sets
        # HMAC_SHA1_AES in its KeyInfo-field (see issue #111)
        if keypckt.isFlagSet('KeyInfo', EAPOL_WPAKey.keyscheme):
            version = EAPOL_WPAKey.keyscheme
        elif keypckt.isFlagSet('KeyInfo', EAPOL_RSNKey.keyscheme):
            version = EAPOL_RSNKey.keyscheme
        else:
            # Fallback to packet-types's own default, in case the
            # KeyScheme is never set. Should not happen...
            version = keypckt.keyscheme
        # We need a revirginized version of the EAPOL-frame which produced
        # that MIC.
        keymic_frame = pckt[scapy.layers.dot11.EAPOL].copy()
        keymic_frame.WPAKeyMIC = '\x00' * len(keymic_frame.WPAKeyMIC)
        # Strip padding and cruft from frame
        keymic_frame = str(keymic_frame)[:keymic_frame.len + 4]
        response = (version, keypckt.Nonce, keymic_frame, keypckt.WPAKeyMIC)
        if response not in frames[1]:
            frames[1][response] = (pckt_idx, pckt)
            return self._buildAuthentications(
                frames[0],
                {response: (pckt_idx, pckt)},
                frames[2]
            )

    def addConfirmationFrame(self, pckt_idx, pckt):
        """Store a packet that contains the EAPOL-confirmation"""
        # ReplayCounter - 1: the confirmation's counter is one ahead of the
        # challenge/response it confirms.
        frames = self.frames.setdefault(pckt.ReplayCounter - 1, ({}, {}, {}))
        if pckt.Nonce not in frames[2]:
            frames[2][pckt.Nonce] = (pckt_idx, pckt)
            return self._buildAuthentications(
                frames[0], frames[1], {pckt.Nonce: (pckt_idx, pckt)}
            )

    def _buildAuthentications(self, f1_frames, f2_frames, f3_frames):
        """Combine the stored phase-1/2/3 frames into EAPOLAuthentication
        objects, graded by how complete the combination is.
        """
        auths = []
        # items()/iteration below replaces Python-2-only iteritems() with
        # identical behavior.
        for (version, snonce, keymic_frame, WPAKeyMIC), \
                (f2_idx, f2) in f2_frames.items():
            # Combinations with Frame3 are of higher value as the AP
            # acknowledges that the STA used the correct PMK in Frame2
            for anonce, (f3_idx, f3) in f3_frames.items():
                if anonce in f1_frames:
                    # We have F1+F2+F3. Frame2 is only cornered by the
                    # ReplayCounter. Technically we don't benefit
                    # from this combination any more than just
                    # F2+F3 but this is the best we can get.
                    f1_idx, f1 = f1_frames[anonce]
                    spread = min(abs(f3_idx - f2_idx), abs(f1_idx - f2_idx))
                    auth = EAPOLAuthentication(
                        self, version, snonce, anonce,
                        WPAKeyMIC, keymic_frame, 0, spread, (f1, f2, f3)
                    )
                else:
                    # There are no matching first-frames. That's OK.
                    spread = abs(f3_idx - f2_idx)
                    auth = EAPOLAuthentication(
                        self, version, snonce,
                        anonce, WPAKeyMIC, keymic_frame,
                        1, spread, (None, f2, f3)
                    )
                auths.append(auth)
            for anonce, (f1_idx, f1) in f1_frames.items():
                # No third frame. Combinations with Frame1 are possible but
                # can also be triggered by STAs that use an incorrect PMK.
                spread = abs(f1_idx - f2_idx)
                if anonce not in f3_frames:
                    auth = EAPOLAuthentication(
                        self, version, snonce,
                        anonce, WPAKeyMIC, keymic_frame,
                        2, spread, (f1, f2, None)
                    )
                    auths.append(auth)
        return auths

    def getAuthentications(self):
        """Reconstruct a list of EAPOLAuthentications from captured
        handshake-packets. Best matches come first.
        """
        auths = []
        # values() replaces Python-2-only itervalues() with identical behavior.
        for frames in self.frames.values():
            auths.extend(self._buildAuthentications(*frames))
        return sorted(auths)

    def isCompleted(self):
        """Returns True if this instance includes at least one valid
        authentication.
        """
        return len(self.getAuthentications()) > 0
class EAPOLAuthentication(object):
    """One reconstructed WPA/RSN handshake.

    quality: 0 = F1+F2+F3, 1 = F2+F3, 2 = F1+F2 (lower is better);
    spread: packet-index distance between the combined frames (lower is
    better). Instances sort best-first on (quality, spread).
    """

    def __init__(
        self, station, version, snonce, anonce, keymic,
        keymic_frame, quality, spread, frames=None
    ):
        self.station = station
        self.version = version
        self.snonce = snonce
        self.anonce = anonce
        self.keymic = keymic
        self.keymic_frame = keymic_frame
        self.quality = quality
        self.spread = spread
        self.frames = frames

    def getpke(self):
        """Build the 'Pairwise key expansion' seed from the two MACs and
        the two nonces (min/max ordered as the PTK derivation requires).
        """
        pke = "Pairwise key expansion\x00" \
              + ''.join(sorted((
                  scapy.utils.mac2str(self.station.ap.mac),
                  scapy.utils.mac2str(self.station.mac)
              ))) \
              + ''.join(sorted((self.snonce, self.anonce))) \
              + '\x00'
        return pke

    pke = property(getpke)

    def __lt__(self, other):
        # BUG FIX: the original's else-branch was 'return self < other',
        # which recursed forever for non-EAPOLAuthentication operands.
        if isinstance(other, EAPOLAuthentication):
            return (self.quality, self.spread) < (other.quality, other.spread)
        return NotImplemented

    def __gt__(self, other):
        # BUG FIX: 'not (self < other)' is >=, not >; equal objects compared
        # as 'greater'. Compare the same key tuple directly.
        if isinstance(other, EAPOLAuthentication):
            return (self.quality, self.spread) > (other.quality, other.spread)
        return NotImplemented

    def __str__(self):
        quality = ['good', 'workable', 'bad'][self.quality]
        return "%s, %s, spread %s" % (self.version, quality, self.spread)
class Dot11PacketWriter(object):
    """Write 802.11 frames to a pcap file; usable as a context manager."""

    def __init__(self, pcapfile):
        # linktype 105 == LINKTYPE_IEEE802_11; *.gz names are written
        # gzip-compressed, sync flushes after every packet.
        self.writer = scapy.utils.PcapWriter(
            pcapfile, linktype=105, gz=pcapfile.endswith('.gz'), sync=True
        )
        self.pcktcount = 0

    def write(self, pckt):
        """Write the Dot11 layer of pckt; raise if there is none."""
        # Idiom fix: 'X not in Y' instead of 'not X in Y' (PEP 8 / E713).
        if scapy.layers.dot11.Dot11 not in pckt:
            raise RuntimeError("No Dot11-frame in packet.")
        self.writer.write(pckt[scapy.layers.dot11.Dot11])
        self.pcktcount += 1

    def close(self):
        self.writer.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Positional dunder parameters renamed to avoid shadowing builtins;
        # callers are unaffected.
        self.close()
class PacketParser(object):
    """Parse packets from a capture-source and reconstruct AccessPoints,
    Stations and EAPOLAuthentications from the data.
    """
    def __init__(
        self,
        g=None,
        new_ap_callback=None,
        new_station_callback=None,
        new_keypckt_callback=None,
        new_auth_callback=None,
        new_stop_parsing_callback=None,
        new_pkt_callback=None,
        use_bpf=True
    ):
        # BSSID -> AccessPoint for everything seen on the air.
        self.air = {}
        self.pcktcount = 0          # all packets parsed
        self.dot11_pcktcount = 0    # packets that carried a Dot11 layer
        self.new_ap_callback = new_ap_callback
        self.new_station_callback = new_station_callback
        self.new_keypckt_callback = new_keypckt_callback
        self.new_auth_callback = new_auth_callback
        self.stop_callback = new_stop_parsing_callback
        self.pkt_callback = new_pkt_callback
        # NOTE(review): use_bpf and g are stored but not referenced anywhere
        # in this class -- their purpose is not evident from this file.
        self.use_bpf = use_bpf
        self.g = g
    def _find_ssid(self, pckt):
        # Return the first SSID information-element whose declared length
        # matches its payload and that is not all NUL bytes (hidden ESSID).
        for elt_pckt in pckt.iterSubPackets(scapy.layers.dot11.Dot11Elt):
            if elt_pckt.isFlagSet('ID', 'SSID') and \
               len(elt_pckt.info) == elt_pckt.len and \
               not all(c == '\x00' for c in elt_pckt.info):
                return elt_pckt.info
    def _add_ap(self, ap_mac, pckt):
        # Create the AP on first sight; fill in the ESSID once one of its
        # frames reveals it.
        ap = self.air.setdefault(ap_mac, AccessPoint(ap_mac))
        if ap.essid is None:
            essid = self._find_ssid(pckt)
            if essid is not None:
                ap.essid = essid
                ap.essidframe = pckt.copy()
                if self.new_ap_callback is not None:
                    self.new_ap_callback(ap)
    def _add_station(self, ap, sta_mac):
        # Register a station under its AP on first sight.
        if sta_mac not in ap:
            sta = Station(sta_mac, ap)
            ap[sta_mac] = sta
            if self.new_station_callback is not None:
                self.new_station_callback(sta)
    def _add_keypckt(self, station, idx, pckt):
        # Feed a handshake frame to the station; report any authentications
        # that could be (re)constructed from it.
        new_auths = station.addAuthenticationFrame(idx, self.pcktcount, pckt)
        if self.new_keypckt_callback is not None:
            self.new_keypckt_callback((station, idx, pckt))
        if new_auths is not None and self.new_auth_callback is not None:
            for auth in new_auths:
                self.new_auth_callback((station, auth))
    def parse_pcapdevice(self, reader):
        """Parse all packets from a instance of PcapDevice.

        This method can be very fast as it updates PcapDevice's BPF-filter
        to exclude unwanted packets from Stations once we are aware of
        their presence.
        """
        if not isinstance(reader, PcapDevice):
            raise TypeError("Argument must be of type PcapDevice")
        sta_callback = self.new_station_callback
        ap_callback = self.new_ap_callback
        # NOTE(review): unlike every other callback site in this class,
        # pkt_callback and stop_callback are invoked without a None-check;
        # constructing with either left as None makes this loop raise
        # TypeError. Confirm whether callers always supply both.
        for pckt in reader:
            self.parse_packet(pckt)
            self.pkt_callback(pckt)
            if self.stop_callback(pckt):
                break
        # NOTE(review): the callbacks are saved and restored but never
        # modified in between; the BPF-updating described in the docstring
        # does not appear to be implemented here.
        self.new_station_callback = sta_callback
        self.new_ap_callback = ap_callback
    def _add_alone_device(self, pkt):
        # TODO: handle probing devices not yet associated with any AP.
        pass
    def parse_packet(self, pckt):
        """Parse one packet"""
        self.pcktcount += 1
        if not scapy.layers.dot11.Dot11 in pckt:
            return
        dot11_pckt = pckt[scapy.layers.dot11.Dot11]
        self.dot11_pcktcount += 1
        if dot11_pckt.isFlagSet('type', 'Control'):
            return
        # Get a AP and a ESSID from a Beacon
        if scapy.layers.dot11.Dot11Beacon in dot11_pckt:
            self._add_ap(dot11_pckt.addr2, dot11_pckt)
            return
        # Get a AP and it's ESSID from a AssociationRequest
        if scapy.layers.dot11.Dot11AssoReq in dot11_pckt:
            self._add_ap(dot11_pckt.addr1, dot11_pckt)
        # Get a AP and it's ESSID from a ProbeResponse
        if scapy.layers.dot11.Dot11ProbeResp in dot11_pckt:
            self._add_ap(dot11_pckt.addr2, dot11_pckt)
        if scapy.layers.dot11.Dot11ProbeReq in dot11_pckt:
            self._add_alone_device(dot11_pckt)
        # From now on we are only interested in unicast packets.
        # addr strings look like 'aa:bb:...'; the LSB of the first octet
        # (second hex digit) is the group/multicast bit.
        if dot11_pckt.isFlagSet('FCfield', 'to-DS') \
           and not int(dot11_pckt.addr2[1], 16) & 1:
            ap_mac = dot11_pckt.addr1
            sta_mac = dot11_pckt.addr2
        elif dot11_pckt.isFlagSet('FCfield', 'from-DS') \
             and not int(dot11_pckt.addr1[1], 16) & 1:
            ap_mac = dot11_pckt.addr2
            sta_mac = dot11_pckt.addr1
        else:
            return
        # May result in 'anonymous' AP
        self._add_ap(ap_mac, dot11_pckt)
        ap = self.air[ap_mac]
        self._add_station(ap, sta_mac)
        sta = ap[sta_mac]
        if EAPOL_WPAKey in dot11_pckt:
            wpakey_pckt = dot11_pckt[EAPOL_WPAKey]
        elif EAPOL_RSNKey in dot11_pckt:
            wpakey_pckt = dot11_pckt[EAPOL_RSNKey]
        else:
            return
        # Frame 1: pairwise set, install unset, ack set, mic unset
        # results in ANonce
        if wpakey_pckt.areFlagsSet('KeyInfo', ('pairwise', 'ack')) \
           and wpakey_pckt.areFlagsNotSet('KeyInfo', ('install', 'mic')):
            self._add_keypckt(sta, 0, pckt)
        # Frame 2: pairwise set, install unset, ack unset, mic set,
        # SNonce != 0. Results in SNonce, MIC and keymic_frame
        elif wpakey_pckt.areFlagsSet('KeyInfo', ('pairwise', 'mic')) \
             and wpakey_pckt.areFlagsNotSet('KeyInfo', ('install', 'ack')) \
             and not all(c == '\x00' for c in wpakey_pckt.Nonce):
            self._add_keypckt(sta, 1, pckt)
        # Frame 3: pairwise set, install set, ack set, mic set
        # Results in ANonce
        elif wpakey_pckt.areFlagsSet(
            'KeyInfo',
            ('pairwise', 'install', 'ack', 'mic')
        ):
            self._add_keypckt(sta, 2, pckt)
    def __iter__(self):
        # Iterate APs sorted by ESSID.
        # NOTE(review): itervalues() is Python-2-only.
        return [
            ap for essid,
            ap in sorted([(ap.essid, ap) for ap in self.air.itervalues()])
        ].__iter__()
    def __getitem__(self, bssid):
        return self.air[bssid]
    def __contains__(self, bssid):
        return bssid in self.air
    def __len__(self):
        return len(self.air)
#class Dot11ControlFrame(scapy.layers.dot11.Dot11):
#pass
#class Dot11NullFunction(scapy.layers.dot11.Dot11):
#def summary():
#return "NullFunction"
#class Dot11ACK(scapy.layers.dot11.Dot11):
#def summary():
#return "ACK"
#scapy.packet.bind_layers(scapy.layers.dot11.Dot11, Dot11ControlFrame, type=1)
#scapy.packet.bind_layers(
#scapy.layers.dot11.Dot11, Dot11NullFunction, type=1, subtype=4
#)
#scapy.packet.bind_layers(
#scapy.layers.dot11.Dot11, Dot11ACK, type=1, subtype=13
#)
| |
# -*- coding: utf-8 -*-
"""
@author: Hagen Telg
"""
import datetime
import pandas as pd
import numpy as _np
import os
# import pylab as plt
# from atmPy.tools import conversion_tools as ct
from atmPy.general import timeseries
from atmPy.atmosphere import standards as atm_std
import pathlib
def read_file(path,
              version='BBB_01',
              pattern='HK',
              skip_histogram=False,
              ignore_colums=None,  # e.g. ['Flow_Rate_ccps', 'LED_P_MON', ...]
              verbose=False):
    """Read POPS housekeeping file(s) and return a POPSHouseKeeping instance.

    Parameters
    ----------
    path: string, pathlib.Path, or list thereof.
        This can either be a file name, a list of filenames or a folder
        (every file whose name contains *pattern* is read).
    version: string ['BBB_01']
        BBB_01: Beagle bone
        BBB_02: Beagle bone, newer format
        sbRio: sbRio
    pattern: str
        if folder is given than this is the pattern housekeeping files will
        be identified by
    skip_histogram: bool
        If True, only the first 27 (housekeeping) columns are read.
    ignore_colums: list, optional
        Column names dropped from the result.
    verbose: bool

    Returns
    -------
    TimeSeries instance
    """

    def read_sbRio(fname, skip_histogram=False, verbose=False):
        """Read a single sbRio-format csv housekeeping file; returns False
        when the file cannot be parsed."""
        if verbose:
            print('reading %s' % fname)
        try:
            df = pd.read_csv(fname, error_bad_lines=False)
        except ValueError:
            return False
        # sbRio timestamps count seconds since 1904-01-01 (LabVIEW epoch);
        # shift onto the unix epoch before converting to datetimes.
        dt = (datetime.datetime.strptime('19700101', "%Y%m%d")
              - datetime.datetime.strptime('19040101', "%Y%m%d"))
        dts = dt.total_seconds()
        df.index = pd.Series(pd.to_datetime(df.Time_s - dts, unit='s'),
                             name='Time_UTC')
        return POPSHouseKeeping(df)

    def read_BBB(fname, skip_histogram=False, verbose=False):
        """Read a single BeagleBone (BBB_01) format housekeeping file."""
        if verbose:
            print(f'read pops house keeping bbb file: {fname}')
        # The first line carries the (whitespace-padded) column names; the
        # trailing empty field is dropped.
        col_names = pd.read_csv(fname, sep=',', nrows=1,
                                header=None).values[0][:-1].astype(str)
        col_names = _np.char.strip(col_names)
        # The histogram lives in the columns after the first 27.
        usecols = list(range(27)) if skip_histogram else None
        data = pd.read_csv(fname, sep=',', skiprows=1, header=None,
                           usecols=usecols)
        data_hk = data.iloc[:, :27]
        data_hk.columns = col_names
        data_hk.index = pd.to_datetime(data_hk['DateTime'], unit='s')
        data_hk.drop('DateTime', axis=1, inplace=True)
        hk = POPSHouseKeeping(data_hk, sampling_period=1)
        hk.data['Barometric_pressure'] = hk.data['P']
        return hk

    def read_BBB_02(fname, skip_histogram=False, verbose=False):
        """Read a single BBB_02 format housekeeping file.

        NOTE(review): this reader looks unfinished -- it returns the raw
        DataFrame (integer column labels, no datetime index). The original
        contained several unreachable lines after this return, which have
        been removed; behavior is unchanged.
        """
        if verbose:
            print(f'read pops house keeping bbb file: {fname}')
        usecols = list(range(27)) if skip_histogram else None
        data = pd.read_csv(fname, sep=',', skiprows=1, header=None,
                           usecols=usecols)
        return data

    if version == 'sbRio':
        read = read_sbRio
    elif version == 'BBB_01':
        read = read_BBB
    elif version == 'BBB_02':
        read = read_BBB_02
    else:
        raise ValueError('Housekeeping version {} is unknown!'.format(version))

    # BUG FIX: test for a list FIRST. The original cast to pathlib.Path
    # before this test; Path(list) raises TypeError, which made the
    # documented list support unreachable.
    if isinstance(path, list):
        file_paths = [pathlib.Path(p) for p in path]
    else:
        path = pathlib.Path(path)
        if path.is_dir():
            file_paths = sorted(path.glob('*{}*'.format(pattern)))
        elif path.is_file():
            file_paths = [path]
        else:
            raise TypeError('fname is of unknown type: '
                            '{}'.format(type(path).__name__))
    file_paths.sort()

    hk_data = []
    for file in file_paths:
        hktmp = read(file, skip_histogram=skip_histogram, verbose=verbose)
        if not hktmp:
            print('%s is empty ... next one' % file)
            # BUG FIX: 'continue' was missing, so empty files fell through
            # and crashed on hktmp.data below.
            continue
        hk_data.append(hktmp.data)
    data = pd.concat(hk_data)
    hk = POPSHouseKeeping(data)

    # This is necessary to avoid errors in further processing.
    hk.data = hk.data.dropna(how='all')

    # Normalize the differently-named pressure columns.
    if 'P_Baro' in hk.data.keys():
        hk.data['Barometric_pressure'] = hk.data.P_Baro
        # axis as keyword: positional 'drop(label, 1)' is removed in
        # modern pandas.
        hk.data.drop('P_Baro', axis=1, inplace=True)
    if 'P_Ambient' in hk.data.keys():
        hk.data['Barometric_pressure'] = hk.data.P_Ambient
        hk.data.drop('P_Ambient', axis=1, inplace=True)
    if ignore_colums:
        hk.data = hk.data.drop(ignore_colums, axis=1)
    return hk
class POPSHouseKeeping(timeseries.TimeSeries):
    # TimeSeries subclass for POPS housekeeping data; the tabular data lives
    # in self.data (provided by the atmPy TimeSeries base class).
    def get_altitude(self, temperature=False):
        """Calculates the altitude from the measured barometric pressure

        Arguments
        ---------
        temperature: bool or array-like, optional
            False: temperature according to international standard is assumed.
            arraylike: actually measured temperature in Kelvin.
            NOTE(review): currently unused -- the standard atmosphere is
            applied regardless of this argument; confirm intent.

        Returns
        -------
        returns altitude and adds it to this instance (column 'Altitude')
        """
        # standard_atmosphere returns (altitude, temperature); the returned
        # temperature is discarded here.
        alt, tmp = atm_std.standard_atmosphere(self.data.loc[:,'Barometric_pressure'].copy(), quantity='pressure')
        self.data['Altitude'] = alt
        return alt
# todo: (low) this has never been actually implemented
# def read_housekeeping_allInFolder(concatWithOther = False, other = False, skip=[]):
# """Read all housekeeping files in current folder and concatinates them.
# Output: pandas dataFrame instance
# Parameters
# concatWithOther: bool, if you want to concat the created data with an older set given by other
# other: dataframe to concat the generated one
# skip: list of file which you want to exclude"""
#
# files = os.listdir('./')
# if concatWithOther:
# counter = True
# hkdf = other.copy()
# else:
# counter = False
# for e,i in enumerate(files):
# if 'HK.csv' in i:
# if i in skip:
# continue
# hkdf_tmp = read_housekeeping(i)
# if not counter:
# hkdf = hkdf_tmp
# else:
# hkdf = pd.concat([hkdf,hkdf_tmp])
# counter = True
# return hkdf
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2013-2016 Frantisek Uhrecky
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
from PyQt4 import QtGui, QtCore
from TransController import tr
from GroupController import GroupController
import os
import AppSettings
from SaveDialog import SaveDialog
import InfoMsgBoxes
import qrcode
import random
import string
class Image(qrcode.image.base.BaseImage):
    """qrcode image-factory that renders into a QImage.

    Passed as 'image_factory' to qrcode.make() so the generated code can be
    shown in a Qt widget via pixmap().
    """
    def __init__(self, border, width, box_size):
        # width: QR modules per row; box_size: pixels per module;
        # border: quiet-zone width in modules.
        self.border = border
        self.width = width
        self.box_size = box_size
        size = (width + border * 2) * box_size
        self._image = QtGui.QImage(
            size, size, QtGui.QImage.Format_RGB16)
        self._image.fill(QtCore.Qt.white)
    def pixmap(self):
        """Return the rendered QR code as a QPixmap."""
        return QtGui.QPixmap.fromImage(self._image)
    def drawrect(self, row, col):
        """Paint a single black QR module at (row, col)."""
        painter = QtGui.QPainter(self._image)
        painter.fillRect(
            (col + self.border) * self.box_size,
            (row + self.border) * self.box_size,
            self.box_size, self.box_size,
            QtCore.Qt.black)
    def save(self, stream, kind=None):
        # Intentionally a no-op: the image is consumed via pixmap(),
        # never written to a stream.
        pass
class QrDialog(QtGui.QDialog):
    """Frameless tool-window that displays the given text as a QR code."""
    def __init__(self, text):
        super(QrDialog, self).__init__()
        self.setWindowFlags(QtCore.Qt.Tool)
        self.label = QtGui.QLabel(self)
        layout = QtGui.QVBoxLayout(self)
        layout.addWidget(self.label)
        # unicode() is Python-2-only, consistent with the rest of this
        # PyQt4 module.
        text = unicode(text)
        self.label.setPixmap(qrcode.make(text, image_factory=Image).pixmap())
    def center(self):
        """
        Center window on the available screen area.
        """
        # get frame geometry
        wg = self.frameGeometry()
        # get screen center
        cs = QtGui.QDesktopWidget().availableGeometry().center()
        wg.moveCenter(cs)
        self.move(wg.topLeft())
class PasswdDialog(SaveDialog):
    """
    Dialog for creating or editing a password entry.

    Shows inputs for title, username, password (with show/hide toggle,
    QR-code preview and random generator), URL, expiration date,
    attachment, comment and group selection.
    """
    # emitted after saving password
    # param: p_id
    signalPasswdSaved = QtCore.pyqtSignal(int)
    def __init__(self, db_ctrl, show_pass = False, edit = True):
        """
        Constructor for password dialog, displays all necessary inputs.
        @param db_ctrl: database controller
        @param edit: if it is an edit dialog, show creation and modification date, else do not
        @param show_pass: show password in visible form
        """
        self.__db_ctrl = db_ctrl
        self.__edit = edit
        self.__show_pass = show_pass
        super(PasswdDialog, self).__init__()
        self.initUi()
        self.initConections()
        self.center()
        # default: password never expires
        self._e_date_never.setChecked(True)
        # initialize variables
        self._attachment_data = ""
    def initUi(self):
        """
        Initialize UI components.
        """
        SaveDialog.initUi(self)
        title_label = QtGui.QLabel("<b>" + tr("Title:") + "</b>")
        username_label = QtGui.QLabel("<b>" + tr("Username:") + "</b>")
        passwd_label = QtGui.QLabel("<b>" + tr("Password:") + "</b>")
        url_label = QtGui.QLabel("<b>" + tr("URL:") + "</b>")
        if (self.__edit):
            # edit dialog: also display creation/modification dates
            layout_offset = 0
            c_date_label = QtGui.QLabel("<b>" + tr("Creation date:") + "</b>")
            m_date_label = QtGui.QLabel("<b>" + tr("Modification date:") + "</b>")
            self._layout_gl.addWidget(c_date_label, 4, 0)
            self._layout_gl.addWidget(m_date_label, 5, 0)
            self._c_date = QtGui.QLabel()
            self._m_date = QtGui.QLabel()
            self._layout_gl.addWidget(self._c_date, 4, 1)
            self._layout_gl.addWidget(self._m_date, 5, 1)
        else:
            # creation dialog: the two date rows are omitted, shift rows up
            layout_offset = -2
        e_date_label = QtGui.QLabel("<b>" + tr("Expiration date:") + "</b>")
        comment_label = QtGui.QLabel("<b>" + tr("Comment:") + "</b>")
        attachment_label = QtGui.QLabel("<b>" + tr("Attachment:") + "</b>")
        group_label = QtGui.QLabel("<b>" + tr("Groups:") + "</b>")
        self._layout_gl.addWidget(title_label, 0, 0)
        self._layout_gl.addWidget(username_label, 1, 0)
        self._layout_gl.addWidget(passwd_label, 2, 0, QtCore.Qt.AlignTop)
        self._layout_gl.addWidget(url_label, 3, 0)
        self._layout_gl.addWidget(e_date_label, 6 + layout_offset, 0)
        self._layout_gl.addWidget(attachment_label, 7 + layout_offset, 0)
        self._layout_gl.addWidget(comment_label, 9 + layout_offset, 0)
        self._layout_gl.addWidget(group_label, 10 + layout_offset, 0)
        self._title = QtGui.QLineEdit()
        self._username = QtGui.QLineEdit()
        self._username.setFont(QtGui.QFont("Monospace"))
        self._passwd = QtGui.QLineEdit()
        self._passwd.setFont(QtGui.QFont("Monospace"))
        # size selector for the random password generator
        self._pw_size = QtGui.QSpinBox()
        self._pw_size.setMinimum(16)
        self._pw_size.setValue(32)
        self._pw_size.setToolTip("Minimum is 16 characters")
        if (not self.__show_pass):
            self._passwd.setEchoMode(QtGui.QLineEdit.Password)
        # password layout
        passwd_vl = QtGui.QVBoxLayout()
        passwd_hl = QtGui.QHBoxLayout()
        # password visibility check box
        self._show_passwd_check = QtGui.QCheckBox(tr("Show passwd"))
        self._show_passwd_check.setChecked(self.__show_pass)
        # password QR code
        self._qr_button = QtGui.QPushButton(tr("QR"))
        self._pwgen_btn = QtGui.QPushButton(tr("RND"))
        passwd_vl.addWidget(self._passwd)
        passwd_vl.addLayout(passwd_hl)
        passwd_hl.addWidget(self._show_passwd_check)
        passwd_hl.addWidget(self._qr_button)
        passwd_hl.addWidget(self._pwgen_btn)
        passwd_hl.addWidget(self._pw_size)
        self._url = QtGui.QLineEdit()
        self._e_date = QtGui.QLineEdit()
        self._comment = QtGui.QTextEdit()
        self._comment.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self._comment.setMaximumHeight(200)
        self._group = QtGui.QComboBox()
        # attachment file name; enabled only once an attachment is loaded
        self._att_name = QtGui.QLineEdit()
        self._att_name.setEnabled(False)
        self._layout_gl.addWidget(self._title, 0, 1)
        self._layout_gl.addWidget(self._username, 1, 1)
        self._layout_gl.addLayout(passwd_vl, 2, 1)
        self._layout_gl.addWidget(self._url, 3, 1)
        # attachment vertical layout
        att_vl = QtGui.QVBoxLayout()
        # attachment button rows
        att_hl_1 = QtGui.QHBoxLayout()
        att_hl_2 = QtGui.QHBoxLayout()
        att_vl.addLayout(att_hl_1)
        att_vl.addLayout(att_hl_2)
        # attachment action buttons; all but Load start disabled
        self._att_button = QtGui.QPushButton(tr("Load"))
        self._att_del_button = QtGui.QPushButton(tr("Delete"))
        self._att_save_button = QtGui.QPushButton(tr("Download"))
        self._att_open_button = QtGui.QPushButton(tr("Open"))
        self._att_del_button.setEnabled(False)
        self._att_save_button.setEnabled(False)
        self._att_open_button.setEnabled(False)
        att_hl_1.addWidget(self._att_button)
        att_hl_1.addWidget(self._att_del_button)
        att_hl_2.addWidget(self._att_save_button)
        att_hl_2.addWidget(self._att_open_button)
        self._layout_gl.addWidget(self._att_name, 7 + layout_offset, 1)
        self._layout_gl.addLayout(att_vl, 8 + layout_offset, 1)
        self._layout_gl.addWidget(self._comment, 9 + layout_offset, 1)
        self._layout_gl.addWidget(self._group, 10 + layout_offset, 1)
        # date time edit
        self._e_date_edit = QtGui.QDateTimeEdit()
        self._e_date_edit.setCalendarPopup(True)
        # expiration date can't be lower than current date
        self._e_date_edit.setMinimumDateTime(QtCore.QDateTime.currentDateTime())
        # create never check box
        self._e_date_never = QtGui.QCheckBox(tr("Never"))
        # create horizontal layout for date selector and never check box
        e_date_hl = QtGui.QHBoxLayout()
        e_date_hl.addWidget(self._e_date_edit)
        e_date_hl.addWidget(self._e_date_never)
        # add to main layout
        self._layout_gl.addLayout(e_date_hl, 6 + layout_offset, 1)
    def setVisibilityPass(self, state):
        """
        Show or hide the password text, depending on checkbox state.
        @param state: check box state
        """
        if (state == QtCore.Qt.Checked):
            self._passwd.setEchoMode(QtGui.QLineEdit.Normal)
        else:
            self._passwd.setEchoMode(QtGui.QLineEdit.Password)
    def initConections(self):
        """
        Initialize all connections, handling events.
        @requires: initUI(), setPassword() first
        """
        SaveDialog.initConections(self)
        # when something changed, enable save button
        self._title.textChanged.connect(self.enableSaveButton)
        self._username.textChanged.connect(self.enableSaveButton)
        self._passwd.textChanged.connect(self.enableSaveButton)
        self._url.textChanged.connect(self.enableSaveButton)
        self._comment.textChanged.connect(self.enableSaveButton)
        self._att_name.textChanged.connect(self.enableSaveButton)
        self._e_date_edit.dateChanged.connect(self.enableSaveButton)
        self._group.currentIndexChanged.connect(self.enableSaveButton)
        # never checked
        self._e_date_never.stateChanged.connect(self.enDisExpDate)
        self._e_date_never.stateChanged.connect(self.enableSaveButton)
        # show QR
        self._qr_button.clicked.connect(self.showQrCode)
        # open attachment
        self._att_button.clicked.connect(self.loadAttachment)
        # delete attachment
        self._att_del_button.clicked.connect(self.delAttachment)
        # save attachment to disk
        self._att_save_button.clicked.connect(self.saveAttachment)
        # open attachment file
        self._att_open_button.clicked.connect(self.openAttachment)
        # attachment input label
        self._att_name.textChanged.connect(self.enableAttEditAndButton)
        # show/hide password
        self._show_passwd_check.stateChanged.connect(self.setVisibilityPass)
        # pw generator
        self._pwgen_btn.clicked.connect(self.pwGen)
    def pwGen(self):
        """
        Generate a random alphanumeric password of the selected size.

        Uses a cryptographically secure RNG. Fix: string.ascii_letters
        replaces the locale-dependent string.letters so the alphabet is
        stable regardless of the user's locale.
        """
        alphabet = string.ascii_letters + string.digits
        rng = random.SystemRandom()
        self._passwd.setText(''.join(rng.choice(alphabet) for _ in range(self._pw_size.value())))
    def delAttachment(self):
        """
        Delete actual attachment.
        """
        logging.info("deleting attachment")
        # empty attachment name and disable input
        self._att_name.clear()
        self._att_name.setDisabled(True)
        # empty binary data
        self._attachment_data = ""
        # disable attachment action buttons
        self._att_del_button.setDisabled(True)
        self._att_save_button.setDisabled(True)
        self._att_open_button.setDisabled(True)
    def enableAttEditAndButton(self):
        """
        Enable attachment name input and the attachment action buttons.
        """
        self._att_name.setEnabled(True)
        self._att_del_button.setEnabled(True)
        self._att_save_button.setEnabled(True)
        self._att_open_button.setEnabled(True)
    def loadGroups(self, g_id = False):
        """
        Load available groups into the combobox.
        @param g_id: optional group ID to pre-select
        """
        # set groups combobox
        group_ctrl = GroupController(self.__db_ctrl)
        groups = group_ctrl.selectAll()
        # index of the group to pre-select
        tmp = 0
        # keep incrementing until the requested group is found
        inc_tmp = True
        # fill combobox
        for group in groups:
            logging.info("adding group ID: %d", group._id)
            # load icon
            pix = QtGui.QPixmap()
            pix.loadFromData(group._icon._icon)
            # add item with icon, name and group ID
            self._group.addItem(QtGui.QIcon(pix), tr(group._name), group._id)
            if (g_id):
                # requested group not reached yet: advance the index
                if (group._id != g_id and inc_tmp):
                    tmp += 1
                    logging.info("temp group index: %d, group._id: %d, g_id: %d", tmp, group._id, g_id)
                else:
                    if inc_tmp:
                        logging.info("group found")
                        inc_tmp = False
        # set current group
        if (g_id):
            self._group.setCurrentIndex(tmp)
    def enDisExpDate(self, state):
        """
        Enable or disable expiration date selector. Depends on checkbox state.
        @param state: check box state
        """
        logging.debug("never checkbox state changed")
        if (state == QtCore.Qt.Checked):
            self._e_date_edit.setEnabled(False)
        else:
            self._e_date_edit.setEnabled(True)
    def getGroupId(self):
        """
        Get group ID from combobox item.
        @return: group ID
        """
        index = self._group.currentIndex()
        # itemData returns a QVariant; toInt() yields a (value, ok) tuple
        group_id = self._group.itemData(index).toInt()[0]
        logging.info("current item index: %d group: %d", index, group_id)
        return group_id
    def keyReleaseEvent(self, event):
        """
        Handle release event.
        """
        logging.info("key release event")
    def showQrCode(self):
        """
        Display the current password as a QR code next to this dialog.
        """
        # fix: removed leftover debug `print` statement
        qr_w = QrDialog(self._passwd.text())
        qr_w.resize(200, 200)
        qr_w.setGeometry(self.geometry().x() + self.geometry().width(), self.geometry().y(), 200, 200)
        # qr_w.center()
        qr_w.exec_()
    def loadAttachment(self):
        """
        Exec filedialog, open file and get absolute file path and name.
        """
        try:
            home_loc = QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.HomeLocation)
            file_path = QtGui.QFileDialog.getOpenFileName(self, tr("Open attachment"), home_loc)
            if (not file_path.isEmpty()):
                file_path = str(file_path.toUtf8())
                file_name = os.path.basename(file_path)
                logging.info("attachment file path: %s", file_path)
                logging.info("attachment file name: %s", file_name)
                # set attachment name
                self._att_name.setText(QtCore.QString.fromUtf8(file_name))
                # read binary data
                data = self.readFile(file_path)
                if (data):
                    self._attachment_data = data
                    self.enableSaveButton()
            else:
                logging.debug("file not selected")
        except Exception as e:
            logging.exception(e)
            InfoMsgBoxes.showErrorMsg(e)
    def saveAttachment(self):
        """
        Save attachment to disk.
        """
        try:
            home_loc = QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.HomeLocation)
            home_loc = QtCore.QString.fromUtf8(home_loc + os.path.sep)
            home_loc.append(self._att_name.text())
            file_path = QtGui.QFileDialog.getSaveFileName(self, tr("Open attachment"), home_loc)
            logging.info("save attachment to file: %s", file_path)
            if (not file_path.isEmpty()):
                file_path = str(file_path.toUtf8())
                logging.info("attachment file path: %s", file_path)
                # write data to disk
                self.writeFile(file_path)
            else:
                logging.info("file not selected")
        except Exception as e:
            logging.exception(e)
            InfoMsgBoxes.showErrorMsg(e)
    def openAttachment(self):
        """
        Open attachment using desktop services.
        """
        try:
            tmp_file = AppSettings.TMP_PATH + str(self._att_name.text().toUtf8())
            logging.info("saving attachment to tmp file: '%s'", tmp_file)
            self.writeFile(tmp_file)
            if (not QtGui.QDesktopServices.openUrl(QtCore.QUrl.fromLocalFile(QtCore.QString.fromUtf8(tmp_file)))):
                # not succesfully opened
                QtGui.QMessageBox(QtGui.QMessageBox.Information, tr("Something wrong!"), tr("Can't open file '") + QtCore.QString.fromUtf8(tmp_file) + "\n" +
                    tr("Save it to disk and open with selected program.")).exec_()
        except Exception as e:
            logging.exception(e)
            InfoMsgBoxes.showErrorMsg(e)
    def writeFile(self, file_path):
        """
        Write the attachment data to disk.
        @param file_path: file to write
        """
        f = None
        try:
            f = open(AppSettings.decodePath(file_path), "wb")
            f.write(self._attachment_data)
        except IOError as e:
            logging.exception(e)
            raise e
        except:
            logging.exception("exception writting file: %s", file_path)
            # fix: bare re-raise; the old `raise e` referenced an undefined
            # name in this branch and raised NameError instead
            raise
        finally:
            if (f):
                f.close()
    def readFile(self, file_path):
        """
        Read file binary. Return read data.
        @param file_path: path to file
        @return: on succes binary data, else None
        """
        data = None
        f = None
        try:
            logging.info("reading file: %s", file_path)
            f = open(AppSettings.decodePath(file_path), "rb")
            data = f.read()
            logging.info("file size: %i", len(data))
        except IOError as e:
            logging.exception(e)
            raise e
        except:
            # all other exceptions
            logging.exception("exception, file: %s", file_path)
            # fix: re-raise the original exception; raising a plain string
            # (`raise "..."`) is not supported and raised TypeError
            raise
        finally:
            if (f):
                f.close()
        return data
    def saveChanges(self):
        """
        Save changes to database, read all inputs and save DB entry.
        @todo: implement saving password, emiting signal singalPasswdSaved, and close dialog
        """
        # TODO: implement saving password, emiting signal singalPasswdSaved, and close dialog
        pass
| |
"""Test the Google Nest Device Access config flow."""
import copy
from unittest.mock import patch
from google_nest_sdm.exceptions import (
AuthException,
ConfigurationException,
SubscriberException,
)
import pytest
from homeassistant import config_entries, setup
from homeassistant.components.nest.const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow
from .common import FakeSubscriber, MockConfigEntry
# OAuth client credentials used by the fake auth implementations.
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
# Device Access (SDM) project id.
PROJECT_ID = "project-id-4321"
# Fully qualified Pub/Sub subscription name and its cloud project id.
SUBSCRIBER_ID = "projects/cloud-id-9876/subscriptions/subscriber-id-9876"
CLOUD_PROJECT_ID = "cloud-id-9876"
# configuration.yaml style config used to set up the integration in tests.
CONFIG = {
    DOMAIN: {
        "project_id": PROJECT_ID,
        "subscriber_id": SUBSCRIBER_ID,
        CONF_CLIENT_ID: CLIENT_ID,
        CONF_CLIENT_SECRET: CLIENT_SECRET,
    },
    "http": {"base_url": "https://example.com"},
}
# Auth implementation domains: web auth redirects back to Home Assistant,
# installed-app auth has the user paste a code out of band.
ORIG_AUTH_DOMAIN = DOMAIN
WEB_AUTH_DOMAIN = DOMAIN
APP_AUTH_DOMAIN = f"{DOMAIN}.installed"
WEB_REDIRECT_URL = "https://example.com/auth/external/callback"
# Out-of-band redirect URI used by the installed-app flow.
APP_REDIRECT_URL = "urn:ietf:wg:oauth:2.0:oob"
@pytest.fixture
def subscriber() -> FakeSubscriber:
    """Fixture providing a fake pub/sub subscriber for the config flow."""
    return FakeSubscriber()
def get_config_entry(hass):
    """Return the single config entry registered for the nest domain."""
    config_entries = hass.config_entries.async_entries(DOMAIN)
    assert len(config_entries) == 1
    (entry,) = config_entries
    return entry
def create_config_entry(hass: HomeAssistant, data: dict) -> ConfigEntry:
    """Create a nest MockConfigEntry with *data* and register it on hass."""
    config_entry = MockConfigEntry(domain=DOMAIN, data=data, unique_id=DOMAIN)
    config_entry.add_to_hass(hass)
    return config_entry
class OAuthFixture:
    """Simulate the oauth flow used by the config flow.

    Drives the config flow step by step: picking the auth implementation,
    completing the (fake) authorize/redirect exchange, mocking the token
    refresh endpoint and finishing setup.
    """
    def __init__(self, hass, hass_client_no_auth, aioclient_mock):
        """Initialize OAuthFixture."""
        self.hass = hass
        self.hass_client = hass_client_no_auth
        self.aioclient_mock = aioclient_mock
    async def async_pick_flow(self, result: dict, auth_domain: str) -> dict:
        """Invoke flow to pick the auth implementation to use for this flow."""
        assert result["type"] == "form"
        assert result["step_id"] == "pick_implementation"
        return await self.async_configure(result, {"implementation": auth_domain})
    async def async_oauth_web_flow(self, result: dict) -> None:
        """Invoke the oauth flow for Web Auth with fake responses."""
        state = self.create_state(result, WEB_REDIRECT_URL)
        assert result["url"] == self.authorize_url(state, WEB_REDIRECT_URL)
        # Simulate user redirect back with auth code
        client = await self.hass_client()
        resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
        assert resp.status == 200
        assert resp.headers["content-type"] == "text/html; charset=utf-8"
        await self.async_mock_refresh(result)
    async def async_oauth_app_flow(self, result: dict) -> None:
        """Invoke the oauth flow for Installed Auth with fake responses."""
        # Render form with a link to get an auth token
        assert result["type"] == "form"
        assert result["step_id"] == "auth"
        assert "description_placeholders" in result
        assert "url" in result["description_placeholders"]
        state = self.create_state(result, APP_REDIRECT_URL)
        assert result["description_placeholders"]["url"] == self.authorize_url(
            state, APP_REDIRECT_URL
        )
        # Simulate user entering auth token in form
        await self.async_mock_refresh(result, {"code": "abcd"})
    async def async_reauth(self, old_data: dict) -> dict:
        """Initiate a reauth flow with the given existing entry data."""
        result = await self.hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=old_data
        )
        assert result["type"] == "form"
        assert result["step_id"] == "reauth_confirm"
        # Advance through the reauth flow
        flows = self.hass.config_entries.flow.async_progress()
        assert len(flows) == 1
        assert flows[0]["step_id"] == "reauth_confirm"
        # Advance to the oauth flow
        return await self.hass.config_entries.flow.async_configure(
            flows[0]["flow_id"], {}
        )
    def create_state(self, result: dict, redirect_url: str) -> str:
        """Create state object based on redirect url."""
        # The flow id and redirect uri are encoded into the signed state JWT.
        return config_entry_oauth2_flow._encode_jwt(
            self.hass,
            {
                "flow_id": result["flow_id"],
                "redirect_uri": redirect_url,
            },
        )
    def authorize_url(self, state: str, redirect_url: str) -> str:
        """Generate the expected authorization url."""
        oauth_authorize = OAUTH2_AUTHORIZE.format(project_id=PROJECT_ID)
        return (
            f"{oauth_authorize}?response_type=code&client_id={CLIENT_ID}"
            f"&redirect_uri={redirect_url}"
            f"&state={state}&scope=https://www.googleapis.com/auth/sdm.service"
            "+https://www.googleapis.com/auth/pubsub"
            "&access_type=offline&prompt=consent"
        )
    async def async_mock_refresh(self, result, user_input: dict = None) -> None:
        """Mock the token endpoint so the auth code exchange succeeds."""
        # NOTE(review): user_input appears unused here even though callers
        # pass it — confirm whether it was meant to be forwarded.
        self.aioclient_mock.post(
            OAUTH2_TOKEN,
            json={
                "refresh_token": "mock-refresh-token",
                "access_token": "mock-access-token",
                "type": "Bearer",
                "expires_in": 60,
            },
        )
    async def async_finish_setup(
        self, result: dict, user_input: dict = None
    ) -> ConfigEntry:
        """Finish the config flow and assert the integration is set up once."""
        with patch(
            "homeassistant.components.nest.async_setup_entry", return_value=True
        ) as mock_setup:
            await self.async_configure(result, user_input)
            assert len(mock_setup.mock_calls) == 1
            await self.hass.async_block_till_done()
        return self.get_config_entry()
    async def async_configure(self, result: dict, user_input: dict) -> dict:
        """Advance to the next step in the config flow."""
        return await self.hass.config_entries.flow.async_configure(
            result["flow_id"], user_input
        )
    async def async_pubsub_flow(self, result: dict, cloud_project_id="") -> None:
        """Verify the pubsub creation step."""
        # Render form with a link to get an auth token
        assert result["type"] == "form"
        assert result["step_id"] == "pubsub"
        assert "description_placeholders" in result
        assert "url" in result["description_placeholders"]
        assert result["data_schema"]({}) == {"cloud_project_id": cloud_project_id}
    def get_config_entry(self) -> ConfigEntry:
        """Get the config entry."""
        return get_config_entry(self.hass)
@pytest.fixture
async def oauth(hass, hass_client_no_auth, aioclient_mock, current_request_with_host):
    """Fixture wiring the simulated oauth flow into each test."""
    return OAuthFixture(hass, hass_client_no_auth, aioclient_mock)
async def async_setup_configflow(hass):
    """Set up component so the pubsub subscriber is managed by config flow."""
    conf = copy.deepcopy(CONFIG)
    # Drop the yaml-provided subscriber so the config flow creates one.
    conf[DOMAIN].pop("subscriber_id")
    return await setup.async_setup_component(hass, DOMAIN, conf)
async def test_web_full_flow(hass, oauth):
    """Check the full happy path for the Web Auth implementation."""
    assert await setup.async_setup_component(hass, DOMAIN, CONFIG)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await oauth.async_pick_flow(result, WEB_AUTH_DOMAIN)
    await oauth.async_oauth_web_flow(result)
    entry = await oauth.async_finish_setup(result)
    assert entry.title == "OAuth for Web"
    assert "token" in entry.data
    # expires_at is computed at runtime; drop it before the exact compare.
    entry.data["token"].pop("expires_at")
    assert entry.unique_id == DOMAIN
    assert entry.data["token"] == {
        "refresh_token": "mock-refresh-token",
        "access_token": "mock-access-token",
        "type": "Bearer",
        "expires_in": 60,
    }
    # Subscriber from configuration.yaml
    assert "subscriber_id" not in entry.data
async def test_web_reauth(hass, oauth):
    """Test Nest reauthentication for the Web Auth implementation."""
    assert await setup.async_setup_component(hass, DOMAIN, CONFIG)
    old_entry = create_config_entry(
        hass,
        {
            "auth_implementation": WEB_AUTH_DOMAIN,
            "token": {
                # Verify this is replaced at end of the test
                "access_token": "some-revoked-token",
            },
            "sdm": {},
        },
    )
    entry = get_config_entry(hass)
    assert entry.data["token"] == {
        "access_token": "some-revoked-token",
    }
    result = await oauth.async_reauth(old_entry.data)
    await oauth.async_oauth_web_flow(result)
    entry = await oauth.async_finish_setup(result)
    # Verify existing tokens are replaced
    entry.data["token"].pop("expires_at")
    assert entry.unique_id == DOMAIN
    assert entry.data["token"] == {
        "refresh_token": "mock-refresh-token",
        "access_token": "mock-access-token",
        "type": "Bearer",
        "expires_in": 60,
    }
    assert entry.data["auth_implementation"] == WEB_AUTH_DOMAIN
    assert "subscriber_id" not in entry.data  # not updated
async def test_single_config_entry(hass):
    """Test that only a single config entry is allowed."""
    # Pre-register an entry, then start a second user flow; it must abort.
    create_config_entry(hass, {"auth_implementation": WEB_AUTH_DOMAIN, "sdm": {}})
    assert await setup.async_setup_component(hass, DOMAIN, CONFIG)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "abort"
    assert result["reason"] == "single_instance_allowed"
async def test_unexpected_existing_config_entries(hass, oauth):
    """Test Nest reauthentication with multiple existing config entries."""
    # Note that this case will not happen in the future since only a single
    # instance is now allowed, but this may have been allowed in the past.
    # On reauth, only one entry is kept and the others are deleted.
    assert await setup.async_setup_component(hass, DOMAIN, CONFIG)
    old_entry = MockConfigEntry(
        domain=DOMAIN, data={"auth_implementation": WEB_AUTH_DOMAIN, "sdm": {}}
    )
    old_entry.add_to_hass(hass)
    old_entry = MockConfigEntry(
        domain=DOMAIN, data={"auth_implementation": WEB_AUTH_DOMAIN, "sdm": {}}
    )
    old_entry.add_to_hass(hass)
    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 2
    # Invoke the reauth flow
    result = await oauth.async_reauth(old_entry.data)
    await oauth.async_oauth_web_flow(result)
    await oauth.async_finish_setup(result)
    # Only a single entry now exists, and the other was cleaned up
    entries = hass.config_entries.async_entries(DOMAIN)
    assert len(entries) == 1
    entry = entries[0]
    assert entry.unique_id == DOMAIN
    entry.data["token"].pop("expires_at")
    assert entry.data["token"] == {
        "refresh_token": "mock-refresh-token",
        "access_token": "mock-access-token",
        "type": "Bearer",
        "expires_in": 60,
    }
    assert "subscriber_id" not in entry.data  # not updated
async def test_reauth_missing_config_entry(hass):
    """Test the reauth flow invoked missing existing data."""
    assert await setup.async_setup_component(hass, DOMAIN, CONFIG)
    # Invoke the reauth flow with no existing data; the flow must abort
    # rather than proceed without credentials to refresh.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=None
    )
    assert result["type"] == "abort"
    assert result["reason"] == "missing_configuration"
async def test_app_full_flow(hass, oauth):
    """Check the full happy path for the Installed App Auth implementation."""
    assert await setup.async_setup_component(hass, DOMAIN, CONFIG)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await oauth.async_pick_flow(result, APP_AUTH_DOMAIN)
    await oauth.async_oauth_app_flow(result)
    # The user pastes the auth code into the form to finish.
    entry = await oauth.async_finish_setup(result, {"code": "1234"})
    assert entry.title == "OAuth for Apps"
    assert "token" in entry.data
    # expires_at is computed at runtime; drop it before the exact compare.
    entry.data["token"].pop("expires_at")
    assert entry.unique_id == DOMAIN
    assert entry.data["token"] == {
        "refresh_token": "mock-refresh-token",
        "access_token": "mock-access-token",
        "type": "Bearer",
        "expires_in": 60,
    }
    # Subscriber from configuration.yaml
    assert "subscriber_id" not in entry.data
async def test_app_reauth(hass, oauth):
    """Test Nest reauthentication for Installed App Auth."""
    assert await setup.async_setup_component(hass, DOMAIN, CONFIG)
    old_entry = create_config_entry(
        hass,
        {
            "auth_implementation": APP_AUTH_DOMAIN,
            "token": {
                # Verify this is replaced at end of the test
                "access_token": "some-revoked-token",
            },
            "sdm": {},
        },
    )
    result = await oauth.async_reauth(old_entry.data)
    await oauth.async_oauth_app_flow(result)
    # Verify existing tokens are replaced
    entry = await oauth.async_finish_setup(result, {"code": "1234"})
    entry.data["token"].pop("expires_at")
    assert entry.unique_id == DOMAIN
    assert entry.data["token"] == {
        "refresh_token": "mock-refresh-token",
        "access_token": "mock-access-token",
        "type": "Bearer",
        "expires_in": 60,
    }
    assert entry.data["auth_implementation"] == APP_AUTH_DOMAIN
    assert "subscriber_id" not in entry.data  # not updated
async def test_pubsub_subscription(hass, oauth, subscriber):
    """Check flow that creates a pub/sub subscription."""
    assert await async_setup_configflow(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await oauth.async_pick_flow(result, APP_AUTH_DOMAIN)
    await oauth.async_oauth_app_flow(result)
    # The fake subscriber stands in for the real pub/sub client so the
    # "pubsub" step can create a subscription successfully.
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber",
        return_value=subscriber,
    ):
        result = await oauth.async_configure(result, {"code": "1234"})
        await oauth.async_pubsub_flow(result)
        entry = await oauth.async_finish_setup(
            result, {"cloud_project_id": CLOUD_PROJECT_ID}
        )
        await hass.async_block_till_done()
    assert entry.title == "OAuth for Apps"
    assert "token" in entry.data
    # expires_at is computed at runtime; drop it before the exact compare.
    entry.data["token"].pop("expires_at")
    assert entry.unique_id == DOMAIN
    assert entry.data["token"] == {
        "refresh_token": "mock-refresh-token",
        "access_token": "mock-access-token",
        "type": "Bearer",
        "expires_in": 60,
    }
    # The created subscription and its cloud project are persisted.
    assert "subscriber_id" in entry.data
    assert entry.data["cloud_project_id"] == CLOUD_PROJECT_ID
async def test_pubsub_subscription_auth_failure(hass, oauth):
    """Check that an auth error while creating the subscription aborts."""
    assert await async_setup_configflow(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await oauth.async_pick_flow(result, APP_AUTH_DOMAIN)
    await oauth.async_oauth_app_flow(result)
    result = await oauth.async_configure(result, {"code": "1234"})
    # Simulate the subscriber rejecting the access token.
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.create_subscription",
        side_effect=AuthException(),
    ):
        await oauth.async_pubsub_flow(result)
        result = await oauth.async_configure(
            result, {"cloud_project_id": CLOUD_PROJECT_ID}
        )
        await hass.async_block_till_done()
    assert result["type"] == "abort"
    assert result["reason"] == "invalid_access_token"
async def test_pubsub_subscription_failure(hass, oauth):
    """Check that a subscriber error re-renders the pubsub form with an error."""
    assert await async_setup_configflow(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await oauth.async_pick_flow(result, APP_AUTH_DOMAIN)
    await oauth.async_oauth_app_flow(result)
    result = await oauth.async_configure(result, {"code": "1234"})
    await oauth.async_pubsub_flow(result)
    # Simulate a generic subscriber failure creating the subscription.
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.create_subscription",
        side_effect=SubscriberException(),
    ):
        result = await oauth.async_configure(
            result, {"cloud_project_id": CLOUD_PROJECT_ID}
        )
        await hass.async_block_till_done()
    assert result["type"] == "form"
    assert "errors" in result
    assert "cloud_project_id" in result["errors"]
    assert result["errors"]["cloud_project_id"] == "subscriber_error"
async def test_pubsub_subscription_configuration_failure(hass, oauth):
    """Check that a configuration error surfaces as a bad_project_id form error."""
    assert await async_setup_configflow(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await oauth.async_pick_flow(result, APP_AUTH_DOMAIN)
    await oauth.async_oauth_app_flow(result)
    result = await oauth.async_configure(result, {"code": "1234"})
    await oauth.async_pubsub_flow(result)
    # Simulate a configuration problem (e.g. invalid project) from the API.
    with patch(
        "homeassistant.components.nest.api.GoogleNestSubscriber.create_subscription",
        side_effect=ConfigurationException(),
    ):
        result = await oauth.async_configure(
            result, {"cloud_project_id": CLOUD_PROJECT_ID}
        )
        await hass.async_block_till_done()
    assert result["type"] == "form"
    assert "errors" in result
    assert "cloud_project_id" in result["errors"]
    assert result["errors"]["cloud_project_id"] == "bad_project_id"
async def test_pubsub_with_wrong_project_id(hass, oauth):
    """Test a possible common misconfiguration mixing up project ids."""
    assert await async_setup_configflow(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await oauth.async_pick_flow(result, APP_AUTH_DOMAIN)
    await oauth.async_oauth_app_flow(result)
    result = await oauth.async_configure(result, {"code": "1234"})
    await oauth.async_pubsub_flow(result)
    # Entering the SDM project id where the cloud project id belongs must
    # be flagged with a dedicated error, not accepted.
    result = await oauth.async_configure(
        result, {"cloud_project_id": PROJECT_ID}  # SDM project id
    )
    await hass.async_block_till_done()
    assert result["type"] == "form"
    assert "errors" in result
    assert "cloud_project_id" in result["errors"]
    assert result["errors"]["cloud_project_id"] == "wrong_project_id"
async def test_pubsub_subscriber_config_entry_reauth(hass, oauth, subscriber):
    """Test the pubsub subscriber id is preserved during reauth."""
    assert await async_setup_configflow(hass)
    old_entry = create_config_entry(
        hass,
        {
            "auth_implementation": APP_AUTH_DOMAIN,
            "subscriber_id": SUBSCRIBER_ID,
            "cloud_project_id": CLOUD_PROJECT_ID,
            "token": {
                "access_token": "some-revoked-token",
            },
            "sdm": {},
        },
    )
    result = await oauth.async_reauth(old_entry.data)
    await oauth.async_oauth_app_flow(result)
    # Entering an updated access token refreshes the config entry.
    entry = await oauth.async_finish_setup(result, {"code": "1234"})
    entry.data["token"].pop("expires_at")
    assert entry.unique_id == DOMAIN
    assert entry.data["token"] == {
        "refresh_token": "mock-refresh-token",
        "access_token": "mock-access-token",
        "type": "Bearer",
        "expires_in": 60,
    }
    assert entry.data["auth_implementation"] == APP_AUTH_DOMAIN
    # Reauth must not touch the existing subscriber configuration.
    assert entry.data["subscriber_id"] == SUBSCRIBER_ID
    assert entry.data["cloud_project_id"] == CLOUD_PROJECT_ID
| |
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
There are "opportunistic" tests which allows testing against all 3 databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
| sudo -u postgres psql
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
"""
import glob
import os
from migrate import UniqueConstraint
from migrate.versioning import repository
import mock
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as oslodbutils
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy.sql import null
from nova.db import migration
from nova.db.sqlalchemy import migrate_repo
from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
# TODO(sdague): no tests in the nova/tests tree should inherit from
# base test classes in another library. This causes all kinds of havoc
# in these doing things incorrectly for what we need in subunit
# reporting. This is a long unwind, but should be done in the future
# and any code needed out of oslo_db should be exported / accessed as
# a fixture.
class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
                             test_migrations.WalkVersionsMixin):
    """Test sqlalchemy-migrate migrations.

    Walks every migration of the main database in order, running the
    optional per-version ``_pre_upgrade_NNN`` / ``_check_NNN`` hooks,
    and finally compares the resulting schema against the models via
    ModelsMigrationsSync.
    """
    # Schema walks are slow; scale up the oslotest timeout.
    TIMEOUT_SCALING_FACTOR = 4
    @property
    def INIT_VERSION(self):
        return migration.db_initial_version()
    @property
    def REPOSITORY(self):
        return repository.Repository(
            os.path.abspath(os.path.dirname(migrate_repo.__file__)))
    @property
    def migration_api(self):
        return sa_migration.versioning_api
    @property
    def migrate_engine(self):
        return self.engine
    def setUp(self):
        # NOTE(sdague): the oslo_db base test case completely
        # invalidates our logging setup, we actually have to do that
        # before it is called to keep this from vomiting all over our
        # test output.
        self.useFixture(nova_fixtures.StandardLogging())
        super(NovaMigrationsCheckers, self).setUp()
        # NOTE(rpodolyaka): we need to repeat the functionality of the base
        # test case a bit here as this gets overridden by oslotest base test
        # case and nova base test case cleanup must be the last one (as it
        # deletes attributes of test case instances)
        self.useFixture(nova_fixtures.Timeout(
            os.environ.get('OS_TEST_TIMEOUT', 0),
            self.TIMEOUT_SCALING_FACTOR))
    def assertColumnExists(self, engine, table_name, column):
        """Assert that table_name has the named column."""
        self.assertTrue(oslodbutils.column_exists(engine, table_name, column),
                        'Column %s.%s does not exist' % (table_name, column))
    def assertColumnNotExists(self, engine, table_name, column):
        """Assert that table_name does not have the named column."""
        self.assertFalse(oslodbutils.column_exists(engine, table_name, column),
                         'Column %s.%s should not exist' % (table_name, column))
    def assertTableNotExists(self, engine, table):
        """Assert that the named table is absent from the schema."""
        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                          oslodbutils.get_table, engine, table)
    def assertIndexExists(self, engine, table_name, index):
        """Assert that the named index exists on table_name."""
        self.assertTrue(oslodbutils.index_exists(engine, table_name, index),
                        'Index %s on table %s does not exist' %
                        (index, table_name))
    def assertIndexNotExists(self, engine, table_name, index):
        """Assert that the named index is absent from table_name."""
        self.assertFalse(oslodbutils.index_exists(engine, table_name, index),
                         'Index %s on table %s should not exist' %
                         (index, table_name))
    def assertIndexMembers(self, engine, table, index, members):
        """Assert the index exists and covers exactly *members*, in order."""
        # NOTE(johannes): Order of columns can matter. Most SQL databases
        # can use the leading columns for optimizing queries that don't
        # include all of the covered columns.
        self.assertIndexExists(engine, table, index)
        t = oslodbutils.get_table(engine, table)
        index_columns = None
        for idx in t.indexes:
            if idx.name == index:
                index_columns = [c.name for c in idx.columns]
                break
        self.assertEqual(members, index_columns)
    # Implementations for ModelsMigrationsSync
    def db_sync(self, engine):
        # Point nova's migration machinery at the test engine before
        # running the full schema sync.
        with mock.patch.object(sa_migration, 'get_engine',
                               return_value=engine):
            sa_migration.db_sync()
    def get_engine(self, context=None):
        return self.migrate_engine
    def get_metadata(self):
        return models.BASE.metadata
    def include_object(self, object_, name, type_, reflected, compare_to):
        if type_ == 'table':
            # migrate_version is a sqlalchemy-migrate control table and
            # isn't included in the model. shadow_* are generated from
            # the model and have their own tests to ensure they don't
            # drift.
            if name == 'migrate_version' or name.startswith('shadow_'):
                return False
        return True
    def _skippable_migrations(self):
        """Return migration versions that don't need a _check_NNN test."""
        special = [
            216,  # Havana
            272,  # NOOP migration due to revert
        ]
        havana_placeholders = list(range(217, 227))
        icehouse_placeholders = list(range(235, 244))
        juno_placeholders = list(range(255, 265))
        kilo_placeholders = list(range(281, 291))
        liberty_placeholders = list(range(303, 313))
        mitaka_placeholders = list(range(320, 330))
        newton_placeholders = list(range(335, 345))
        return (special +
                havana_placeholders +
                icehouse_placeholders +
                juno_placeholders +
                kilo_placeholders +
                liberty_placeholders +
                mitaka_placeholders +
                newton_placeholders)
    def migrate_up(self, version, with_data=False):
        """Upgrade one version, enforcing test coverage and additiveness."""
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if version not in self._skippable_migrations():
                self.assertIsNotNone(check,
                                     ('DB Migration %i does not have a '
                                      'test. Please add one!') % version)
        # NOTE(danms): This is a list of migrations where we allow dropping
        # things. The rules for adding things here are very very specific.
        # Chances are you don't meet the criteria.
        # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
        exceptions = [
            # 267 enforces non-nullable instance.uuid. This was mostly
            # a special case because instance.uuid shouldn't be able
            # to be nullable
            267,
            # 278 removes a FK restriction, so it's an alter operation
            # that doesn't break existing users
            278,
            # 280 enforces non-null keypair name. This is really not
            # something we should allow, but it's in the past
            280,
            # 292 drops completely orphaned tables with no users, so
            # it can be done without affecting anything.
            292,
            # 346 Drops column scheduled_at from instances table since it
            # is no longer used. The field value is always NULL so
            # it does not affect anything.
            346,
        ]
        # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
        # NOTE(danms): We only started requiring things be additive in
        # kilo, so ignore all migrations before that point.
        KILO_START = 265
        if version >= KILO_START and version not in exceptions:
            banned = ['Table', 'Column']
        else:
            banned = None
        with nova_fixtures.BannedDBSchemaOperations(banned):
            super(NovaMigrationsCheckers, self).migrate_up(version, with_data)
    def test_walk_versions(self):
        self.walk_versions(snake_walk=False, downgrade=False)
    def _check_227(self, engine, data):
        table = oslodbutils.get_table(engine, 'project_user_quotas')
        # Insert fake_quotas with the longest resource name.
        fake_quotas = {'id': 5,
                       'project_id': 'fake_project',
                       'user_id': 'fake_user',
                       'resource': 'injected_file_content_bytes',
                       'hard_limit': 10}
        table.insert().execute(fake_quotas)
        # Check we can get the longest resource name.
        quota = table.select(table.c.id == 5).execute().first()
        self.assertEqual(quota['resource'], 'injected_file_content_bytes')
    def _check_228(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes', 'metrics')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        self.assertIsInstance(compute_nodes.c.metrics.type,
                              sqlalchemy.types.Text)
    def _check_229(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        self.assertIsInstance(compute_nodes.c.extra_resources.type,
                              sqlalchemy.types.Text)
    def _check_230(self, engine, data):
        for table_name in ['instance_actions_events',
                           'shadow_instance_actions_events']:
            self.assertColumnExists(engine, table_name, 'host')
            self.assertColumnExists(engine, table_name, 'details')
        action_events = oslodbutils.get_table(engine,
                                              'instance_actions_events')
        self.assertIsInstance(action_events.c.host.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(action_events.c.details.type,
                              sqlalchemy.types.Text)
    def _check_231(self, engine, data):
        self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
        instances = oslodbutils.get_table(engine, 'instances')
        self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
                              sqlalchemy.types.String)
        self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
    def _check_232(self, engine, data):
        table_names = ['compute_node_stats', 'compute_nodes',
                       'instance_actions', 'instance_actions_events',
                       'instance_faults', 'migrations']
        for table_name in table_names:
            self.assertTableNotExists(engine, 'dump_' + table_name)
    def _check_233(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes', 'stats')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        self.assertIsInstance(compute_nodes.c.stats.type,
                              sqlalchemy.types.Text)
        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                          oslodbutils.get_table, engine, 'compute_node_stats')
    def _check_234(self, engine, data):
        self.assertIndexMembers(engine, 'reservations',
                                'reservations_deleted_expire_idx',
                                ['deleted', 'expire'])
    def _check_244(self, engine, data):
        volume_usage_cache = oslodbutils.get_table(
            engine, 'volume_usage_cache')
        self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
    def _pre_upgrade_245(self, engine):
        # create a fake network
        networks = oslodbutils.get_table(engine, 'networks')
        fake_network = {'id': 1}
        networks.insert().execute(fake_network)
    def _check_245(self, engine, data):
        networks = oslodbutils.get_table(engine, 'networks')
        network = networks.select(networks.c.id == 1).execute().first()
        # mtu should default to None
        self.assertIsNone(network.mtu)
        # dhcp_server should default to None
        self.assertIsNone(network.dhcp_server)
        # enable dhcp should default to true
        self.assertTrue(network.enable_dhcp)
        # share address should default to false
        self.assertFalse(network.share_address)
    def _check_246(self, engine, data):
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
                                 if fk.parent.name == 'compute_node_id']))
    def _check_247(self, engine, data):
        quota_usages = oslodbutils.get_table(engine, 'quota_usages')
        self.assertFalse(quota_usages.c.resource.nullable)
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        self.assertTrue(pci_devices.c.deleted.nullable)
        self.assertFalse(pci_devices.c.product_id.nullable)
        self.assertFalse(pci_devices.c.vendor_id.nullable)
        self.assertFalse(pci_devices.c.dev_type.nullable)
    def _check_248(self, engine, data):
        self.assertIndexMembers(engine, 'reservations',
                                'reservations_deleted_expire_idx',
                                ['deleted', 'expire'])
    def _check_249(self, engine, data):
        # Assert that only one index exists that covers columns
        # instance_uuid and device_name
        bdm = oslodbutils.get_table(engine, 'block_device_mapping')
        self.assertEqual(1, len([i for i in bdm.indexes
                                 if [c.name for c in i.columns] ==
                                    ['instance_uuid', 'device_name']]))
    def _check_250(self, engine, data):
        self.assertTableNotExists(engine, 'instance_group_metadata')
        self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
    def _check_251(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
        self.assertColumnExists(engine, 'shadow_compute_nodes',
                                'numa_topology')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        shadow_compute_nodes = oslodbutils.get_table(engine,
                                                     'shadow_compute_nodes')
        self.assertIsInstance(compute_nodes.c.numa_topology.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
                              sqlalchemy.types.Text)
    def _check_252(self, engine, data):
        oslodbutils.get_table(engine, 'instance_extra')
        oslodbutils.get_table(engine, 'shadow_instance_extra')
        self.assertIndexMembers(engine, 'instance_extra',
                                'instance_extra_idx',
                                ['instance_uuid'])
    def _check_253(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
        self.assertColumnExists(
            engine, 'shadow_instance_extra', 'pci_requests')
        instance_extra = oslodbutils.get_table(engine, 'instance_extra')
        shadow_instance_extra = oslodbutils.get_table(engine,
                                                      'shadow_instance_extra')
        self.assertIsInstance(instance_extra.c.pci_requests.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
                              sqlalchemy.types.Text)
    def _check_254(self, engine, data):
        self.assertColumnExists(engine, 'pci_devices', 'request_id')
        self.assertColumnExists(
            engine, 'shadow_pci_devices', 'request_id')
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        shadow_pci_devices = oslodbutils.get_table(
            engine, 'shadow_pci_devices')
        self.assertIsInstance(pci_devices.c.request_id.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(shadow_pci_devices.c.request_id.type,
                              sqlalchemy.types.String)
    def _check_265(self, engine, data):
        # Assert that only one index exists that covers columns
        # host and deleted
        instances = oslodbutils.get_table(engine, 'instances')
        self.assertEqual(1, len([i for i in instances.indexes
                                 if [c.name for c in i.columns][:2] ==
                                    ['host', 'deleted']]))
        # and only one index covers host column
        iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
        self.assertEqual(1, len([i for i in iscsi_targets.indexes
                                 if [c.name for c in i.columns][:1] ==
                                    ['host']]))
    def _check_266(self, engine, data):
        self.assertColumnExists(engine, 'tags', 'resource_id')
        self.assertColumnExists(engine, 'tags', 'tag')
        table = oslodbutils.get_table(engine, 'tags')
        self.assertIsInstance(table.c.resource_id.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(table.c.tag.type,
                              sqlalchemy.types.String)
    def _pre_upgrade_267(self, engine):
        # Create a fixed_ips row with a null instance_uuid (if not already
        # there) to make sure that's not deleted.
        fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
        fake_fixed_ip = {'id': 1}
        fixed_ips.insert().execute(fake_fixed_ip)
        # Create an instance record with a valid (non-null) UUID so we make
        # sure we don't do something stupid and delete valid records.
        instances = oslodbutils.get_table(engine, 'instances')
        fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
        instances.insert().execute(fake_instance)
        # Add a null instance_uuid entry for the volumes table
        # since it doesn't have a foreign key back to the instances table.
        volumes = oslodbutils.get_table(engine, 'volumes')
        fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
        volumes.insert().execute(fake_volume)
    def _check_267(self, engine, data):
        # fixed_ips.instance_uuid must remain nullable and the NULL entry
        # created in _pre_upgrade_267 must survive the migration.
        fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
        self.assertTrue(fixed_ips.c.instance_uuid.nullable)
        fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
        self.assertIsNone(fixed_ip.instance_uuid)
        # instances.uuid must now be non-nullable and the unique
        # constraint must exist.
        instances = oslodbutils.get_table(engine, 'instances')
        self.assertFalse(instances.c.uuid.nullable)
        inspector = reflection.Inspector.from_engine(engine)
        constraints = inspector.get_unique_constraints('instances')
        constraint_names = [constraint['name'] for constraint in constraints]
        self.assertIn('uniq_instances0uuid', constraint_names)
        # Make sure the instances record with the valid uuid is still there.
        instance = instances.select(instances.c.id == 1).execute().first()
        self.assertIsNotNone(instance)
        # Check that the null entry in the volumes table is still there since
        # we skipped tables that don't have FK's back to the instances table.
        volumes = oslodbutils.get_table(engine, 'volumes')
        self.assertTrue(volumes.c.instance_uuid.nullable)
        # NOTE: previously this selected from fixed_ips by mistake, which
        # made the assertion pass vacuously; query the volumes table.
        volume = volumes.select(
            volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
        ).execute().first()
        self.assertIsNone(volume.instance_uuid)
    def test_migration_267(self):
        # This is separate from test_walk_versions so we can test the case
        # where there are non-null instance_uuid entries in the database which
        # cause the 267 migration to fail.
        engine = self.migrate_engine
        self.migration_api.version_control(
            engine, self.REPOSITORY, self.INIT_VERSION)
        self.migration_api.upgrade(engine, self.REPOSITORY, 266)
        # Create a consoles record with a null instance_uuid so
        # we can test that the upgrade fails if that entry is found.
        # NOTE(mriedem): We use the consoles table since that's the only table
        # created in the 216 migration with a ForeignKey created on the
        # instance_uuid table for sqlite.
        consoles = oslodbutils.get_table(engine, 'consoles')
        fake_console = {'id': 1}
        consoles.insert().execute(fake_console)
        # NOTE(mriedem): We handle the 267 migration where we expect to
        # hit a ValidationError on the consoles table to have
        # a null instance_uuid entry
        ex = self.assertRaises(exception.ValidationError,
                               self.migration_api.upgrade,
                               engine, self.REPOSITORY, 267)
        self.assertIn("There are 1 records in the "
                      "'consoles' table where the uuid or "
                      "instance_uuid column is NULL.",
                      ex.kwargs['detail'])
        # Remove the consoles entry with the null instance_uuid column.
        rows = consoles.delete().where(
            consoles.c['instance_uuid'] == null()).execute().rowcount
        self.assertEqual(1, rows)
        # Now run the 267 upgrade again.
        self.migration_api.upgrade(engine, self.REPOSITORY, 267)
        # Make sure the consoles entry with the null instance_uuid
        # was deleted.
        console = consoles.select(consoles.c.id == 1).execute().first()
        self.assertIsNone(console)
    def _check_268(self, engine, data):
        # We can only assert that the col exists, not the unique constraint
        # as the engine is running sqlite
        self.assertColumnExists(engine, 'compute_nodes', 'host')
        self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        shadow_compute_nodes = oslodbutils.get_table(
            engine, 'shadow_compute_nodes')
        self.assertIsInstance(compute_nodes.c.host.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(shadow_compute_nodes.c.host.type,
                              sqlalchemy.types.String)
    def _check_269(self, engine, data):
        self.assertColumnExists(engine, 'pci_devices', 'numa_node')
        self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        shadow_pci_devices = oslodbutils.get_table(
            engine, 'shadow_pci_devices')
        self.assertIsInstance(pci_devices.c.numa_node.type,
                              sqlalchemy.types.Integer)
        self.assertTrue(pci_devices.c.numa_node.nullable)
        self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
                              sqlalchemy.types.Integer)
        self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
    def _check_270(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'flavor')
        self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')
        instance_extra = oslodbutils.get_table(engine, 'instance_extra')
        shadow_instance_extra = oslodbutils.get_table(
            engine, 'shadow_instance_extra')
        self.assertIsInstance(instance_extra.c.flavor.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_instance_extra.c.flavor.type,
                              sqlalchemy.types.Text)
    def _check_271(self, engine, data):
        self.assertIndexMembers(engine, 'block_device_mapping',
                                'snapshot_id', ['snapshot_id'])
        self.assertIndexMembers(engine, 'block_device_mapping',
                                'volume_id', ['volume_id'])
        self.assertIndexMembers(engine, 'dns_domains',
                                'dns_domains_project_id_idx',
                                ['project_id'])
        self.assertIndexMembers(engine, 'fixed_ips',
                                'network_id', ['network_id'])
        self.assertIndexMembers(engine, 'fixed_ips',
                                'fixed_ips_instance_uuid_fkey',
                                ['instance_uuid'])
        self.assertIndexMembers(engine, 'fixed_ips',
                                'fixed_ips_virtual_interface_id_fkey',
                                ['virtual_interface_id'])
        self.assertIndexMembers(engine, 'floating_ips',
                                'fixed_ip_id', ['fixed_ip_id'])
        self.assertIndexMembers(engine, 'iscsi_targets',
                                'iscsi_targets_volume_id_fkey', ['volume_id'])
        self.assertIndexMembers(engine, 'virtual_interfaces',
                                'virtual_interfaces_network_id_idx',
                                ['network_id'])
        self.assertIndexMembers(engine, 'virtual_interfaces',
                                'virtual_interfaces_instance_uuid_fkey',
                                ['instance_uuid'])
        # Removed on MySQL, never existed on other databases
        self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
        self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
    def _pre_upgrade_273(self, engine):
        if engine.name != 'sqlite':
            return
        # Drop a variety of unique constraints to ensure that the script
        # properly re-adds them back
        for table_name, constraint_name in [
                ('compute_nodes', 'uniq_compute_nodes0'
                                  'host0hypervisor_hostname'),
                ('fixed_ips', 'uniq_fixed_ips0address0deleted'),
                ('instance_info_caches', 'uniq_instance_info_caches0'
                                         'instance_uuid'),
                ('instance_type_projects', 'uniq_instance_type_projects0'
                                           'instance_type_id0project_id0'
                                           'deleted'),
                ('pci_devices', 'uniq_pci_devices0compute_node_id0'
                                'address0deleted'),
                ('virtual_interfaces', 'uniq_virtual_interfaces0'
                                       'address0deleted')]:
            table = oslodbutils.get_table(engine, table_name)
            constraints = [c for c in table.constraints
                           if c.name == constraint_name]
            for cons in constraints:
                # Need to use sqlalchemy-migrate UniqueConstraint
                cons = UniqueConstraint(*[c.name for c in cons.columns],
                                        name=cons.name,
                                        table=table)
                cons.drop()
    def _check_273(self, engine, data):
        for src_table, src_column, dst_table, dst_column in [
                ('fixed_ips', 'instance_uuid', 'instances', 'uuid'),
                ('block_device_mapping', 'instance_uuid', 'instances', 'uuid'),
                ('instance_info_caches', 'instance_uuid', 'instances', 'uuid'),
                ('instance_metadata', 'instance_uuid', 'instances', 'uuid'),
                ('instance_system_metadata', 'instance_uuid',
                 'instances', 'uuid'),
                ('instance_type_projects', 'instance_type_id',
                 'instance_types', 'id'),
                ('iscsi_targets', 'volume_id', 'volumes', 'id'),
                ('reservations', 'usage_id', 'quota_usages', 'id'),
                ('security_group_instance_association', 'instance_uuid',
                 'instances', 'uuid'),
                ('security_group_instance_association', 'security_group_id',
                 'security_groups', 'id'),
                ('virtual_interfaces', 'instance_uuid', 'instances', 'uuid'),
                ('compute_nodes', 'service_id', 'services', 'id'),
                ('instance_actions', 'instance_uuid', 'instances', 'uuid'),
                ('instance_faults', 'instance_uuid', 'instances', 'uuid'),
                ('migrations', 'instance_uuid', 'instances', 'uuid')]:
            src_table = oslodbutils.get_table(engine, src_table)
            fkeys = {fk.parent.name: fk.column
                     for fk in src_table.foreign_keys}
            self.assertIn(src_column, fkeys)
            self.assertEqual(fkeys[src_column].table.name, dst_table)
            self.assertEqual(fkeys[src_column].name, dst_column)
    def _check_274(self, engine, data):
        self.assertIndexMembers(engine, 'instances',
                                'instances_project_id_deleted_idx',
                                ['project_id', 'deleted'])
        self.assertIndexNotExists(engine, 'instances', 'project_id')
    def _pre_upgrade_275(self, engine):
        # Create a keypair record so we can test that the upgrade will set
        # 'ssh' as default value in the new column for the previous keypair
        # entries.
        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
        fake_keypair = {'name': 'test-migr'}
        key_pairs.insert().execute(fake_keypair)
    def _check_275(self, engine, data):
        self.assertColumnExists(engine, 'key_pairs', 'type')
        self.assertColumnExists(engine, 'shadow_key_pairs', 'type')
        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
        shadow_key_pairs = oslodbutils.get_table(engine, 'shadow_key_pairs')
        self.assertIsInstance(key_pairs.c.type.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(shadow_key_pairs.c.type.type,
                              sqlalchemy.types.String)
        # Make sure the keypair entry will have the type 'ssh'
        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
        keypair = key_pairs.select(
            key_pairs.c.name == 'test-migr').execute().first()
        self.assertEqual('ssh', keypair.type)
    def _check_276(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'vcpu_model')
        self.assertColumnExists(engine, 'shadow_instance_extra', 'vcpu_model')
        instance_extra = oslodbutils.get_table(engine, 'instance_extra')
        shadow_instance_extra = oslodbutils.get_table(
            engine, 'shadow_instance_extra')
        self.assertIsInstance(instance_extra.c.vcpu_model.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_instance_extra.c.vcpu_model.type,
                              sqlalchemy.types.Text)
    def _check_277(self, engine, data):
        self.assertIndexMembers(engine, 'fixed_ips',
                                'fixed_ips_deleted_allocated_updated_at_idx',
                                ['deleted', 'allocated', 'updated_at'])
    def _check_278(self, engine, data):
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        self.assertEqual(0, len([fk for fk in compute_nodes.foreign_keys
                                 if fk.parent.name == 'service_id']))
        self.assertTrue(compute_nodes.c.service_id.nullable)
    def _check_279(self, engine, data):
        inspector = reflection.Inspector.from_engine(engine)
        constraints = inspector.get_unique_constraints('compute_nodes')
        constraint_names = [constraint['name'] for constraint in constraints]
        self.assertNotIn('uniq_compute_nodes0host0hypervisor_hostname',
                         constraint_names)
        self.assertIn('uniq_compute_nodes0host0hypervisor_hostname0deleted',
                      constraint_names)
    def _check_280(self, engine, data):
        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
        self.assertFalse(key_pairs.c.name.nullable)
    def _check_291(self, engine, data):
        # NOTE(danms): This is a dummy migration that just does a consistency
        # check
        pass
    def _check_292(self, engine, data):
        self.assertTableNotExists(engine, 'iscsi_targets')
        self.assertTableNotExists(engine, 'volumes')
        self.assertTableNotExists(engine, 'shadow_iscsi_targets')
        self.assertTableNotExists(engine, 'shadow_volumes')
    def _pre_upgrade_293(self, engine):
        migrations = oslodbutils.get_table(engine, 'migrations')
        fake_migration = {}
        migrations.insert().execute(fake_migration)
    def _check_293(self, engine, data):
        self.assertColumnExists(engine, 'migrations', 'migration_type')
        self.assertColumnExists(engine, 'shadow_migrations', 'migration_type')
        migrations = oslodbutils.get_table(engine, 'migrations')
        fake_migration = migrations.select().execute().first()
        self.assertIsNone(fake_migration.migration_type)
        self.assertFalse(fake_migration.hidden)
    def _check_294(self, engine, data):
        self.assertColumnExists(engine, 'services', 'last_seen_up')
        self.assertColumnExists(engine, 'shadow_services', 'last_seen_up')
        services = oslodbutils.get_table(engine, 'services')
        shadow_services = oslodbutils.get_table(
            engine, 'shadow_services')
        self.assertIsInstance(services.c.last_seen_up.type,
                              sqlalchemy.types.DateTime)
        self.assertIsInstance(shadow_services.c.last_seen_up.type,
                              sqlalchemy.types.DateTime)
    def _pre_upgrade_295(self, engine):
        self.assertIndexNotExists(engine, 'virtual_interfaces',
                                  'virtual_interfaces_uuid_idx')
    def _check_295(self, engine, data):
        self.assertIndexMembers(engine, 'virtual_interfaces',
                                'virtual_interfaces_uuid_idx', ['uuid'])
    def _check_296(self, engine, data):
        pass
    def _check_297(self, engine, data):
        self.assertColumnExists(engine, 'services', 'forced_down')
    def _check_298(self, engine, data):
        # NOTE(nic): This is a MySQL-specific migration, and is a no-op from
        # the point-of-view of unit tests, since they use SQLite
        pass
    def filter_metadata_diff(self, diff):
        # Overriding the parent method to decide on certain attributes
        # that maybe present in the DB but not in the models.py
        def removed_column(element):
            # Define a whitelist of columns that would be removed from the
            # DB at a later release.
            column_whitelist = {'instances': ['internal_id']}
            if element[0] != 'remove_column':
                return False
            table_name, column = element[2], element[3]
            return (table_name in column_whitelist and
                    column.name in column_whitelist[table_name])
        return [
            element
            for element in diff
            if not removed_column(element)
        ]
    def _check_299(self, engine, data):
        self.assertColumnExists(engine, 'services', 'version')
    def _check_300(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'migration_context')
    def _check_301(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes',
                                'cpu_allocation_ratio')
        self.assertColumnExists(engine, 'compute_nodes',
                                'ram_allocation_ratio')
    def _check_302(self, engine, data):
        self.assertIndexMembers(engine, 'instance_system_metadata',
                                'instance_uuid', ['instance_uuid'])
    def _check_313(self, engine, data):
        self.assertColumnExists(engine, 'pci_devices', 'parent_addr')
        self.assertColumnExists(engine, 'shadow_pci_devices', 'parent_addr')
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        shadow_pci_devices = oslodbutils.get_table(
            engine, 'shadow_pci_devices')
        self.assertIsInstance(pci_devices.c.parent_addr.type,
                              sqlalchemy.types.String)
        self.assertTrue(pci_devices.c.parent_addr.nullable)
        self.assertIsInstance(shadow_pci_devices.c.parent_addr.type,
                              sqlalchemy.types.String)
        self.assertTrue(shadow_pci_devices.c.parent_addr.nullable)
        self.assertIndexMembers(engine, 'pci_devices',
            'ix_pci_devices_compute_node_id_parent_addr_deleted',
            ['compute_node_id', 'parent_addr', 'deleted'])
    def _check_314(self, engine, data):
        self.assertColumnExists(engine, 'inventories', 'resource_class_id')
        self.assertColumnExists(engine, 'allocations', 'resource_class_id')
        self.assertColumnExists(engine, 'resource_providers', 'id')
        self.assertColumnExists(engine, 'resource_providers', 'uuid')
        self.assertColumnExists(engine, 'compute_nodes', 'uuid')
        self.assertColumnExists(engine, 'shadow_compute_nodes', 'uuid')
        self.assertIndexMembers(engine, 'allocations',
                                'allocations_resource_provider_class_id_idx',
                                ['resource_provider_id', 'resource_class_id'])
    def _check_315(self, engine, data):
        self.assertColumnExists(engine, 'migrations',
                                'memory_total')
        self.assertColumnExists(engine, 'migrations',
                                'memory_processed')
        self.assertColumnExists(engine, 'migrations',
                                'memory_remaining')
        self.assertColumnExists(engine, 'migrations',
                                'disk_total')
        self.assertColumnExists(engine, 'migrations',
                                'disk_processed')
        self.assertColumnExists(engine, 'migrations',
                                'disk_remaining')
    def _check_316(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes',
                                'disk_allocation_ratio')
    def _check_317(self, engine, data):
        self.assertColumnExists(engine, 'aggregates', 'uuid')
        self.assertColumnExists(engine, 'shadow_aggregates', 'uuid')
    def _check_318(self, engine, data):
        self.assertColumnExists(engine, 'resource_providers', 'name')
        self.assertColumnExists(engine, 'resource_providers', 'generation')
        self.assertColumnExists(engine, 'resource_providers', 'can_host')
        self.assertIndexMembers(engine, 'resource_providers',
                                'resource_providers_name_idx',
                                ['name'])
        self.assertColumnExists(engine, 'resource_provider_aggregates',
                                'resource_provider_id')
        self.assertColumnExists(engine, 'resource_provider_aggregates',
                                'aggregate_id')
        # NOTE: this assertion was previously duplicated; one copy suffices.
        self.assertIndexMembers(engine, 'resource_provider_aggregates',
            'resource_provider_aggregates_aggregate_id_idx',
            ['aggregate_id'])
        self.assertIndexMembers(engine, 'inventories',
            'inventories_resource_provider_resource_class_idx',
            ['resource_provider_id', 'resource_class_id'])
    def _check_319(self, engine, data):
        self.assertIndexMembers(engine, 'instances',
                                'instances_deleted_created_at_idx',
                                ['deleted', 'created_at'])
    def _check_330(self, engine, data):
        # Just a sanity-check migration
        pass
    def _check_331(self, engine, data):
        self.assertColumnExists(engine, 'virtual_interfaces', 'tag')
        self.assertColumnExists(engine, 'block_device_mapping', 'tag')
    def _check_332(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'keypairs')
    def _check_333(self, engine, data):
        self.assertColumnExists(engine, 'console_auth_tokens', 'id')
        self.assertColumnExists(engine, 'console_auth_tokens', 'token_hash')
        self.assertColumnExists(engine, 'console_auth_tokens', 'console_type')
        self.assertColumnExists(engine, 'console_auth_tokens', 'host')
        self.assertColumnExists(engine, 'console_auth_tokens', 'port')
        self.assertColumnExists(engine, 'console_auth_tokens',
                                'internal_access_path')
        self.assertColumnExists(engine, 'console_auth_tokens',
                                'instance_uuid')
        self.assertColumnExists(engine, 'console_auth_tokens', 'expires')
        self.assertIndexMembers(engine, 'console_auth_tokens',
                                'console_auth_tokens_instance_uuid_idx',
                                ['instance_uuid'])
        self.assertIndexMembers(engine, 'console_auth_tokens',
                                'console_auth_tokens_host_expires_idx',
                                ['host', 'expires'])
        self.assertIndexMembers(engine, 'console_auth_tokens',
                                'console_auth_tokens_token_hash_idx',
                                ['token_hash'])
    def _check_334(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'device_metadata')
        self.assertColumnExists(engine, 'shadow_instance_extra',
                                'device_metadata')
    def _check_345(self, engine, data):
        # NOTE(danms): Just a sanity-check migration
        pass
    def _check_346(self, engine, data):
        self.assertColumnNotExists(engine, 'instances', 'scheduled_at')
        self.assertColumnNotExists(engine, 'shadow_instances', 'scheduled_at')
class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
                               test_base.DbTestCase,
                               test.NoDBTestCase):
    """Run the migration checks against an in-memory SQLite database."""
    pass
class TestNovaMigrationsMySQL(NovaMigrationsCheckers,
                              test_base.MySQLOpportunisticTestCase,
                              test.NoDBTestCase):
    """Run the migration checks against an opportunistic MySQL database."""
    def test_innodb_tables(self):
        """Verify every table created by db_sync uses the InnoDB engine."""
        # Point nova's migration machinery at the opportunistic engine
        # before syncing the full schema.
        with mock.patch.object(sa_migration, 'get_engine',
                               return_value=self.migrate_engine):
            sa_migration.db_sync()
        # Sanity check: db_sync actually created tables in this schema.
        total = self.migrate_engine.execute(
            "SELECT count(*) "
            "FROM information_schema.TABLES "
            "WHERE TABLE_SCHEMA = '%(database)s'" %
            {'database': self.migrate_engine.url.database})
        self.assertGreater(total.scalar(), 0, "No tables found. Wrong schema?")
        # Every table except migrate_version (owned by sqlalchemy-migrate
        # itself) must use the InnoDB storage engine.
        noninnodb = self.migrate_engine.execute(
            "SELECT count(*) "
            "FROM information_schema.TABLES "
            "WHERE TABLE_SCHEMA='%(database)s' "
            "AND ENGINE != 'InnoDB' "
            "AND TABLE_NAME != 'migrate_version'" %
            {'database': self.migrate_engine.url.database})
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
                                   test_base.PostgreSQLOpportunisticTestCase,
                                   test.NoDBTestCase):
    """Run the migration checkers against an opportunistic PostgreSQL DB."""
    pass
class ProjectTestCase(test.NoDBTestCase):
    def test_no_migrations_have_downgrade(self):
        """No migration script may define a downgrade() function."""
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
        py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
                               "migrate_repo", "versions", "*.py")
        includes_downgrade = []
        for path in glob.iglob(py_glob):
            # Substring search over the whole file is equivalent to the
            # per-line scan: neither pattern spans a newline.
            with open(path, "r") as f:
                contents = f.read()
            if 'def upgrade(' in contents and 'def downgrade(' in contents:
                includes_downgrade.append(os.path.basename(path))
        helpful_msg = ("The following migrations have a downgrade "
                       "which is not supported:"
                       "\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
        self.assertFalse(includes_downgrade, helpful_msg)
| |
import unittest
from datetime import (
date,
datetime,
)
from pypika import (
Criterion,
EmptyCriterion,
Field,
Table,
functions as fn,
)
from pypika.queries import QueryBuilder
from pypika.terms import ExistsCriterion, Mod
__author__ = "Timothy Heys"
__email__ = "theys@kayak.com"
class CriterionTests(unittest.TestCase):
    """SQL rendering of comparison criteria built from Field operators.

    Each test builds a criterion both via the Python operator (==, <, >=,
    ...) and via the explicit method (eq, lt, gte, ...) and asserts the
    rendered SQL, with and without a table alias.
    """
    # Aliased table shared by tests that exercise qualified field names.
    t = Table("test", alias="crit")
    def test__criterion_with_alias(self):
        c1 = (Field("foo") == Field("bar")).as_("criterion")
        self.assertEqual('"foo"="bar"', str(c1))
        self.assertEqual('"foo"="bar" "criterion"', c1.get_sql(with_alias=True, quote_char='"', alias_quote_char='"'))
    def test__criterion_eq_number(self):
        c1 = Field("foo") == 1
        c2 = Field("foo", table=self.t).eq(0)
        c3 = Field("foo", table=self.t) == -1
        self.assertEqual('"foo"=1', str(c1))
        self.assertEqual('"crit"."foo"=0', str(c2))
        self.assertEqual('"crit"."foo"=-1', str(c3))
    def test__criterion_eq_decimal(self):
        c1 = Field("foo") == 1.0
        c2 = Field("foo", table=self.t).eq(0.5)
        self.assertEqual('"foo"=1.0', str(c1))
        self.assertEqual('"crit"."foo"=0.5', str(c2))
    def test__criterion_eq_bool(self):
        # `== True` is deliberate here: it exercises the overloaded __eq__.
        c1 = Field("foo") == True
        c2 = Field("foo", table=self.t).eq(True)
        c3 = Field("foo") == False
        c4 = Field("foo", table=self.t).eq(False)
        self.assertEqual('"foo"=true', str(c1))
        self.assertEqual('"crit"."foo"=true', str(c2))
        self.assertEqual('"foo"=false', str(c3))
        self.assertEqual('"crit"."foo"=false', str(c4))
    def test__criterion_eq_str(self):
        c1 = Field("foo") == "abc"
        c2 = Field("foo", table=self.t).eq("abc")
        self.assertEqual("\"foo\"='abc'", str(c1))
        self.assertEqual('"crit"."foo"=\'abc\'', str(c2))
    def test__criterion_eq_date(self):
        c1 = Field("foo") == date(2000, 1, 1)
        c2 = Field("foo", table=self.t).eq(date(2000, 1, 1))
        self.assertEqual("\"foo\"='2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo"=\'2000-01-01\'', str(c2))
    def test__criterion_eq_datetime(self):
        c1 = Field("foo") == datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).eq(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\"='2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo"=\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_eq_right(self):
        # Literal on the left exercises the reflected operator.
        c1 = 1 == Field("foo")
        c2 = -1 == Field("foo", table=self.t)
        self.assertEqual('"foo"=1', str(c1))
        self.assertEqual('"crit"."foo"=-1', str(c2))
    def test__criterion_is_null(self):
        c1 = Field("foo").isnull()
        c2 = Field("foo", table=self.t).isnull()
        self.assertEqual('"foo" IS NULL', str(c1))
        self.assertEqual('"crit"."foo" IS NULL', str(c2))
    def test__criterion_is_null_with_alias(self):
        c1 = Field("foo").isnull().as_('alias')
        c2 = Field("foo", table=self.t).isnull().as_('alias')
        self.assertEqual('"foo" IS NULL "alias"', str(c1))
        self.assertEqual('"crit"."foo" IS NULL "alias"', str(c2))
    def test__criterion_ne_number(self):
        c1 = Field("foo") != 1
        c2 = Field("foo", table=self.t).ne(0)
        c3 = Field("foo") != -1
        self.assertEqual('"foo"<>1', str(c1))
        self.assertEqual('"crit"."foo"<>0', str(c2))
        self.assertEqual('"foo"<>-1', str(c3))
    def test__criterion_ne_str(self):
        c1 = Field("foo") != "abc"
        c2 = Field("foo", table=self.t).ne("abc")
        self.assertEqual("\"foo\"<>'abc'", str(c1))
        self.assertEqual('"crit"."foo"<>\'abc\'', str(c2))
    def test__criterion_ne_date(self):
        c1 = Field("foo") != date(2000, 1, 1)
        c2 = Field("foo", table=self.t).ne(date(2000, 1, 1))
        self.assertEqual("\"foo\"<>'2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo"<>\'2000-01-01\'', str(c2))
    def test__criterion_ne_datetime(self):
        c1 = Field("foo") != datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).ne(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\"<>'2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo"<>\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_ne_right(self):
        c1 = 1 != Field("foo")
        c2 = -1 != Field("foo", table=self.t)
        self.assertEqual('"foo"<>1', str(c1))
        self.assertEqual('"crit"."foo"<>-1', str(c2))
    def test__criterion_lt_number(self):
        c1 = Field("foo") < 1
        c2 = Field("foo", table=self.t).lt(0)
        c3 = Field("foo") < -1
        self.assertEqual('"foo"<1', str(c1))
        self.assertEqual('"crit"."foo"<0', str(c2))
        self.assertEqual('"foo"<-1', str(c3))
    def test__criterion_lt_date(self):
        c1 = Field("foo") < date(2000, 1, 1)
        c2 = Field("foo", table=self.t).lt(date(2000, 1, 1))
        self.assertEqual("\"foo\"<'2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo"<\'2000-01-01\'', str(c2))
    def test__criterion_lt_datetime(self):
        c1 = Field("foo") < datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).lt(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\"<'2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo"<\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_lt_right(self):
        c1 = 1 > Field("foo")
        c2 = -1 > Field("foo", table=self.t)
        self.assertEqual('"foo"<1', str(c1))
        self.assertEqual('"crit"."foo"<-1', str(c2))
    def test__criterion_gt_number(self):
        c1 = Field("foo") > 1
        c2 = Field("foo", table=self.t).gt(0)
        c3 = Field("foo") > -1
        self.assertEqual('"foo">1', str(c1))
        self.assertEqual('"crit"."foo">0', str(c2))
        self.assertEqual('"foo">-1', str(c3))
    def test__criterion_gt_date(self):
        c1 = Field("foo") > date(2000, 1, 1)
        c2 = Field("foo", table=self.t).gt(date(2000, 1, 1))
        self.assertEqual("\"foo\">'2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo">\'2000-01-01\'', str(c2))
    def test__criterion_gt_datetime(self):
        c1 = Field("foo") > datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).gt(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\">'2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo">\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_gt_right(self):
        c1 = 1 < Field("foo")
        c2 = -1 < Field("foo", table=self.t)
        self.assertEqual('"foo">1', str(c1))
        self.assertEqual('"crit"."foo">-1', str(c2))
    def test__criterion_lte_number(self):
        c1 = Field("foo") <= 1
        c2 = Field("foo", table=self.t).lte(0)
        c3 = Field("foo") <= -1
        self.assertEqual('"foo"<=1', str(c1))
        self.assertEqual('"crit"."foo"<=0', str(c2))
        self.assertEqual('"foo"<=-1', str(c3))
    def test__criterion_lte_date(self):
        c1 = Field("foo") <= date(2000, 1, 1)
        c2 = Field("foo", table=self.t).lte(date(2000, 1, 1))
        self.assertEqual("\"foo\"<='2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo"<=\'2000-01-01\'', str(c2))
    def test__criterion_lte_datetime(self):
        c1 = Field("foo") <= datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).lte(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\"<='2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo"<=\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_lte_right(self):
        c1 = 1 >= Field("foo")
        c2 = -1 >= Field("foo", table=self.t)
        self.assertEqual('"foo"<=1', str(c1))
        self.assertEqual('"crit"."foo"<=-1', str(c2))
    def test__criterion_gte_number(self):
        c1 = Field("foo") >= 1
        c2 = Field("foo", table=self.t).gte(0)
        c3 = Field("foo") >= -1
        self.assertEqual('"foo">=1', str(c1))
        self.assertEqual('"crit"."foo">=0', str(c2))
        self.assertEqual('"foo">=-1', str(c3))
    def test__criterion_gte_date(self):
        c1 = Field("foo") >= date(2000, 1, 1)
        c2 = Field("foo", table=self.t).gte(date(2000, 1, 1))
        self.assertEqual("\"foo\">='2000-01-01'", str(c1))
        self.assertEqual('"crit"."foo">=\'2000-01-01\'', str(c2))
    def test__criterion_gte_datetime(self):
        c1 = Field("foo") >= datetime(2000, 1, 1, 12, 30, 55)
        c2 = Field("foo", table=self.t).gte(datetime(2000, 1, 1, 12, 30, 55))
        self.assertEqual("\"foo\">='2000-01-01T12:30:55'", str(c1))
        self.assertEqual('"crit"."foo">=\'2000-01-01T12:30:55\'', str(c2))
    def test__criterion_gte_right(self):
        c1 = 1 <= Field("foo")
        c2 = -1 <= Field("foo", table=self.t)
        self.assertEqual('"foo">=1', str(c1))
        self.assertEqual('"crit"."foo">=-1', str(c2))
    def test__criterion_bitwise_and(self):
        c1 = Field("foo").bitwiseand(2)
        c2 = Field("foo", table=self.t).bitwiseand(10) == 2
        self.assertEqual('("foo" & 2)', str(c1))
        self.assertEqual('("crit"."foo" & 10)=2', str(c2))
    def test__criterion_bitwise_and_with_alias(self):
        c1 = Field("foo").bitwiseand(2).as_('alias')
        self.assertEqual('("foo" & 2) "alias"', str(c1))
    def test__bitwise_and_in_where_clause(self):
        q = QueryBuilder().from_('items').select('abc').where(Field("foo").bitwiseand(1) == 1)
        self.assertEqual('SELECT "abc" FROM "items" WHERE ("foo" & 1)=1', str(q))
class NotTests(unittest.TestCase):
    """SQL rendering of negated criteria (NOT, IS [NOT] NULL)."""
    table_abc, table_efg = Table("abc", alias="cx0"), Table("efg", alias="cx1")
    def test_negate(self):
        c1 = Field("foo") >= 1
        c2 = c1.negate()
        self.assertEqual('"foo">=1', str(c1))
        self.assertEqual('NOT "foo">=1', str(c2))
    def test_variable_access(self):
        # Negation must still expose term attributes such as is_aggregate.
        c1 = Field("foo").negate()
        self.assertEqual(c1.is_aggregate, False)
    def test_chained_function(self):
        # Chaining off a negated field returns a new object.
        field1 = Field("foo").negate()
        field2 = field1.eq("bar")
        self.assertEqual('NOT "foo"', str(field1))
        self.assertEqual("NOT \"foo\"='bar'", str(field2))
        self.assertIsNot(field1, field2)
    def test_not_null(self):
        c1 = Field("foo").notnull()
        c2 = Field("foo", table=self.table_abc).notnull()
        self.assertEqual('NOT "foo" IS NULL', str(c1))
        self.assertEqual('NOT "cx0"."foo" IS NULL', str(c2))
    def test_is_not_null(self):
        c1 = Field("foo").isnotnull()
        c2 = Field("foo", table=self.table_abc).isnotnull()
        self.assertEqual('"foo" IS NOT NULL', str(c1))
        self.assertEqual('"cx0"."foo" IS NOT NULL', str(c2))
    def test_not_null_with_alias(self):
        c1 = Field("foo").notnull().as_("something")
        c2 = Field("foo", table=self.table_abc).notnull().as_("something")
        self.assertEqual('NOT "foo" IS NULL "something"', str(c1))
        self.assertEqual('NOT "cx0"."foo" IS NULL "something"', str(c2))
    def test_notnullcriterion_replace_table(self):
        f = self.table_abc.foo.notnull().replace_table(self.table_abc, self.table_efg)
        self.assertEqual('NOT "cx1"."foo" IS NULL', str(f))
    def test_not_with_or_criterion(self):
        self.assertEqual('NOT ("foo" OR "bar")', str(~(Field("foo") | Field("bar"))))
    def test_not_with_and_criterion(self):
        self.assertEqual('NOT ("foo" AND "bar")', str(~(Field("foo") & Field("bar"))))
    def test_not_with_complex_criterion(self):
        self.assertEqual(
            'NOT ("foo" AND "bar" AND "fizz" AND "buzz")',
            str(~(Field("foo") & Field("bar") & Field("fizz") & Field("buzz"))),
        )
class BetweenTests(unittest.TestCase):
    """SQL rendering of BETWEEN, via .between() and slice syntax field[a:b]."""
    t = Table("abc", alias="btw")
    def test__between_number(self):
        c1 = Field("foo").between(0, 1)
        c2 = Field("foo", table=self.t).between(0, 1)
        c3 = Field("foo")[0:1]
        self.assertEqual('"foo" BETWEEN 0 AND 1', str(c1))
        self.assertEqual('"btw"."foo" BETWEEN 0 AND 1', str(c2))
        self.assertEqual('"foo" BETWEEN 0 AND 1', str(c3))
    def test__between_with_alias(self):
        c1 = Field("foo").between(0, 1).as_('alias')
        c2 = Field("foo", table=self.t).between(0, 1).as_('alias')
        c3 = Field("foo")[0:1].as_('alias')
        self.assertEqual('"foo" BETWEEN 0 AND 1 "alias"', str(c1))
        self.assertEqual('"btw"."foo" BETWEEN 0 AND 1 "alias"', str(c2))
        self.assertEqual('"foo" BETWEEN 0 AND 1 "alias"', str(c3))
    def test__between_date(self):
        c1 = Field("foo").between(date(2000, 1, 1), date(2000, 12, 31))
        c2 = Field("foo", table=self.t).between(date(2000, 1, 1), date(2000, 12, 31))
        c3 = Field("foo")[date(2000, 1, 1) : date(2000, 12, 31)]
        self.assertEqual("\"foo\" BETWEEN '2000-01-01' AND '2000-12-31'", str(c1))
        self.assertEqual("\"btw\".\"foo\" BETWEEN '2000-01-01' AND '2000-12-31'", str(c2))
        self.assertEqual("\"foo\" BETWEEN '2000-01-01' AND '2000-12-31'", str(c3))
    def test__between_datetime(self):
        c1 = Field("foo").between(datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59))
        c2 = Field("foo", table=self.t).between(datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59))
        c3 = Field("foo")[datetime(2000, 1, 1, 0, 0, 0) : datetime(2000, 12, 31, 23, 59, 59)]
        self.assertEqual("\"foo\" BETWEEN '2000-01-01T00:00:00' AND '2000-12-31T23:59:59'", str(c1))
        self.assertEqual(
            "\"btw\".\"foo\" BETWEEN '2000-01-01T00:00:00' AND '2000-12-31T23:59:59'",
            str(c2),
        )
        self.assertEqual("\"foo\" BETWEEN '2000-01-01T00:00:00' AND '2000-12-31T23:59:59'", str(c3))
    def test__function_between(self):
        c1 = fn.Coalesce(Field("foo"), 0)[0:1]
        c2 = fn.Coalesce(Field("foo", table=self.t), 0)[0:1]
        self.assertEqual('COALESCE("foo",0) BETWEEN 0 AND 1', str(c1))
        self.assertEqual('COALESCE("btw"."foo",0) BETWEEN 0 AND 1', str(c2))
    def test_get_item_only_works_with_slice(self):
        # Indexing with a scalar (not a slice) must be rejected.
        with self.assertRaises(TypeError):
            Field("foo")[0]
        with self.assertRaises(TypeError):
            Field("foo")[date(2000, 1, 1)]
        with self.assertRaises(TypeError):
            Field("foo")[datetime(2000, 1, 1, 0, 0, 0)]
class IsInTests(unittest.TestCase):
    """SQL rendering of IN (...) criteria built with .isin()."""
    t = Table("abc", alias="isin")
    def test__in_number(self):
        c1 = Field("foo").isin([0, 1])
        c2 = Field("foo", table=self.t).isin([0, 1])
        self.assertEqual('"foo" IN (0,1)', str(c1))
        self.assertEqual('"isin"."foo" IN (0,1)', str(c2))
    def test__in_number_with_alias(self):
        c1 = Field("foo").isin([0, 1]).as_('alias')
        c2 = Field("foo", table=self.t).isin([0, 1]).as_('alias')
        self.assertEqual('"foo" IN (0,1) "alias"', str(c1))
        self.assertEqual('"isin"."foo" IN (0,1) "alias"', str(c2))
    def test__in_character(self):
        c1 = Field("foo").isin(["a", "b"])
        c2 = Field("foo", table=self.t).isin(["a", "b"])
        self.assertEqual("\"foo\" IN ('a','b')", str(c1))
        self.assertEqual("\"isin\".\"foo\" IN ('a','b')", str(c2))
    def test__in_date(self):
        c1 = Field("foo").isin([date(2000, 1, 1), date(2000, 12, 31)])
        c2 = Field("foo", table=self.t).isin([date(2000, 1, 1), date(2000, 12, 31)])
        self.assertEqual("\"foo\" IN ('2000-01-01','2000-12-31')", str(c1))
        self.assertEqual("\"isin\".\"foo\" IN ('2000-01-01','2000-12-31')", str(c2))
    def test__in_datetime(self):
        c1 = Field("foo").isin([datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)])
        c2 = Field("foo", table=self.t).isin([datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)])
        self.assertEqual("\"foo\" IN ('2000-01-01T00:00:00','2000-12-31T23:59:59')", str(c1))
        self.assertEqual("\"isin\".\"foo\" IN ('2000-01-01T00:00:00','2000-12-31T23:59:59')", str(c2))
    def test__function_isin(self):
        c1 = fn.Coalesce(Field("foo"), 0).isin([0, 1])
        c2 = fn.Coalesce(Field("foo", table=self.t), 0).isin([0, 1])
        self.assertEqual('COALESCE("foo",0) IN (0,1)', str(c1))
        self.assertEqual('COALESCE("isin"."foo",0) IN (0,1)', str(c2))
    def test__in_unicode(self):
        c1 = Field("foo").isin([u"a", u"b"])
        c2 = Field("foo", table=self.t).isin([u"a", u"b"])
        self.assertEqual("\"foo\" IN ('a','b')", str(c1))
        self.assertEqual("\"isin\".\"foo\" IN ('a','b')", str(c2))
class ArithmeticExpressionTests(unittest.TestCase):
    """SQL rendering of the bit-shift operators on fields."""

    def test__lshift(self):
        shifted_by_one = Field("foo").lshift("1")
        shifted_by_two = Field("foo").lshift("2")
        self.assertEqual("\"foo\"<<'1'", str(shifted_by_one))
        self.assertEqual("\"foo\"<<'2'", str(shifted_by_two))

    def test__rshift(self):
        shifted_by_one = Field("foo").rshift("1")
        shifted_by_two = Field("foo").rshift("2")
        self.assertEqual("\"foo\">>'1'", str(shifted_by_one))
        self.assertEqual("\"foo\">>'2'", str(shifted_by_two))
class NotInTests(unittest.TestCase):
    """SQL rendering of NOT IN (...) criteria built with .notin()."""
    t = Table("abc", alias="notin")
    def test__notin_number(self):
        c1 = Field("foo").notin([0, 1])
        c2 = Field("foo", table=self.t).notin([0, 1])
        self.assertEqual('"foo" NOT IN (0,1)', str(c1))
        self.assertEqual('"notin"."foo" NOT IN (0,1)', str(c2))
    def test__notin_number_with_alias(self):
        c1 = Field("foo").notin([0, 1]).as_('alias')
        c2 = Field("foo", table=self.t).notin([0, 1]).as_('alias')
        self.assertEqual('"foo" NOT IN (0,1) "alias"', str(c1))
        self.assertEqual('"notin"."foo" NOT IN (0,1) "alias"', str(c2))
    def test__notin_character(self):
        c1 = Field("foo").notin(["a", "b"])
        c2 = Field("foo", table=self.t).notin(["a", "b"])
        self.assertEqual("\"foo\" NOT IN ('a','b')", str(c1))
        self.assertEqual("\"notin\".\"foo\" NOT IN ('a','b')", str(c2))
    def test__notin_date(self):
        c1 = Field("foo").notin([date(2000, 1, 1), date(2000, 12, 31)])
        c2 = Field("foo", table=self.t).notin([date(2000, 1, 1), date(2000, 12, 31)])
        self.assertEqual("\"foo\" NOT IN ('2000-01-01','2000-12-31')", str(c1))
        self.assertEqual("\"notin\".\"foo\" NOT IN ('2000-01-01','2000-12-31')", str(c2))
    def test__notin_datetime(self):
        c1 = Field("foo").notin([datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)])
        c2 = Field("foo", table=self.t).notin([datetime(2000, 1, 1, 0, 0, 0), datetime(2000, 12, 31, 23, 59, 59)])
        self.assertEqual("\"foo\" NOT IN ('2000-01-01T00:00:00','2000-12-31T23:59:59')", str(c1))
        self.assertEqual(
            "\"notin\".\"foo\" NOT IN ('2000-01-01T00:00:00','2000-12-31T23:59:59')",
            str(c2),
        )
    def test__function_notin(self):
        c1 = fn.Coalesce(Field("foo"), 0).notin([0, 1])
        c2 = fn.Coalesce(Field("foo", table=self.t), 0).notin([0, 1])
        self.assertEqual('COALESCE("foo",0) NOT IN (0,1)', str(c1))
        self.assertEqual('COALESCE("notin"."foo",0) NOT IN (0,1)', str(c2))
class LikeTests(unittest.TestCase):
    """SQL rendering of LIKE / ILIKE / RLIKE / GLOB pattern criteria."""
    t = Table("abc", alias="like")
    def test_like_starts_with(self):
        c1 = Field("foo").like("ab%")
        c2 = Field("foo", table=self.t).like("ab%")
        self.assertEqual("\"foo\" LIKE 'ab%'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'ab%\'', str(c2))
    def test_like_contains(self):
        c1 = Field("foo").like("%ab%")
        c2 = Field("foo", table=self.t).like("%ab%")
        self.assertEqual("\"foo\" LIKE '%ab%'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'%ab%\'', str(c2))
    def test_like_ends_with(self):
        c1 = Field("foo").like("%ab")
        c2 = Field("foo", table=self.t).like("%ab")
        self.assertEqual("\"foo\" LIKE '%ab'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'%ab\'', str(c2))
    def test_like_n_chars_long(self):
        c1 = Field("foo").like("___")
        c2 = Field("foo", table=self.t).like("___")
        self.assertEqual("\"foo\" LIKE '___'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'___\'', str(c2))
    def test_like_single_chars_and_various_chars(self):
        c1 = Field("foo").like("a_b%c")
        c2 = Field("foo", table=self.t).like("a_b%c")
        self.assertEqual("\"foo\" LIKE 'a_b%c'", str(c1))
        self.assertEqual('"like"."foo" LIKE \'a_b%c\'', str(c2))
    def test_not_like_single_chars_and_various_chars(self):
        c1 = Field("foo").not_like("a_b%c")
        c2 = Field("foo", table=self.t).not_like("a_b%c")
        self.assertEqual("\"foo\" NOT LIKE 'a_b%c'", str(c1))
        self.assertEqual('"like"."foo" NOT LIKE \'a_b%c\'', str(c2))
    def test_ilike_starts_with(self):
        c1 = Field("foo").ilike("ab%")
        c2 = Field("foo", table=self.t).ilike("ab%")
        self.assertEqual("\"foo\" ILIKE 'ab%'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'ab%\'', str(c2))
    def test_ilike_contains(self):
        c1 = Field("foo").ilike("%ab%")
        c2 = Field("foo", table=self.t).ilike("%ab%")
        self.assertEqual("\"foo\" ILIKE '%ab%'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'%ab%\'', str(c2))
    def test_ilike_ends_with(self):
        c1 = Field("foo").ilike("%ab")
        c2 = Field("foo", table=self.t).ilike("%ab")
        self.assertEqual("\"foo\" ILIKE '%ab'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'%ab\'', str(c2))
    def test_ilike_n_chars_long(self):
        c1 = Field("foo").ilike("___")
        c2 = Field("foo", table=self.t).ilike("___")
        self.assertEqual("\"foo\" ILIKE '___'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'___\'', str(c2))
    def test_ilike_single_chars_and_various_chars(self):
        c1 = Field("foo").ilike("a_b%c")
        c2 = Field("foo", table=self.t).ilike("a_b%c")
        self.assertEqual("\"foo\" ILIKE 'a_b%c'", str(c1))
        self.assertEqual('"like"."foo" ILIKE \'a_b%c\'', str(c2))
    def test_not_ilike_single_chars_and_various_chars(self):
        c1 = Field("foo").not_ilike("a_b%c")
        c2 = Field("foo", table=self.t).not_ilike("a_b%c")
        self.assertEqual("\"foo\" NOT ILIKE 'a_b%c'", str(c1))
        self.assertEqual('"like"."foo" NOT ILIKE \'a_b%c\'', str(c2))
    def test_rlike_escape_chars(self):
        # Backslashes must pass through to the rendered regex unchanged.
        c1 = Field("foo").rlike("\\\\d+$")
        c2 = Field("foo", table=self.t).rlike("\\\\d+$")
        self.assertEqual("\"foo\" RLIKE '\\\\d+$'", str(c1))
        self.assertEqual('"like"."foo" RLIKE \'\\\\d+$\'', str(c2))
    def test_glob_single_chars_and_various_chars(self):
        c1 = Field("foo").glob("a_b*")
        c2 = Field("foo", table=self.t).glob("a_b*")
        self.assertEqual("\"foo\" GLOB 'a_b*'", str(c1))
        self.assertEqual('"like"."foo" GLOB \'a_b*\'', str(c2))
class ExistsCriterionTests(unittest.TestCase):
    """SQL rendering of EXISTS / NOT EXISTS subquery criteria."""

    t2 = Table("abc", alias="t2")
    q2 = QueryBuilder().from_(t2).select(t2.field2)

    def test_exists(self):
        outer = Table("def", alias="t1")
        query = QueryBuilder().from_(outer).where(ExistsCriterion(self.q2)).select(outer.field1)
        expected = 'SELECT "t1"."field1" FROM "def" "t1" WHERE EXISTS (SELECT "t2"."field2" FROM "abc" "t2")'
        self.assertEqual(expected, str(query))

    def test_not_exists(self):
        outer = Table("def", alias="t1")
        query = QueryBuilder().from_(outer).where(ExistsCriterion(self.q2).negate()).select(outer.field1)
        expected = 'SELECT "t1"."field1" FROM "def" "t1" WHERE NOT EXISTS (SELECT "t2"."field2" FROM "abc" "t2")'
        self.assertEqual(expected, str(query))
class ComplexCriterionTests(unittest.TestCase):
    """SQL rendering of criteria combined with AND / OR / XOR and nesting."""
    table_abc, table_efg = Table("abc", alias="cx0"), Table("efg", alias="cx1")
    def test_and(self):
        c1 = (Field("foo") == 1) & (Field("bar") == 2)
        c2 = (Field("foo", table=self.table_abc) == 1) & (Field("bar", table=self.table_efg) == 2)
        self.assertEqual('"foo"=1 AND "bar"=2', str(c1))
        self.assertEqual('"cx0"."foo"=1 AND "cx1"."bar"=2', str(c2))
    def test_or(self):
        c1 = (Field("foo") == 1) | (Field("bar") == 2)
        c2 = (Field("foo", table=self.table_abc) == 1) | (Field("bar", table=self.table_efg) == 2)
        self.assertEqual('"foo"=1 OR "bar"=2', str(c1))
        self.assertEqual('"cx0"."foo"=1 OR "cx1"."bar"=2', str(c2))
    def test_xor(self):
        c1 = (Field("foo") == 1) ^ (Field("bar") == 2)
        c2 = (Field("foo", table=self.table_abc) == 1) ^ (Field("bar", table=self.table_efg) == 2)
        self.assertEqual('"foo"=1 XOR "bar"=2', str(c1))
        self.assertEqual('"cx0"."foo"=1 XOR "cx1"."bar"=2', str(c2))
    def test_function_and(self):
        c1 = fn.IsNull(Field("foo")) & (Field("bar") == 2)
        self.assertEqual('ISNULL("foo") AND "bar"=2', str(c1))
    def test_function_or(self):
        c1 = fn.IsNull(Field("foo")) | (Field("bar") == 2)
        self.assertEqual('ISNULL("foo") OR "bar"=2', str(c1))
    def test_function_xor(self):
        c1 = fn.IsNull(Field("foo")) ^ (Field("bar") == 2)
        self.assertEqual('ISNULL("foo") XOR "bar"=2', str(c1))
    def test__nested__and(self):
        c = (Field("foo") == 1) & (Field("bar") == 2) & (Field("buz") == 3)
        self.assertEqual('"foo"=1 AND "bar"=2 AND "buz"=3', str(c))
    def test__nested__or(self):
        c = (Field("foo") == 1) | (Field("bar") == 2) | (Field("buz") == 3)
        self.assertEqual('"foo"=1 OR "bar"=2 OR "buz"=3', str(c))
    def test__nested__mixed(self):
        # Mixed operators keep explicit parentheses around the inner AND.
        c = ((Field("foo") == 1) & (Field("bar") == 2)) | (Field("buz") == 3)
        self.assertEqual('("foo"=1 AND "bar"=2) OR "buz"=3', str(c))
    def test__between_and_isin(self):
        c = Field("foo").isin([1, 2, 3]) & Field("bar").between(0, 1)
        self.assertEqual('"foo" IN (1,2,3) AND "bar" BETWEEN 0 AND 1', str(c))
    def test__between_and_field(self):
        c1 = Field("foo").between(0, 1)
        c2 = Field("bool_field")
        self.assertEqual('"foo" BETWEEN 0 AND 1 AND "bool_field"', str(c1 & c2))
        self.assertEqual('"bool_field" AND "foo" BETWEEN 0 AND 1', str(c2 & c1))
class FieldsAsCriterionTests(unittest.TestCase):
    """Bare fields can be combined directly as boolean criteria."""

    def test__field_and_field(self):
        combined = Field("a") & Field("b")
        self.assertEqual('"a" AND "b"', str(combined))

    def test__field_or_field(self):
        combined = Field("a") | Field("b")
        self.assertEqual('"a" OR "b"', str(combined))

    def test__field_xor_field(self):
        combined = Field("a") ^ Field("b")
        self.assertEqual('"a" XOR "b"', str(combined))
class CriterionOperationsTests(unittest.TestCase):
    """replace_table() must rewrite table references in every term type."""
    table_abc, table_efg = Table("abc", alias="cx0"), Table("efg", alias="cx1")
    def test_field_replace_table(self):
        f = self.table_abc.foo.replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo"', str(f))
    def test_arithmeticfunction_replace_table(self):
        f = (self.table_abc.foo + self.table_abc.bar).replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo"+"cx1"."bar"', str(f))
    def test_criterion_replace_table(self):
        f = (self.table_abc.foo < self.table_abc.bar).replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo"<"cx1"."bar"', str(f))
    def test_complexcriterion_replace_table(self):
        f = (self.table_abc.foo < self.table_abc.bar) & (self.table_abc.fiz > self.table_abc.buz)
        f = f.replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo"<"cx1"."bar" AND "cx1"."fiz">"cx1"."buz"', str(f))
    def test_function_with_only_fields_replace_table(self):
        f = fn.Sum(self.table_abc.foo).replace_table(self.table_abc, self.table_efg)
        self.assertEqual('SUM("cx1"."foo")', str(f))
    def test_function_with_values_and_fields_replace_table(self):
        f = Mod(self.table_abc.foo, 2).replace_table(self.table_abc, self.table_efg)
        self.assertEqual('MOD("cx1"."foo",2)', str(f))
    def test_betweencriterion_replace_table(self):
        f = self.table_abc.foo[0:1].replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo" BETWEEN 0 AND 1', str(f))
    def test_nullcriterion_replace_table(self):
        f = self.table_abc.foo.isnull().replace_table(self.table_abc, self.table_efg)
        self.assertEqual('"cx1"."foo" IS NULL', str(f))
class AnyTests(unittest.TestCase):
    """Criterion.any() ORs a sequence of criteria together."""

    def test_zero_args_returns_empty_criterion(self):
        self.assertIsInstance(Criterion.any(), EmptyCriterion)

    def test_single_arg_returns_self(self):
        field = Field("a")
        self.assertEqual(str(field), str(Criterion.any([field])))

    def test_multiple_args_returned_in_chain_of_ors(self):
        fields = [Field(name) for name in ("a", "b", "c", "d")]
        self.assertEqual(str(Criterion.any(fields)), '"a" OR "b" OR "c" OR "d"')

    def test_with_generator(self):
        chained = Criterion.any(Field(letter) for letter in "abcd")
        self.assertEqual(str(chained), '"a" OR "b" OR "c" OR "d"')
class EmptyCriterionTests(unittest.TestCase):
    """An EmptyCriterion references no fields at all."""

    def test_fields_(self):
        self.assertEqual(0, len(EmptyCriterion().fields_()))
class AllTests(unittest.TestCase):
    """Criterion.all() ANDs a sequence of criteria together."""

    def test_zero_args_returns_empty_criterion(self):
        crit = Criterion.all()
        self.assertIsInstance(crit, EmptyCriterion)

    def test_single_arg_returns_self(self):
        f = Field("a")
        crit = Criterion.all([f])
        self.assertEqual(str(f), str(crit))

    def test_multiple_args_returned_in_chain_of_ands(self):
        # Renamed from "...chain_of_ors" (copy-paste from AnyTests):
        # Criterion.all chains with AND, as the expected SQL shows.
        crit = Criterion.all([Field("a"), Field("b"), Field("c"), Field("d")])
        self.assertEqual(str(crit), '"a" AND "b" AND "c" AND "d"')

    def test_with_generator(self):
        crit = Criterion.all(Field(letter) for letter in "abcd")
        self.assertEqual(str(crit), '"a" AND "b" AND "c" AND "d"')
| |
import os
from . import helpers
def input_block(config, section):
    """Return the input block for *section* as a newline-terminated string.

    Each key of ``config[section]`` becomes one line: ``"key value"`` when
    the stripped value is non-empty, otherwise the bare key (a flag-style
    option with no value).

    :param config: mapping of section name -> mapping of key -> string value.
    :param section: section to render; '' is returned when it is absent.
    :returns: the rendered block, one option per line, or '' if empty.
    """
    if section not in config:
        return ''
    lines = []
    for key in config[section]:
        value = config[section][key].strip()
        # Flag-style options have an empty value and are emitted bare.
        lines.append('{} {}'.format(key, value) if value else key)
    # join avoids the quadratic cost of repeated string concatenation.
    return ''.join(line + '\n' for line in lines)
def create_link_files(config, dirname='.'):
    """Create a 'link-files' executable script.

    The generated bash script symlinks Quantum ESPRESSO outputs (under
    ../../1-qe) and intermediate BerkeleyGW results into the step
    directories that consume them.

    :param config: unused here; kept for a uniform create_* signature.
    :param dirname: directory in which to create the script.
    """
    file = os.path.join(dirname, 'link-files')
    # NOTE(review): opened in append mode — re-running in the same directory
    # would duplicate the script body; presumably each run starts fresh.
    with open(file, 'a') as f:
        f.write('#!/bin/bash\n'
                'QE="../../1-qe"\n'
                '\n'
                'ln -sf $QE/2-wfn/WFN 1-epsilon/WFN\n'
                'ln -sf $QE/3-wfnq/WFN 1-epsilon/WFNq\n'
                'ln -sf $QE/4-wfn_co/RHO 2-sigma/RHO\n'
                'ln -sf $QE/4-wfn_co/WFN 2-sigma/WFN_inner\n'
                'ln -sf $QE/4-wfn_co/WFN 3-kernel/WFN_co\n'
                'ln -sf $QE/4-wfn_co/WFN 4-absorption/WFN_co\n'
                'ln -sf $QE/4-wfn_co/WFN 5-inteqp/WFN_co\n'
                'ln -sf $QE/4-wfn_co/vxc.dat 2-sigma/vxc.dat\n'
                'ln -sf $QE/5-wfn_fi/WFN 4-absorption/WFN_fi\n'
                'ln -sf $QE/6-wfnq_fi/WFN 4-absorption/WFNq_fi\n'
                'ln -sf $QE/7-bands/WFN 5-inteqp/WFN_fi\n'
                '\n'
                'ln -sf ../1-epsilon/eps0mat 2-sigma\n'
                'ln -sf ../1-epsilon/eps0mat 3-kernel\n'
                'ln -sf ../1-epsilon/eps0mat 4-absorption\n'
                'ln -sf ../1-epsilon/epsmat 2-sigma\n'
                'ln -sf ../1-epsilon/epsmat 3-kernel\n'
                'ln -sf ../1-epsilon/epsmat 4-absorption\n'
                'ln -sf ../2-sigma/eqp1.dat 4-absorption/eqp_co.dat\n'
                'ln -sf ../2-sigma/eqp1.dat 5-inteqp/eqp_co.dat\n'
                'ln -sf ../3-kernel/bsedmat 4-absorption\n'
                'ln -sf ../3-kernel/bsexmat 4-absorption\n'
                )
    helpers.make_executable(file)
def create_epsilon(config, dirname='.'):
    """Create 1-epsilon directory and its input files.

    Writes a '0-setup.sh' script that, for each q-point listed in the
    'qpoints' file, creates an epsNN directory with an epsilon.inp (built
    from the [epsilon] config section), plus a 'merge' directory with an
    epsmat_merge.inp; './0-setup.sh clean' removes them again.
    """
    dirpath = os.path.join(dirname, '1-epsilon')
    setup = os.path.join(dirpath, '0-setup.sh')
    qpoints = os.path.join(dirpath, 'qpoints')
    os.makedirs(dirpath)
    with open(setup, 'a') as f:
        # Header and per-q-point loop up to the start of the heredoc.
        # NOTE: the first two literals concatenate into one writelines
        # element (no comma) — harmless, they are written contiguously.
        f.writelines([
            '#!/bin/bash\n'
            'num_kp=$(cat qpoints | wc -l)\n',
            '\n',
            '# Create epsilon.inp for every kpoints inside the qpoints file\n',
            'for i in $(seq 1 $num_kp); do\n',
            '    dir="eps$(seq -f "%02g" $i $i)"\n',
            '\n',
            '    if [[ -z $1 ]]; then\n',
            '        mkdir ${dir}\n',
            '        cd $dir\n',
            '\n',
            '        cat > epsilon.inp <<- EOM\n',
        ])
        # Heredoc body: the [epsilon] options, tab-indented so that
        # '<<- EOM' strips the leading tabs in the generated file.
        f.writelines([
            '\t\t\t{}\n'.format(x) for x in input_block(
                config, 'epsilon').split('\n') if x.strip()
        ])
        # Rest of the script: qpoints block, symlinks, and the merge setup.
        f.writelines([
            '\n',
            '            begin qpoints\n',
            '            $(sed -n ${i}p ../qpoints)\n',
            '            end\n',
            '        EOM\n',
            '\n',
            '        ln -s ../WFN .\n',
            '        ln -s ../WFNq .\n',
            '\n',
            '        cd ..\n',
            '    elif [[ $1 == "clean" ]]; then\n',
            '        rm -rf ${dir}\n',
            '    fi\n',
            'done\n',
            '\n',
            '# Create an epsmat merge folder\n',
            'if [[ -z $1 ]]; then\n',
            '    nkp=$((num_kp-1))\n',
            '\n',
            '    mkdir merge\n',
            '    cd merge\n',
            '\n',
            '    echo "{} $nkp" > epsmat_merge.inp\n'.format(
                config['epsilon']['epsilon_cutoff']),
            '\n',
            '    for i in $(seq 2 $num_kp); do\n',
            '        kpoint=$(sed -n ${i}p ../qpoints)\n',
            '        echo "${kpoint%?}" >> epsmat_merge.inp\n',
            '\n',
            '        dir="eps$(seq -f "%02g" $i $i)"\n',
            '        epsmat="epsmat$(seq -f "%02g" $i $i)"\n',
            '        ln -s ../$dir/epsmat $epsmat\n',
            '    done\n',
            '\n',
            '    echo "$nkp" >> epsmat_merge.inp\n',
            '\n',
            '    for i in $(seq 2 $num_kp); do\n',
            '        epsmat="epsmat$(seq -f "%02g" $i $i)"\n',
            '        echo "$epsmat 1" >> epsmat_merge.inp\n',
            '    done\n',
            '    cd ..\n',
            'elif [[ $1 == "clean" ]]; then\n',
            '    rm -rf merge\n',
            'fi\n',
            '\n',
            'ln -sf eps01/eps0mat .\n',
            'ln -sf merge/epsmat .\n',
        ])
    with open(qpoints, 'a') as f:
        f.write('# Replace this file with all the qpoints for epsilon.inp\n')
    helpers.make_executable(setup)
def create_sigma(config, dirname='.'):
    """Create 2-sigma directory and its input files.

    Writes a '0-setup.sh' that creates a sigNN directory with a sigma.inp
    for each k-point listed in 'kpoints', and a '2-merge.sh' that
    concatenates the per-k-point eqp0/eqp1 results.
    """
    dirpath = os.path.join(dirname, '2-sigma')
    setup = os.path.join(dirpath, '0-setup.sh')
    kpoints = os.path.join(dirpath, 'kpoints')
    merge = os.path.join(dirpath, '2-merge.sh')
    # The sigma band window is taken from the vxc_diag range of pp_in so
    # the two sections stay consistent.
    override = {
        'sigma': {
            'band_index_min': config['pp_in']['vxc_diag_nmin'],
            'band_index_max': config['pp_in']['vxc_diag_nmax'],
        },
    }
    config = helpers.deep_merge(config, override)
    os.makedirs(dirpath)
    with open(setup, 'a') as f:
        # NOTE: the first two literals concatenate into one writelines
        # element (no comma) — harmless, they are written contiguously.
        f.writelines([
            '#!/bin/bash\n'
            'num_kp=$(cat kpoints | wc -l)\n',
            '\n',
            '# Create sigma.inp for every kpoints inside the kpoints file\n',
            'for i in $(seq 1 $num_kp); do\n',
            '    dir="sig$(seq -f "%02g" $i $i)"\n',
            '\n',
            '    if [[ -z $1 ]]; then\n',
            '        mkdir ${dir}\n',
            '        cd $dir\n',
            '\n',
            '        cat > sigma.inp <<- EOM\n',
        ])
        # Heredoc body: the [sigma] options, tab-indented so that
        # '<<- EOM' strips the leading tabs in the generated file.
        f.writelines([
            '\t\t\t{}\n'.format(x) for x in input_block(
                config, 'sigma').split('\n') if x.strip()
        ])
        f.writelines([
            '\n',
            '            begin kpoints\n',
            '            $(sed -n ${i}p ../kpoints)\n',
            '            end\n',
            '        EOM\n',
            '\n',
            '        ln -s ../RHO .\n',
            '        ln -s ../WFN_inner .\n',
            '        ln -s ../eps0mat .\n',
            '        ln -s ../epsmat .\n',
            '        ln -s ../vxc.dat .\n',
            '\n',
            '        cd ..\n',
            '    elif [[ $1 == "clean" ]]; then\n',
            '        rm -rf ${dir}\n',
            '    fi\n',
            'done\n',
        ])
    with open(kpoints, 'a') as f:
        f.write('# Replace this file with all the kpoints for sigma.inp\n')
    with open(merge, 'a') as f:
        f.writelines([
            '#!/bin/bash\n',
            'num_kp=$(cat kpoints | wc -l)\n',
            '\n',
            'for i in $(seq 1 $num_kp); do\n',
            '    dir="sig$(seq -f "%02g" $i $i)"\n',
            '    cat $dir/eqp0.dat >> eqp0.dat\n',
            '    cat $dir/eqp1.dat >> eqp1.dat\n',
            'done\n',
        ])
    helpers.make_executable(setup)
    helpers.make_executable(merge)
def create_kernel(config, dirname='.'):
    """Create 3-kernel directory and its input files."""
    kernel_dir = os.path.join(dirname, '3-kernel')
    inp_path = os.path.join(kernel_dir, 'kernel.inp')
    clean_path = os.path.join(kernel_dir, 'clean')
    os.makedirs(kernel_dir)
    # Write the kernel input block rendered from the configuration.
    with open(inp_path, 'a') as inp_file:
        inp_file.write(input_block(config, 'kernel'))
    # Helper script that removes kernel outputs.
    with open(clean_path, 'w') as clean_file:
        clean_file.write('#!/bin/bash\n'
                         'rm bse* *.log *.out 2> /dev/null\n')
    helpers.make_executable(clean_path)
def create_absorption(config, dirname='.'):
    """Create 4-absorption directory and its input files."""
    absorption_dir = os.path.join(dirname, '4-absorption')
    inp_path = os.path.join(absorption_dir, 'absorption.inp')
    clean_path = os.path.join(absorption_dir, 'clean')
    # The coarse-grid band counts must match what the kernel step used.
    config = helpers.deep_merge(config, {
        'absorption': {
            'number_val_bands_coarse': config['kernel']['number_val_bands'],
            'number_cond_bands_coarse': config['kernel']['number_cond_bands'],
        },
    })
    os.makedirs(absorption_dir)
    with open(inp_path, 'a') as inp_file:
        inp_file.write(input_block(config, 'absorption'))
    # Helper script that removes absorption outputs.
    with open(clean_path, 'w') as clean_file:
        clean_file.write('#!/bin/bash\n'
                         'rm *.log *.out absorption_noeh.dat bandstructure.dat '
                         'dtmat* \\\n'
                         '    eigenvalues_noeh.dat dcmat_norm.dat dvmat_norm.dat '
                         'epsdiag.dat vmtxel \\\n'
                         '    eqp.dat eqp_q.dat absorption_eh.dat eigenvalues.dat '
                         'eigenvectors 2> /dev/null\n')
    helpers.make_executable(clean_path)
def create_inteqp(config, dirname='.'):
    """Create 5-inteqp directory and its input files."""
    inteqp_dir = os.path.join(dirname, '5-inteqp')
    inp_path = os.path.join(inteqp_dir, 'inteqp.inp')
    clean_path = os.path.join(inteqp_dir, 'clean')
    os.makedirs(inteqp_dir)
    # Write the inteqp input block rendered from the configuration.
    with open(inp_path, 'a') as inp_file:
        inp_file.write(input_block(config, 'inteqp'))
    # Helper script that removes inteqp outputs.
    with open(clean_path, 'w') as clean_file:
        clean_file.write('#!/bin/bash\n'
                         'rm *.log *.out bandstructure.dat dtmat* dcmat_norm.dat '
                         'dvmat_norm.dat \\\n'
                         '    eqp.dat eqp_q.dat 2> /dev/null\n')
    helpers.make_executable(clean_path)
def create_bgw(config, dirname='.'):
    """Create a new directory '2-bgw' and all its directories."""
    bgw_dir = os.path.join(dirname, '2-bgw')
    os.makedirs(bgw_dir)
    # Build each workflow step, in pipeline order, under 2-bgw.
    for build_step in (create_link_files, create_epsilon, create_sigma,
                       create_kernel, create_absorption, create_inteqp):
        build_step(config, bgw_dir)
| |
# Copyright (c) 2015 Russell Sim <russell.sim@gmail.com>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
from copy import copy
import json
import logging
import os
from os import path
import re
import textwrap
import xml.sax
from jinja2 import Environment
import prettytable
log = logging.getLogger(__name__)

# WADL/XSD type name -> JSON-schema primitive type.  Lookups are done on
# the lower-cased type name (see create_parameter).  Duplicate keys that
# appeared in the original literal ('string', 'csapi:uuid',
# 'xsd:datetime') have been removed; they mapped to identical values, so
# the resulting dict is unchanged.
TYPE_MAP = {
    'string': 'string',
    'xsd:string': 'string',
    'csapi:string': 'string',
    'xsd:int': 'integer',
    'csapi:uuid': 'string',
    'xsd:boolean': 'boolean',
    'boolean': 'boolean',
    'object': 'object',
    'csapi:bool': 'boolean',
    'xsd:bool': 'boolean',
    'xsd:datetime': 'string',
    'regexp': 'string',
    'xsd:dict': 'object',
    'alarm': 'string',
    'xsd:timestamp': 'string',
    'xsd:char': 'string',
    'list': 'array',
    'csapi:flavorswithonlyidsnameslinks': 'string',
    'csapi:imagestatus': 'string',
    'csapi:imageswithonlyidsnameslinks': 'string',
    'xsd:enum': 'string',
    'xsd:anyuri': 'string',
    'csapi:serverforupdate': 'string',
    'capi:uuid': 'string',
    'xsd:uuid': 'string',
    'imageapi:string': 'string',
    'imageapi:imagestatus': 'string',
    'imageapi:uuid': 'string',
    'csapi:serverforcreate': 'string',
    'csapi:blockdevicemapping': 'string',
    'csapi:serverswithonlyidsnameslinks': 'string',
    'csapi:serverstatus': 'string',
    'csapi:dict': 'object',
    'imageforcreate': 'string',
    'xsd:ip': 'string',
    'xsd:base64binary': 'string',
    'enum': 'array',
    'xsd:float': 'number',
    # TODO(arrsim) This array types also set the items
    # "tags": {
    #     "type": "array",
    #     "items": {
    #         "type": "string"
    'xsd:list': 'array',
    'array': 'array',
}

# XSD type -> JSON-schema "format" hint.  NOTE(review): keys here are
# mixed-case while TYPE_MAP keys are lower-cased; callers look this up
# with the raw (uncased) type name.
FORMAT_MAP = {
    'xsd:anyURI': 'uri',
    'xsd:datetime': 'date-time',
    'xsd:ip': 'ipv4',
    'regexp': 'regexp',
    'xsd:timestamp': 'timestamp',
}

# WADL parameter style -> Swagger parameter location.
STYLE_MAP = {
    'template': 'path',
    'plain': 'body',
    'query': 'query',
    'header': 'header',
}

# Example-file extension -> MIME type.
MIME_MAP = {
    'json': 'application/json',
    'txt': 'text/plain',
    'xml': 'application/xml',
}

# Raw strings avoid the invalid-escape-sequence DeprecationWarning that
# '\.' and '\s' trigger in plain literals on Python 3.6+.
VERSION_RE = re.compile(r'v[0-9\.]+')
WHITESPACE_RE = re.compile(r'[\s]+', re.MULTILINE)
URL_TEMPLATE_RE = re.compile(r'{[^{}]+}')
CAPTION_RE = re.compile(r'[*`]*')
MARKUP_RE = re.compile(r'[.,:;)]+')
# Shared Jinja2 environment used to compile the example templates below.
environment = Environment()
# Renders a raw HTTP request example (emitted as text/plain in main1).
HTTP_REQUEST = """{{ method }} {{ url }} HTTP/1.1
{% for key, value in headers.items() -%}
{{ key }}: {{ value }}
{% endfor %}
"""
HTTP_REQUEST_TMPL = environment.from_string(HTTP_REQUEST)
# Renders a raw HTTP response example, including its body.
HTTP_RESPONSE = """HTTP/1.1 {{ status_code }}
{% for key, value in headers.items() -%}
{{ key }}: {{ value }}
{% endfor %}
{{ body }}
"""
HTTP_RESPONSE_TMPL = environment.from_string(HTTP_RESPONSE)
def create_parameter(name, _in, description='',
                     type='xsd:string', required=True):
    """Build a Swagger parameter object from WADL attributes.

    :param name: parameter name as declared in the WADL.
    :param _in: WADL style ('template', 'plain', 'query', 'header'),
        mapped to a Swagger location via STYLE_MAP.
    :param description: human-readable description text.
    :param type: WADL/XSD type name, mapped via TYPE_MAP
        (case-insensitively).
    :param required: either a real boolean or the WADL string
        'true'/'false'.
    """
    return {
        "name": name,
        "in": STYLE_MAP[_in],
        "description": description,
        # WADL serialises booleans as strings.  Accept both forms so the
        # default ``required=True`` is not silently coerced to False --
        # path (template) parameters must be required in Swagger.
        "required": required is True or required == 'true',
        "type": TYPE_MAP[type.lower()],
        # NOTE(review): this lookup is case-sensitive while TYPE_MAP's is
        # not; confirm whether mixed-case types should hit FORMAT_MAP.
        "format": FORMAT_MAP.get(type, ''),
    }
def join_url(parts):
    """Return a joined url without any duplicate slashes"""
    joined = '/'.join(parts)
    return joined.replace('//', '/')
class SubParser(xml.sax.ContentHandler):
    """Base class for delegated SAX handlers.

    Tracks the element/attribute stack; when the stack empties the
    accumulated ``result`` (plus any ``kwargs``) is handed back to the
    parent handler via ``detach_subparser``.
    """

    def __init__(self, parent):
        # general state
        self.tag_stack = []
        self.attr_stack = []
        self.parent = parent
        self.result = None
        self.kwargs = {}

    def startElement(self, name, _attrs):
        attrs = dict(_attrs)
        self.tag_stack.append(name)
        self.attr_stack.append(attrs)
        return attrs

    def endElement(self, name):
        self.tag_stack.pop()
        self.attr_stack.pop()
        # Stack exhausted: this sub-parse is finished, hand control back.
        if len(self.tag_stack) == 0:
            self.parent.detach_subparser(self.result, **self.kwargs)

    def search_stack_for(self, tag_name):
        """Return the attrs of the nearest enclosing *tag_name*, or None."""
        for tag, attrs in zip(self.tag_stack[::-1], self.attr_stack[::-1]):
            if tag == tag_name:
                return attrs
        return None

    def on_top_tag_stack(self, *args):
        """True when the top of the stack matches *args* in order."""
        return list(args) == self.tag_stack[-len(args):]
class TableMixin(object):
    """Mixin that renders docbook table markup into a PrettyTable.

    Consumers must provide ``content`` and ``content_stack`` attributes
    (see ParaParser).
    """

    def visit_table(self, attrs):
        # Fresh grid-ruled table for every <table> element.
        self.__table = prettytable.PrettyTable(hrules=prettytable.ALL)
        self.__table.header = False

    def depart_table(self):
        rendered = str(self.__table)
        self.content.extend(['\n\n', rendered, '\n\n'])

    def visit_caption(self, attrs):
        # Collect the caption text in its own buffer.
        self.content_stack.append([])

    def depart_caption(self):
        caption = ''.join(self.content_stack.pop()).strip()
        caption = CAPTION_RE.sub('', caption)
        caption = WHITESPACE_RE.sub(' ', caption)
        self.content.append('**' + caption + '**')

    def visit_th(self, attrs):
        self.__table.header = True

    def depart_th(self):
        heading = self.content.pop().strip()
        self.__table.field_names.append(heading)
        self.__table.align[heading] = 'l'
        self.__table.valign[heading] = 't'
        self.__table.max_width[heading] = 80

    def visit_tr(self, attrs):
        self.__row = []

    def visit_td(self, attrs):
        self.content_stack.append([])

    def depart_td(self):
        cell = ''.join(self.content_stack.pop()).strip()
        self.__row.append(cell)

    def depart_tr(self):
        if not self.__row:
            return
        # Pad short rows so every row has one cell per column.
        missing = len(self.__table.field_names) - len(self.__row)
        self.__row.extend([''] * missing)
        self.__table.add_row(self.__row)
class ParaParser(SubParser, TableMixin):
    """Sub-parser that renders docbook <para> content as reStructuredText.

    Character data is accumulated on ``content_stack`` so nested
    constructs (lists, tables, literal blocks, inline markup) can be
    assembled bottom-up; paragraphs are re-wrapped to ``fill_width``
    columns on departure.
    """
    # Inline emphasis role -> RST markup.
    EMPHASIS = {
        'bold': '**',
        'italic': '*'
    }

    def __init__(self, parent):
        super(ParaParser, self).__init__(parent)
        self.content_stack = [[]]
        self.current_emphasis = None
        self.nesting = 0
        self.no_space = False
        self.fill_width = 67
        self.wrapper = textwrap.TextWrapper(width=self.fill_width)
        self.shortdesc = False
        self.inline_markup_stack = []
        self.hyperlink_end = False
        self.litblockstr = ''
        self.base_indent = ' '
        self.markup_end = False

    @property
    def content(self):
        # The innermost buffer currently being filled.
        return self.content_stack[-1]

    def startElement(self, name, _attrs):
        super(ParaParser, self).startElement(name, _attrs)
        # Dispatch to visit_<tag> if such a handler exists.
        fn = getattr(self, 'visit_%s' % name, None)
        if fn:
            fn(dict(_attrs))

    def endElement(self, name):
        content = ''.join(self.content)
        self.result = content
        super(ParaParser, self).endElement(name)
        # Dispatch to depart_<tag> if such a handler exists.
        fn = getattr(self, 'depart_%s' % name, None)
        if fn:
            fn()

    def characters(self, content):
        if not content:
            return
        # Fold up any white space into a single char
        if not self.on_top_tag_stack('programlisting'):
            content = WHITESPACE_RE.sub(' ', content)
        if content == ' ':
            return
        if content[0] == '\n':
            return
        if self.content:
            if self.content[-1].endswith('\n'):
                content = ' ' * self.nesting + content.strip()
            elif self.content[-1].endswith(' '):
                content = content.strip()
            elif (self.on_top_tag_stack('programlisting')):
                if self.content[-1].endswith('<'):
                    pass
                else:
                    if self.search_stack_for('itemizedlist') is None:
                        content = '\n' + ' ' * self.nesting + content
                    else:
                        content = '\n' + self.base_indent * self.nesting + \
                                  ' ' + content
            elif self.no_space:
                content = content.strip()
            elif self.hyperlink_end:
                self.hyperlink_end = False
                if content == '.' or content == ':':
                    pass
                else:
                    content = ' ' + content.strip()
            elif self.markup_end:
                self.markup_end = False
                if MARKUP_RE.match(content):
                    pass
                else:
                    content = ' ' + content.strip()
            else:
                content = ' ' + content.strip()
        if self.no_space is True:
            # Inside inline markup: buffer text for the depart_* handler.
            self.inline_markup_stack.append(content)
        else:
            self.content.append(content)

    def visit_listitem(self, attrs):
        self.nesting = len([tag for tag in self.tag_stack
                            if tag == 'listitem']) - 1
        if self.nesting > 0:
            prev_nesting = self.nesting - 1
            self.base_indent = ' ' * prev_nesting + ' '
        else:
            self.base_indent = ' '
        self.content_stack.append([self.base_indent * self.nesting + '-'])
        self.wrapper = textwrap.TextWrapper(
            width=self.fill_width,
            initial_indent=' ',
            subsequent_indent=self.base_indent * self.nesting + ' ',)

    def depart_listitem(self):
        content = self.content_stack.pop()
        self.content.append(''.join(content))
        if self.content[-1].endswith('\n\n'):
            pass
        else:
            self.content.append('\n')
        # Recompute nesting now that this item's tag has been popped.
        self.nesting = len([tag for tag in self.tag_stack
                            if tag == 'listitem']) - 1
        if self.nesting > 0:
            prev_nesting = self.nesting - 1
            self.base_indent = ' ' * prev_nesting + ' '
        else:
            self.base_indent = ' '

    def depart_itemizedlist(self):
        if self.search_stack_for('itemizedlist') is None:
            self.wrapper = textwrap.TextWrapper(width=self.fill_width)
        else:
            self.wrapper = textwrap.TextWrapper(
                width=self.fill_width,
                initial_indent=self.base_indent * self.nesting + ' ',
                subsequent_indent=self.base_indent * self.nesting + ' ',)

    def depart_orderedlist(self):
        if self.search_stack_for('itemizedlist') is None:
            self.wrapper = textwrap.TextWrapper(width=self.fill_width)

    def visit_para(self, attrs):
        if attrs.get('role') == 'shortdesc':
            self.shortdesc = True
        self.content_stack.append([''])
        if self.search_stack_for('itemizedlist') is not None:
            return
        if self.content:
            if self.content[-1].endswith('\n\n'):
                pass
            elif self.content[-1].endswith('\n'):
                self.content.append('\n')

    def depart_para(self):
        content = ''.join(self.content_stack.pop()).strip()
        # Split out any literal block so only the prose gets re-wrapped.
        literal_block = True
        parts = content.partition('::\n\n')
        if parts[0] == content:
            parts = content.partition('.. code-block::')
            if parts[0] == content:
                literal_block = False
            else:
                parts = content.partition(self.litblockstr)
        if literal_block:
            wrapped = self.wrapper.wrap(parts[0])
            wrapped = '\n'.join(wrapped)
            litcontent = parts[2].partition('\n\n')
            if self.search_stack_for('itemizedlist') is None:
                wrapped += '' + parts[1] + litcontent[0] + '\n'
            else:
                indent = self.base_indent * self.nesting + ' '
                wrapped += indent + parts[1] + indent + litcontent[0] + '\n'
            postwrap = self.wrapper.wrap(litcontent[2])
            postwrap = '\n'.join(postwrap)
            wrapped += postwrap
            self.content.append(wrapped)
        else:
            wrapped = self.wrapper.wrap(content)
            self.content.append('\n'.join(wrapped))
        self.content.append('\n\n')
        if self.search_stack_for('itemizedlist') is None:
            pass
        else:
            self.wrapper = textwrap.TextWrapper(
                width=self.fill_width,
                initial_indent=self.base_indent * self.nesting + ' ',
                subsequent_indent=self.base_indent * self.nesting + ' ',)
        if self.shortdesc is True:
            self.kwargs['shortdesc'] = self.result.strip()
        # Reset state variables
        self.content_stack = [[]]
        self.shortdesc = False

    def visit_title(self, attrs):
        self.current_emphasis = attrs.get('role', 'bold')
        self.no_space = True

    def depart_title(self):
        content = ' ' + self.EMPHASIS[self.current_emphasis]
        content += ' '.join(self.inline_markup_stack[0:None])
        content += self.EMPHASIS[self.current_emphasis]
        self.content.append(content)
        self.content.append('\n\n')
        self.inline_markup_stack[:] = []
        self.no_space = False
        self.current_emphasis = None

    def visit_code(self, attrs):
        self.no_space = True

    def depart_code(self):
        content = ' ``'
        if self.content:
            if self.content[-1].endswith('(') or \
                    self.content[-1].endswith(' '):
                content = '``'
        content += ' '.join(self.inline_markup_stack[0:None])
        content += '``'
        self.content.append(content)
        self.inline_markup_stack[:] = []
        self.no_space = False
        self.markup_end = True

    def visit_emphasis(self, attrs):
        # Bold is the default emphasis
        self.current_emphasis = attrs.get('role', 'bold')
        self.no_space = True

    def depart_emphasis(self):
        content = ' ' + self.EMPHASIS[self.current_emphasis]
        if self.content:
            if self.content[-1].endswith('(') or \
                    self.content[-1].endswith(' '):
                content = '' + self.EMPHASIS[self.current_emphasis]
        content += ' '.join(self.inline_markup_stack[0:None])
        content += self.EMPHASIS[self.current_emphasis]
        self.content.append(content)
        self.inline_markup_stack[:] = []
        self.no_space = False
        self.current_emphasis = None
        self.markup_end = True

    def visit_programlisting(self, attrs):
        self.nesting = 3
        if not attrs:
            if self.search_stack_for('itemizedlist') is None:
                self.content.append('::\n\n')
            else:
                self.content.append(self.base_indent * self.nesting +
                                    ' ' + '::\n\n')
        else:
            if self.search_stack_for('itemizedlist') is None:
                self.litblockstr = (
                    '.. code-block:: %s\n\n' % attrs['language']
                )
                self.content.append(
                    '.. code-block:: %s\n\n' % attrs['language']
                )
            else:
                self.content.append(
                    self.base_indent * self.nesting +
                    ' ' + '.. code-block:: %s\n\n' % attrs['language']
                )

    def depart_programlisting(self):
        self.nesting = 0  # no indent for blank lines
        self.content.append('\n\n')

    def visit_link(self, attrs):
        if attrs:
            self.inline_markup_stack.append(attrs['xlink:href'])
        self.no_space = True

    def depart_link(self):
        content = ' `'
        # anonymous link
        # Fixed: 'is 1' compared identity of an int literal (a
        # SyntaxWarning on Python 3.8+ and implementation-dependent);
        # equality is what is meant here.
        if len(self.inline_markup_stack) == 1:
            content += ('<%s>`__' % self.inline_markup_stack[0])
        else:
            content += ' '.join(self.inline_markup_stack[1:None])
            content += (' <%s>`_' % self.inline_markup_stack[0])
        self.content.append(content)
        self.inline_markup_stack[:] = []
        self.no_space = False
        self.hyperlink_end = True
class WADLHandler(xml.sax.ContentHandler):
    """SAX handler that converts one WADL file into Swagger structures.

    Populates ``self.apis`` (url path -> list of Swagger operations) and
    ``self.schemas`` (definition name -> JSON schema) while parsing.
    Prose sub-trees (``wadl:doc``) are delegated to a ParaParser.
    """

    def __init__(self, filename, api_ref):
        self.filename = filename
        self.api_ref = api_ref
        # Tag mapping keys look like '<file>#<fragment>'; keep only the
        # fragments that belong to this file.
        self.method_tag_map = {method.split('#', 1)[1]: tag
                               for method, tag
                               in self.api_ref['method_tags'].items()
                               if method.split('#', 1)[0] == filename}
        self.resource_tag_map = {resource.split('#', 1)[1]: tag
                                 for resource, tag
                                 in self.api_ref['resource_tags'].items()
                                 if resource.split('#', 1)[0] == filename}
        self.file_tag = self.api_ref['file_tags'].get(filename, None)
        self.actual_tags = set(tag['name'] for tag in self.api_ref['tags'])

    def startDocument(self):
        """Reset all per-document state."""
        # API state
        self.apis = {}
        self.current_api = None
        self.schemas = {}
        # Resource Mapping
        self.resource_map = {}
        self.resource_types = {}
        self.resource_ids = defaultdict(list)
        self.resource_id_stack = []
        # URL paths
        self.url_map = {}
        self.url_params = {}
        self.url = []
        # general state
        self.tag_stack = []
        self.attr_stack = []
        self.content = None
        self.parser = None

    def detach_subparser(self, result, **kwargs):
        """Called by a sub-parser when it finishes; forwards its result."""
        self.parser = None
        self.result_fn(result, **kwargs)
        self.result_fn = None

    def attach_subparser(self, parser, result_fn):
        """Delegate subsequent SAX events to *parser*."""
        self.parser = parser
        self.result_fn = result_fn

    def endDocument(self):
        # Sets are not JSON-serialisable; convert them to lists.
        for api in self.apis.values():
            for method in api:
                method['consumes'] = list(method['consumes'])
                method['produces'] = list(method['produces'])

    def parameter_description(self, content, **kwargs):
        """Result callback: description of a URL (template) parameter."""
        name = self.search_stack_for('param')['name']
        self.url_params[name] = content.strip()

    def api_summary(self, content, **kwargs):
        """Result callback: summary/description of the current method."""
        if kwargs.get('shortdesc'):
            self.current_api['summary'] = kwargs['shortdesc']
        self.current_api['description'] = content.strip()

    def request_parameter_description(self, content, **kwargs):
        """Result callback: description of a request parameter."""
        param = self.search_stack_for('param')
        style = STYLE_MAP[param['style']]
        name = param['name']
        if style == 'body':
            # Body parameters live inside the request schema definition.
            parameters = self.current_api['parameters']
            schema_name = parameters[0]['schema']['$ref'].rsplit('/', 1)[1]
            schema = self.schemas[schema_name]
            schema['properties'][name]['description'] = content.strip()
        else:
            self.current_api['parameters'][-1]['description'] = content.strip()

    def response_schema_description(self, content, **kwargs):
        """Result callback: description of a response header/body field."""
        status_code = self.search_stack_for('response')['status']
        if ' ' in status_code:
            status_codes = status_code.split(' ')
            if '200' in status_codes:
                status_code = '200'
            # TODO(arrsim) need to do something with the other status
            # codes
        param = self.search_stack_for('param')
        style = STYLE_MAP[param['style']]
        name = param['name']
        if style == 'header':
            response = self.current_api['responses'][status_code]
            response['headers'][name]['description'] = content.strip()
        elif style == 'body':
            parameters = self.current_api['parameters']
            if len(parameters) > 0:
                schema_name = parameters[0]['schema']['$ref'].rsplit('/', 1)[1]
                # Response schemas are suffixed with their status code.
                schema_name = schema_name + '_' + status_code
                schema = self.schemas[schema_name]
                schema['properties'][name]['description'] = content.strip()

    def search_stack_for(self, tag_name):
        """Return attrs of the nearest enclosing *tag_name*, or None."""
        for tag, attrs in zip(reversed(self.tag_stack),
                              reversed(self.attr_stack)):
            if tag == tag_name:
                return attrs

    def on_top_tag_stack(self, *args):
        """True when the top of the tag stack matches *args* in order."""
        return self.tag_stack[-len(args):] == list(args)

    def startElement(self, name, _attrs):
        attrs = dict(_attrs)
        # Documentation sub-trees are handed off to a ParaParser; the
        # chosen callback decides where the rendered text ends up.
        if name == 'wadl:doc':
            if self.on_top_tag_stack('resource', 'param'):
                self.attach_subparser(ParaParser(self),
                                      self.parameter_description)
            if self.on_top_tag_stack('method'):
                self.current_api['title'] = attrs.get('title')
                self.attach_subparser(ParaParser(self), self.api_summary)
            if self.on_top_tag_stack('request', 'representation',
                                     'param'):
                self.attach_subparser(ParaParser(self),
                                      self.request_parameter_description)
            if self.on_top_tag_stack('response', 'representation', 'param'):
                self.attach_subparser(ParaParser(self),
                                      self.response_schema_description)
        if self.parser:
            return self.parser.startElement(name, _attrs)
        self.tag_stack.append(name)
        self.attr_stack.append(attrs)
        self.content = []
        if name == 'method':
            if 'id' in attrs and 'name' in attrs:
                id = attrs['id']
                # Resolve the URL this method is attached to.
                if id in self.url_map:
                    url = self.url_map[id]
                elif id in self.resource_map:
                    resource = self.resource_map[id]
                    url = self.resource_types[resource]
                else:
                    log.warning("Can't find method %s", id)
                    # Create the minimal object to prevent creating
                    # exceptions for this case everywhere.
                    # NOTE(review): 'parameters' is a dict here but a
                    # list on the normal path -- confirm intended.
                    self.current_api = {
                        'produces': set(),
                        'consumes': set(),
                        'examples': {},
                        'responses': {},
                        'parameters': {},
                    }
                    return
                tag = self.method_tag_map.get(id, '')
                name = attrs['name'].lower()
                if url in self.apis:
                    root_api = self.apis[url]
                else:
                    self.apis[url] = root_api = []
                # Start a new Swagger operation; a body parameter stub is
                # added up-front and pruned in endElement if unused.
                self.current_api = {
                    'operationId': id,
                    'tags': set(),
                    'method': name,
                    'produces': set(),
                    'consumes': set(),
                    'examples': {},
                    'parameters': [{'in': "body",
                                    'name': "body",
                                    'description': "",
                                    'required': False,
                                    'schema': {
                                        '$ref': "#/definitions/%s" % id
                                    }}],
                    'responses': {},
                }
                # Tag resolution order: explicit method tag, then the
                # innermost tagged ancestor resource, then the file tag.
                if tag:
                    self.current_api['tags'].add(tag)
                elif id in self.resource_ids:
                    for tag_id in reversed(self.resource_ids[id]):
                        r_tag_id = self.resource_tag_map.get(tag_id)
                        if r_tag_id not in self.actual_tags:
                            continue
                        self.current_api['tags'].add(r_tag_id)
                        break
                if not self.current_api['tags']:
                    if self.file_tag:
                        self.current_api['tags'].add(self.file_tag)
                self.current_api['tags'] = list(self.current_api['tags'])
                # If there are no tags then we couldn't find the
                # method in the chapters.
                if self.current_api['tags']:
                    root_api.append(self.current_api)
                else:
                    log.warning("No tags for method %s" % id)
                # Attach path parameters whose placeholder appears in
                # this method's URL.
                for param, doc in self.url_params.items():
                    if ('{%s}' % param) in url:
                        self.current_api['parameters'].append(
                            create_parameter(param, 'template', doc))
        # URL paths
        if name == 'resource':
            self.url.append(attrs.get('path', '').replace('//', '/'))
            self.resource_id_stack.append(attrs.get('id', None))
        if self.on_top_tag_stack('resource_type', 'method'):
            self.resource_map[attrs.get('href').strip('#')] \
                = self.attr_stack[-2]['id']
        # Methods and Resource Types
        if name == 'resource' and attrs.get('type'):
            self.resource_types[attrs.get('type').strip('#')] \
                = join_url(self.url)
        if self.on_top_tag_stack('resource', 'method'):
            href = attrs.get('href').strip('#')
            self.url_map[href] = join_url(self.url)
            self.resource_ids[href] = [r_id for r_id in self.resource_id_stack
                                       if r_id]
        if name == 'xsdxt:code':
            # Inline example payloads referenced by href.
            if not attrs.get('href'):
                return
            if self.search_stack_for('response') is not None:
                type = 'response'
                status_code = self.search_stack_for('response')['status']
                if ' ' in status_code:
                    status_codes = status_code.split(' ')
                    if '200' in status_codes:
                        status_code = '200'
                    # TODO(arrsim) need to do something with the other
                    # status codes
            elif self.search_stack_for('request') is not None:
                type = 'request'
            else:
                log.error("Can't find request or response tag. %s",
                          self.tag_stack)
                raise Exception("Can't find request or response tag.")
            media_type = MIME_MAP[attrs['href'].rsplit('.', 1)[-1]]
            # XML is removed, skip all these
            if media_type == 'application/xml':
                return
            pathname = path.join(path.dirname(self.filename), attrs['href'])
            try:
                sample = open(pathname).read()
                if media_type == 'application/json':
                    sample = json.loads(sample)
            except IOError:
                log.warning("Can't find file %s" % pathname)
                sample = None
            if media_type != 'text/plain':
                self.current_api['produces'].add(media_type)
                self.current_api['consumes'].add(media_type)
            if sample and type == 'response':
                response = self.current_api['responses'][status_code]
                response['examples'][media_type] = sample
            elif sample and type == 'request':
                # Add request examples (Not swagger supported)
                self.current_api['examples'][media_type] = sample
        if name == 'response':
            if 'status' not in attrs:
                return
            status_code = attrs['status']
            response = {
                'description': '',
                'schema': {},
                'headers': {},
                'examples': {},
            }
            if ' ' in status_code:
                status_codes = status_code.split(' ')
                for status_code in status_codes:
                    # For each of the multiple status make copies of
                    # blank responses? The duplicates will be ignored
                    # by subsequent calls that update the response object.
                    self.current_api['responses'][status_code] = copy(response)
            else:
                self.current_api['responses'][status_code] = response
        if self.on_top_tag_stack('request', 'representation', 'param'):
            # Request parameter: body fields go into the request schema,
            # everything else is appended as a plain parameter.
            parameters = self.current_api['parameters']
            name = attrs['name']
            parameter = create_parameter(
                name=name,
                _in=attrs.get('style', 'plain'),
                description='',
                type=attrs.get('type', 'string'),
                required=attrs.get('required'))
            if parameter['in'] == 'body':
                schema_name = parameters[0]['schema']['$ref'].rsplit('/', 1)[1]
                if schema_name not in self.schemas:
                    self.schemas[schema_name] = {'type': 'object',
                                                 'properties': {}}
                schema_properties = self.schemas[schema_name]['properties']
                schema_properties[parameter['name']] = parameter
                del parameter['name']
                del parameter['in']
            else:
                parameters.append(parameter)
        if self.on_top_tag_stack('response', 'representation', 'param'):
            # Response parameter: body fields build a per-status-code
            # schema; header fields attach to the response's headers.
            parameters = self.current_api['parameters']
            status_code = self.attr_stack[-3]['status']
            if ' ' in status_code:
                status_codes = status_code.split(' ')
                if '200' in status_codes:
                    status_code = '200'
                # TODO(arrsim) need to do something with the other status codes
            name = attrs['name']
            parameter = create_parameter(
                name=name,
                _in=attrs.get('style', 'plain'),
                description='',
                type=attrs.get('type', 'string'),
                required=attrs.get('required'))
            if parameter['in'] == 'body':
                if len(parameters) > 0:
                    s = parameters[0]['schema']['$ref']
                    schema_name = s.rsplit('/', 1)[1]
                    schema_name = schema_name + '_' + status_code
                    if schema_name not in self.schemas:
                        self.schemas[schema_name] = {'type': 'object',
                                                     'properties': {}}
                    schema_properties = self.schemas[schema_name]['properties']
                    schema_properties[parameter['name']] = parameter
                    response = self.current_api['responses'][status_code]
                    response['schema']['$ref'] = "#/definitions/%s" % \
                                                 schema_name
                    del parameter['name']
                    del parameter['in']
            elif parameter['in'] == 'header':
                headers = self.current_api['responses'][status_code]['headers']
                headers[parameter['name']] = parameter
                del parameter['name']
                del parameter['in']

    def endElement(self, name):
        if self.parser:
            return self.parser.endElement(name)
        if self.current_api and name == 'method':
            # Clean up the parameters of methods that have take no
            # body content.
            parameters = self.current_api['parameters']
            if parameters and 'schema' in parameters[0]:
                schema_name = parameters[0]['schema']['$ref'].rsplit('/', 1)[1]
                if schema_name not in self.schemas:
                    self.current_api['parameters'] \
                        = self.current_api['parameters'][1:]
        # URL paths
        if name == 'resource':
            self.url.pop()
            self.resource_id_stack.pop()
        self.tag_stack.pop()
        self.attr_stack.pop()

    def characters(self, content):
        if self.parser:
            return self.parser.characters(content)
        content = content.strip()
        if content:
            self.content.append(content)
def main1(source_file, output_dir):
    """Convert one api-ref description into a Swagger 2.0 JSON file.

    Reads ``source_file`` (the api-ref JSON), parses every WADL file it
    references, merges in the optional ``<service>-examples.json`` file,
    and writes ``<service>-<version>-swagger.json`` into ``output_dir``.

    :param source_file: path to the api-ref JSON description.
    :param output_dir: directory the result is written into; when falsy
        the current working directory is used.
    """
    log.info('Reading API description from %s' % source_file)
    # Close the handle explicitly (the original json.load(open(...))
    # leaked it).
    with open(source_file) as ref_file:
        api_ref = json.load(ref_file)

    # Tag mapping keys look like '<wadl-file>#<fragment>'; collect the
    # distinct set of WADL files referenced by any mapping.
    files = set()
    for mapping in ('method_tags', 'resource_tags', 'file_tags'):
        for filepath in api_ref[mapping].keys():
            files.add(filepath.split('#', 1)[0])

    # Load supplementary examples file
    example_name = (api_ref['service']
                    .replace('-admin', '')
                    .replace('-extensions', ''))
    examples_file = path.join(path.dirname(source_file),
                              example_name + '-examples.json')
    if path.exists(examples_file):
        log.info('Reading examples from %s' % examples_file)
        with open(examples_file) as ex_file:
            examples = json.load(ex_file)
    else:
        examples = []

    output = {
        u'info': {
            'version': api_ref['version'],
            'title': api_ref['title'],
            'service': api_ref['service'],
            'license': {
                "name": "Apache 2.0",
                "url": "http://www.apache.org/licenses/LICENSE-2.0.html"
            }
        },
        u'paths': defaultdict(list),
        u'schemes': [],
        u'tags': api_ref['tags'],
        u'basePath': "",
        u'securityDefinitions': {},
        u'host': "",
        u'definitions': {},
        u'externalDocs': {
            'description': 'OpenStack Docs',
            'url': 'http://docs.openstack.org',
        },
        u"swagger": u"2.0",
    }

    # Parse every WADL file and merge its operations/schemas.
    # (Loop variable renamed from 'file' to avoid shadowing the builtin.)
    for wadl_file in files:
        log.info('Parsing %s' % wadl_file)
        abs_filename = path.abspath(wadl_file)
        ch = WADLHandler(abs_filename, api_ref)
        xml.sax.parse(wadl_file, ch)
        for urlpath, apis in ch.apis.items():
            output['paths'][urlpath].extend(apis)
        output['definitions'].update(ch.schemas)

    # Attach captured request/response examples to matching operations.
    for ex_request, ex_response in examples:
        for urlpath in output['paths']:
            url_matcher = "^" + URL_TEMPLATE_RE.sub('[^/]+', urlpath) + "$"
            method = ex_request['method'].lower()
            if re.match(url_matcher, ex_request['url']):
                method_count = defaultdict(int)
                for operation in output['paths'][urlpath]:
                    method_count[operation['method'].lower()] += 1
                if any(i > 1 for i in method_count.values()):
                    # Skip any of the multi-payload endpoints. They
                    # are madness.
                    break
                for operation in output['paths'][urlpath]:
                    if operation['method'].lower() == method:
                        break
                else:
                    log.warning("Couldn't find any operations %s for %s",
                                method, urlpath)
                    break
                request = HTTP_REQUEST_TMPL.render(
                    headers=ex_request['headers'],
                    method=ex_request['method'],
                    url=ex_request['url'])
                operation['examples'] = {'text/plain': request}
                # Override any responses
                status_code = ex_response['status_code']
                response = HTTP_RESPONSE_TMPL.render(
                    status_code=status_code,
                    headers=ex_response['headers'],
                    body=ex_response['body'] or '')
                if status_code in operation['responses']:
                    operation['responses'][status_code]['examples'] = \
                        {'text/plain': response}
                else:
                    operation['responses'][status_code] = \
                        {'examples': {'text/plain': response}}
            else:
                log.warning("Service %s %s doesn't have matching "
                            "URL for example %s %s",
                            output['info']['service'], output['info']['version'],
                            method, ex_request['url'])

    # Only change directory when one was supplied; os.chdir(None) would
    # raise a TypeError when --output-dir is omitted.
    if output_dir:
        os.chdir(output_dir)
    pathname = '%s-%s-swagger.json' % (api_ref['service'],
                                       api_ref['version'])
    with open(pathname, 'w') as out_file:
        json.dump(output, out_file, indent=2, sort_keys=True)
def main():
    """Command-line entry point: parse arguments and run the converter."""
    import argparse
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        '-v', '--verbose', action='count', default=0,
        help="Increase verbosity (specify multiple times for more)")
    arg_parser.add_argument(
        '-o', '--output-dir', action='store',
        help="The directory to output the JSON files too.")
    arg_parser.add_argument(
        'filename',
        help="File to convert")
    args = arg_parser.parse_args()

    # Map -v count to a logging level (default WARNING).
    if args.verbose >= 2:
        log_level = logging.DEBUG
    elif args.verbose == 1:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    logging.basicConfig(
        level=log_level,
        format='%(asctime)s %(name)s %(levelname)s %(message)s')

    main1(path.abspath(args.filename), output_dir=args.output_dir)
| |
# -*- coding: utf-8 -*-
import functools
import time
import datetime
import mock
from factory import SubFactory
from factory.fuzzy import FuzzyDateTime, FuzzyAttribute, FuzzyChoice
from mock import patch, Mock
import factory
import pytz
from factory.django import DjangoModelFactory
from django.utils import timezone
from django.db.utils import IntegrityError
from faker import Factory
from reviews import workflow
from website import settings
from website.notifications.constants import NOTIFICATION_TYPES
from website.util import permissions
from website.archiver import ARCHIVER_SUCCESS
from website.identifiers.utils import parse_identifiers
from website.settings import FAKE_EMAIL_NAME, FAKE_EMAIL_DOMAIN
from framework.auth.core import Auth
from osf import models
from osf.models.sanctions import Sanction
from osf.utils.names import impute_names_model
from addons.osfstorage.models import OsfStorageFile
fake = Factory.create()


# If tests are run on really old processors without high precision this
# might fail. Unlikely to occur.
# time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is its documented high-resolution replacement.
def fake_email():
    """Return a unique fake email address for generated test users."""
    return '{}+{}@{}'.format(
        FAKE_EMAIL_NAME, int(time.perf_counter() * 1000000), FAKE_EMAIL_DOMAIN)
def get_default_metaschema():
    """Return the first MetaSchema row.

    Kept as a function (rather than a module-level constant) so the
    query only runs after the test database is set up.
    """
    queryset = models.MetaSchema.objects
    return queryset.first()
def FakeList(provider, n, *args, **kwargs):
    """Return a list of *n* values from the named faker *provider*."""
    generate = getattr(fake, provider)
    return [generate(*args, **kwargs) for _ in range(n)]
class UserFactory(DjangoModelFactory):
    """Factory for registered, confirmed OSFUser instances."""

    # TODO: Change this to only generate long names and see what breaks
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    username = factory.LazyFunction(fake_email)
    # Password is applied post-generation via set_password.
    password = factory.PostGenerationMethodCall('set_password',
                                                'queenfan86')
    is_registered = True
    is_claimed = True
    date_confirmed = factory.Faker('date_time_this_decade', tzinfo=pytz.utc)
    merged_by = None
    verification_key = None

    class Meta:
        model = models.OSFUser

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        """Build an instance; if ``emails`` is passed, save anyway so the
        related emails rows can be created."""
        emails = kwargs.pop('emails', [])
        instance = super(DjangoModelFactory, cls)._build(target_class, *args, **kwargs)
        if emails:
            # Save for M2M population
            instance.set_unusable_password()
            instance.save()
        for email in emails:
            instance.emails.create(address=email)
        return instance

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        """Create an instance; ensure it is saved before attaching any
        ``emails`` rows."""
        emails = kwargs.pop('emails', [])
        instance = super(DjangoModelFactory, cls)._create(target_class, *args, **kwargs)
        if emails and not instance.pk:
            # Save for M2M population
            instance.set_unusable_password()
            instance.save()
        for email in emails:
            instance.emails.create(address=email)
        return instance

    @factory.post_generation
    def set_names(self, create, extracted):
        # Derive given/family/etc. name fields from the full name.
        parsed = impute_names_model(self.fullname)
        for key, value in parsed.items():
            setattr(self, key, value)
        if create:
            self.save()

    @factory.post_generation
    def set_emails(self, create, extracted):
        # Ensure the username is also registered as an email address.
        if not self.emails.filter(address=self.username).exists():
            if not self.id:
                if create:
                    # Perform implicit save to populate M2M
                    self.save()
                else:
                    # This might lead to strange behavior
                    return
            self.emails.create(address=str(self.username).lower())
class AuthUserFactory(UserFactory):
    """A user that automatically has an api key, for quick authentication.
    Example: ::
        user = AuthUserFactory()
        res = self.app.get(url, auth=user.auth) # user is "logged in"
    """
    @factory.post_generation
    def add_auth(self, create, extracted):
        # Basic-auth tuple; the password must match the literal hashed by
        # UserFactory's 'set_password' PostGenerationMethodCall.
        self.auth = (self.username, 'queenfan86')
class AuthFactory(factory.base.Factory):
    """Factory for framework ``Auth`` objects (plain objects, not Django models)."""
    class Meta:
        model = Auth
    user = factory.SubFactory(UserFactory)
class UnregUserFactory(DjangoModelFactory):
    """Factory for unregistered users, built through ``create_unregistered``."""
    email = factory.LazyFunction(fake_email)
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    date_registered = factory.Faker('date_time', tzinfo=pytz.utc)

    class Meta:
        model = models.OSFUser

    @classmethod
    def _assemble(cls, target_class, kwargs):
        # Shared construction path for _build/_create.
        user = target_class.create_unregistered(
            email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
        for attr, value in kwargs.items():
            setattr(user, attr, value)
        return user

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        '''Build an object without saving it.'''
        return cls._assemble(target_class, kwargs)

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        user = cls._assemble(target_class, kwargs)
        user.save()
        return user
class UnconfirmedUserFactory(DjangoModelFactory):
    """Factory for a user that has not yet confirmed their primary email
    address (username).
    """
    class Meta:
        model = models.OSFUser
    username = factory.LazyFunction(fake_email)
    fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
    password = 'lolomglgt'

    @classmethod
    def _make_unconfirmed(cls, target_class, username, password, fullname):
        # Shared construction path for _build/_create.
        user = target_class.create_unconfirmed(
            username=username, password=password, fullname=fullname
        )
        user.date_registered = fake.date_time(tzinfo=pytz.utc)
        return user

    @classmethod
    def _build(cls, target_class, username, password, fullname):
        '''Build an object without saving it.'''
        return cls._make_unconfirmed(target_class, username, password, fullname)

    @classmethod
    def _create(cls, target_class, username, password, fullname):
        user = cls._make_unconfirmed(target_class, username, password, fullname)
        user.save()
        return user
class BaseNodeFactory(DjangoModelFactory):
    """Common defaults shared by all Node-based factories."""
    title = factory.Faker('catch_phrase')
    description = factory.Faker('sentence')
    created = factory.LazyFunction(timezone.now)
    creator = factory.SubFactory(AuthUserFactory)

    class Meta:
        model = models.Node
class ProjectFactory(BaseNodeFactory):
    """A top-level node (category 'project')."""
    category = 'project'
class ProjectWithAddonFactory(ProjectFactory):
    """Factory for a project that has an addon. The addon will be added to
    both the Node and the creator records. ::
        p = ProjectWithAddonFactory(addon='github')
        p.get_addon('github') # => github node settings object
        p.creator.get_addon('github') # => github user settings object
    """
    # TODO: Should use mock addon objects
    @classmethod
    def _attach_addon(cls, project, addon):
        # Enable the addon on both the node and its creator.
        project.add_addon(addon, Auth(user=project.creator))
        project.creator.add_addon(addon)
        return project

    @classmethod
    def _build(cls, target_class, addon='s3', *args, **kwargs):
        '''Build an object without saving it.'''
        project = ProjectFactory._build(target_class, *args, **kwargs)
        return cls._attach_addon(project, addon)

    @classmethod
    def _create(cls, target_class, addon='s3', *args, **kwargs):
        project = ProjectFactory._create(target_class, *args, **kwargs)
        cls._attach_addon(project, addon)
        project.save()
        return project
class NodeFactory(BaseNodeFactory):
    """A component node nested under a freshly created parent project."""
    category = 'hypothesis'
    parent = factory.SubFactory(ProjectFactory)
class InstitutionFactory(DjangoModelFactory):
    """Factory for Institution records."""
    name = factory.Faker('company')
    login_url = factory.Faker('url')
    logout_url = factory.Faker('url')
    # NOTE(review): FakeList runs once at class-definition time, so every
    # institution created by this factory shares the exact same lists —
    # confirm this is intended (wrap in factory.LazyFunction otherwise).
    domains = FakeList('url', n=3)
    email_domains = FakeList('domain_name', n=1)
    logo_name = factory.Faker('file_name')

    class Meta:
        model = models.Institution
class NodeLicenseRecordFactory(DjangoModelFactory):
    """Factory for NodeLicenseRecord; defaults to the 'No license' NodeLicense."""
    year = factory.Faker('year')
    # NOTE(review): evaluated once at class definition; all records share
    # these same holder names — confirm intended.
    copyright_holders = FakeList('name', n=3)

    class Meta:
        model = models.NodeLicenseRecord

    @classmethod
    def _create(cls, *args, **kwargs):
        # Only query for the default license when the caller did not supply
        # one; ``kwargs.get(key, default)`` evaluated the DB query eagerly
        # on every call, even when 'node_license' was passed in.
        if 'node_license' not in kwargs:
            kwargs['node_license'] = models.NodeLicense.objects.get(name='No license')
        return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
class NodeLogFactory(DjangoModelFactory):
    """Factory for NodeLog entries; defaults to a 'file_added' action."""
    class Meta:
        model = models.NodeLog
    action = 'file_added'
    # NOTE(review): this dict is a shared class attribute; instances that
    # don't override it reference the same object — confirm intended.
    params = {'path': '/'}
    # Namespaced form for consistency with every other factory in this file
    # (was a bare ``SubFactory``).
    user = factory.SubFactory(UserFactory)
class PrivateLinkFactory(DjangoModelFactory):
    """Factory for view-only (private) links."""
    class Meta:
        model = models.PrivateLink
    name = factory.Faker('word')
    key = factory.Faker('md5')
    anonymous = False
    creator = factory.SubFactory(UserFactory)
class CollectionFactory(DjangoModelFactory):
    """Factory for ordinary (non-bookmark) collections."""
    class Meta:
        model = models.Collection
    is_bookmark_collection = False
    title = factory.Faker('catch_phrase')
    creator = factory.SubFactory(UserFactory)
class BookmarkCollectionFactory(CollectionFactory):
    """CollectionFactory variant flagged as the user's bookmark collection."""
    is_bookmark_collection = True
class RegistrationFactory(BaseNodeFactory):
    """Create a Registration with the celery/archiver machinery patched out.

    Accepts an existing ``project`` to register (a fresh one is created
    otherwise) and at most one sanction (``embargo``,
    ``registration_approval`` or ``retraction``); without one, a default
    approval is required from the creator.
    """
    creator = None
    # Default project is created if not provided
    category = 'project'

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        # Registrations only make sense persisted; refuse the build strategy.
        raise Exception('Cannot build registration without saving.')

    @classmethod
    def _create(cls, target_class, project=None, is_public=False,
                schema=None, data=None,
                archive=False, embargo=None, registration_approval=None, retraction=None,
                *args, **kwargs):
        # Resolution order for the acting user: explicit 'user' kwarg,
        # 'creator' kwarg, the project's creator, then a fresh user.
        user = None
        if project:
            user = project.creator
        user = kwargs.pop('user', None) or kwargs.get('creator') or user or UserFactory()
        kwargs['creator'] = user
        # Original project to be registered
        project = project or target_class(*args, **kwargs)
        if project.has_permission(user, 'admin'):
            project.add_contributor(
                contributor=user,
                permissions=permissions.CREATOR_PERMISSIONS,
                log=False,
                save=False
            )
        project.save()
        # Default registration parameters
        schema = schema or get_default_metaschema()
        data = data or {'some': 'data'}
        auth = Auth(user=user)
        register = lambda: project.register_node(
            schema=schema,
            auth=auth,
            data=data
        )
        def add_approval_step(reg):
            # Attach whichever sanction was supplied, or fall back to
            # requiring approval from the creator.
            if embargo:
                reg.embargo = embargo
            elif registration_approval:
                reg.registration_approval = registration_approval
            elif retraction:
                reg.retraction = retraction
            else:
                reg.require_approval(reg.creator)
            reg.save()
            reg.sanction.add_authorizer(reg.creator, reg)
            reg.sanction.save()
        # Suppress celery task dispatch while registering.
        with patch('framework.celery_tasks.handlers.enqueue_task'):
            reg = register()
            add_approval_step(reg)
        if not archive:
            # Short-circuit archiving: mark the archive job finished and the
            # sanction approved instead of running the real pipeline.
            with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
                archive_job = reg.archive_job
                archive_job.status = ARCHIVER_SUCCESS
                archive_job.done = True
                reg.sanction.state = Sanction.APPROVED
                reg.sanction.save()
        if is_public:
            reg.is_public = True
        reg.save()
        return reg
class WithdrawnRegistrationFactory(BaseNodeFactory):
    """Retract an existing registration and approve the retraction.

    Requires a ``registration`` kwarg; returns the approved Retraction.
    """
    @classmethod
    def _create(cls, *args, **kwargs):
        registration = kwargs.pop('registration', None)
        registration.is_public = True
        # Approver defaults to the registration's creator.
        user = kwargs.pop('user', registration.creator)
        registration.retract_registration(user)
        withdrawal = registration.retraction
        # list(...) keeps this working on Python 3, where dict.values()
        # returns a non-indexable view.
        token = list(withdrawal.approval_state.values())[0]['approval_token']
        # Suppress search re-indexing during approval.
        with patch('osf.models.AbstractNode.update_search'):
            withdrawal.approve_retraction(user, token)
            withdrawal.save()
        return withdrawal
class SanctionFactory(DjangoModelFactory):
    """Abstract base for sanction factories (retraction/embargo/approval).

    Creates the sanction, then a registration wired to it through the
    sanction's SHORT_NAME, and leaves the sanction UNAPPROVED unless
    ``approve=True``.
    """
    class Meta:
        abstract = True

    @classmethod
    def _create(cls, target_class, initiated_by=None, approve=False, *args, **kwargs):
        user = kwargs.pop('user', None) or UserFactory()
        kwargs['initiated_by'] = initiated_by or user
        sanction = super(SanctionFactory, cls)._create(target_class, *args, **kwargs)
        reg_kwargs = {
            'creator': user,
            'user': user,
            sanction.SHORT_NAME: sanction
        }
        # Created for its side effects; reachable via the sanction afterwards.
        RegistrationFactory(**reg_kwargs)
        if not approve:
            sanction.state = Sanction.UNAPPROVED
            sanction.save()
        return sanction
class RetractionFactory(SanctionFactory):
    """Retraction sanction plus its backing registration (see SanctionFactory)."""
    class Meta:
        model = models.Retraction
    user = factory.SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
    """Embargo sanction plus its backing registration (see SanctionFactory)."""
    class Meta:
        model = models.Embargo
    user = factory.SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
    """RegistrationApproval sanction plus its registration (see SanctionFactory)."""
    class Meta:
        model = models.RegistrationApproval
    user = factory.SubFactory(UserFactory)
class EmbargoTerminationApprovalFactory(DjangoModelFactory):
    """Produce an EmbargoTerminationApproval via the registration API."""
    FACTORY_STRATEGY = factory.base.CREATE_STRATEGY

    @classmethod
    def create(cls, registration=None, user=None, embargo=None, *args, **kwargs):
        if registration:
            user = user or registration.creator
        else:
            user = user or UserFactory()
            if embargo:
                registration = RegistrationFactory(creator=user, user=user, embargo=embargo)
            else:
                embargo = EmbargoFactory(state=models.Sanction.APPROVED, approve=True)
                registration = embargo._get_registration()
        # Suppress outgoing approval-request notifications.
        with mock.patch('osf.models.sanctions.TokenApprovableSanction.ask', mock.Mock()):
            return registration.request_embargo_termination(Auth(user))
class DraftRegistrationFactory(DjangoModelFactory):
    class Meta:
        model = models.DraftRegistration

    @classmethod
    def _create(cls, *args, **kwargs):
        """Create a draft registration, fabricating a source project if needed."""
        branched_from = kwargs.get('branched_from')
        initiator = kwargs.get('initiator')
        if not branched_from:
            # No source node supplied: spin up a project owned by the
            # initiator (or a brand-new user) to branch from.
            project_params = {'creator': initiator} if initiator else {}
            branched_from = ProjectFactory(**project_params)
            initiator = branched_from.creator
        schema = kwargs.get('registration_schema') or models.MetaSchema.objects.first()
        metadata = kwargs.get('registration_metadata') or {}
        return models.DraftRegistration.create_from_node(
            branched_from,
            user=initiator,
            schema=schema,
            data=metadata,
        )
class CommentFactory(DjangoModelFactory):
    """Factory for Comments targeting a node guid (or another comment)."""
    class Meta:
        model = models.Comment
    content = factory.Sequence(lambda n: 'Comment {0}'.format(n))

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        """Construct an unsaved comment, defaulting node/user/target/content."""
        node = kwargs.pop('node', None) or NodeFactory()
        user = kwargs.pop('user', None) or node.creator
        target = kwargs.pop('target', None) or models.Guid.load(node._id)
        content = kwargs.pop('content', None) or 'Test comment.'
        instance = target_class(
            node=node,
            user=user,
            target=target,
            content=content,
            *args, **kwargs
        )
        # Replies inherit the root target of the comment they reply to.
        if isinstance(target.referent, target_class):
            instance.root_target = target.referent.root_target
        else:
            instance.root_target = target
        return instance

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Previously duplicated _build's body verbatim; delegate instead
        # and add the save.
        instance = cls._build(target_class, *args, **kwargs)
        instance.save()
        return instance
class SubjectFactory(DjangoModelFactory):
    """Factory for taxonomy Subjects; non-OSF providers get a bepress mapping."""
    text = factory.Sequence(lambda n: 'Example Subject #{}'.format(n))

    class Meta:
        model = models.Subject

    @classmethod
    def _create(cls, target_class, parent=None, provider=None, bepress_subject=None, *args, **kwargs):
        provider = provider or models.PreprintProvider.objects.first() or PreprintProviderFactory(_id='osf')
        if provider._id != 'osf' and not bepress_subject:
            # Subjects on non-OSF providers must map onto an OSF (bepress) subject.
            osf = models.PreprintProvider.load('osf') or PreprintProviderFactory(_id='osf')
            bepress_subject = SubjectFactory(provider=osf)
        try:
            ret = super(SubjectFactory, cls)._create(target_class, parent=parent, provider=provider, bepress_subject=bepress_subject, *args, **kwargs)
        except IntegrityError:
            # Subject text collided with an existing row; reuse it.
            # NOTE(review): assumes an explicit ``text`` kwarg was supplied
            # when this happens — otherwise this raises KeyError. Confirm.
            ret = models.Subject.objects.get(text=kwargs['text'])
            if parent:
                ret.parent = parent
        return ret
class PreprintProviderFactory(DjangoModelFactory):
    """Factory for PreprintProviders; share_title defaults to the provider _id."""
    name = factory.Faker('company')
    description = factory.Faker('bs')
    external_url = factory.Faker('url')

    class Meta:
        model = models.PreprintProvider

    @classmethod
    def _ensure_share_title(cls, instance):
        # Previously duplicated in _build and _create: backfill share_title
        # from the provider's _id when it was left blank.
        if not instance.share_title:
            instance.share_title = instance._id
        return instance

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        instance = super(PreprintProviderFactory, cls)._build(target_class, *args, **kwargs)
        return cls._ensure_share_title(instance)

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        instance = super(PreprintProviderFactory, cls)._create(target_class, *args, **kwargs)
        cls._ensure_share_title(instance)
        instance.save()
        return instance
def sync_set_identifiers(preprint):
    """Assign DOI/ARK values to *preprint* from a synthesized EZID response."""
    guid = preprint._id
    ezid_return_value = {
        'response': {
            'success': '{doi}osf.io/{guid} | {ark}osf.io/{guid}'.format(
                doi=settings.DOI_NAMESPACE, ark=settings.ARK_NAMESPACE, guid=guid
            )
        },
        'already_exists': False,
    }
    parsed = parse_identifiers(ezid_return_value)
    preprint.set_identifier_values(doi=parsed['doi'], ark=parsed['ark'])
class PreprintFactory(DjangoModelFactory):
    """Factory for PreprintService objects with a primary file on the node.

    ``_create`` understands: ``finish``, ``is_published``, ``doi``,
    ``license_details``, ``filename``, ``subjects``, ``creator``,
    ``reviews_state``, plus anything accepted by ``_build``.
    """
    class Meta:
        model = models.PreprintService
    doi = factory.Sequence(lambda n: '10.123/{}'.format(n))
    provider = factory.SubFactory(PreprintProviderFactory)

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        creator = kwargs.pop('creator', None) or UserFactory()
        project = kwargs.pop('project', None) or ProjectFactory(creator=creator)
        provider = kwargs.pop('provider', None) or PreprintProviderFactory()
        instance = target_class(node=project, provider=provider)
        return instance

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Keep the SHARE update task from firing during setup.
        # NOTE(review): this patcher is never stopped here — confirm test
        # teardown handles it.
        update_task_patcher = mock.patch('website.preprints.tasks.on_preprint_updated.si')
        update_task_patcher.start()
        finish = kwargs.pop('finish', True)
        is_published = kwargs.pop('is_published', True)
        instance = cls._build(target_class, *args, **kwargs)
        doi = kwargs.pop('doi', None)
        license_details = kwargs.pop('license_details', None)
        filename = kwargs.pop('filename', None) or 'preprint_file.txt'
        subjects = kwargs.pop('subjects', None) or [[SubjectFactory()._id]]
        instance.node.preprint_article_doi = doi
        instance.reviews_state = kwargs.pop('reviews_state', 'initial')
        user = kwargs.pop('creator', None) or instance.node.creator
        if not instance.node.is_contributor(user):
            instance.node.add_contributor(
                contributor=user,
                permissions=permissions.CREATOR_PERMISSIONS,
                log=False,
                save=True
            )
        # Attach a primary file in the node's osfstorage.
        preprint_file = OsfStorageFile.create(
            node=instance.node,
            path='/{}'.format(filename),
            name=filename,
            materialized_path='/{}'.format(filename))
        preprint_file.save()
        from addons.osfstorage import settings as osfstorage_settings
        preprint_file.create_version(user, {
            'object': '06d80e',
            'service': 'cloud',
            osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
        }, {
            'size': 1337,
            'contentType': 'img/png'
        }).save()
        if finish:
            auth = Auth(user)
            instance.set_primary_file(preprint_file, auth=auth, save=True)
            instance.set_subjects(subjects, auth=auth)
            if license_details:
                instance.set_preprint_license(license_details, auth=auth)
            create_task_patcher = mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si')
            mock_create_identifier = create_task_patcher.start()
            if is_published:
                # NOTE(review): sync_set_identifiers runs eagerly here and its
                # None return becomes the mock's side_effect — confirm intent.
                mock_create_identifier.side_effect = sync_set_identifiers(instance)
            instance.set_published(is_published, auth=auth)
            create_task_patcher.stop()
        if not instance.is_published:
            # Unpublished preprints leave the node flagged as abandoned.
            instance.node._has_abandoned_preprint = True
        instance.node.save()
        instance.save()
        return instance
class TagFactory(DjangoModelFactory):
    """Factory for Tags; defaults to a non-system tag."""
    class Meta:
        model = models.Tag
    name = factory.Faker('word')
    system = False
class ApiOAuth2PersonalTokenFactory(DjangoModelFactory):
    """Factory for personal access tokens with full read/write scopes."""
    class Meta:
        model = models.ApiOAuth2PersonalToken
    owner = factory.SubFactory(UserFactory)
    scopes = 'osf.full_write osf.full_read'
    name = factory.Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
class ApiOAuth2ApplicationFactory(DjangoModelFactory):
    """Factory for registered OAuth2 developer applications."""
    class Meta:
        model = models.ApiOAuth2Application
    owner = factory.SubFactory(UserFactory)
    name = factory.Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
    home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
    callback_url = 'http://example.uk'
class ForkFactory(DjangoModelFactory):
    """Fork an existing ``project`` on behalf of ``user`` (defaults to its creator)."""
    class Meta:
        model = models.Node

    @classmethod
    def _create(cls, *args, **kwargs):
        source = kwargs.pop('project', None)
        forker = kwargs.pop('user', source.creator)
        new_title = kwargs.pop('title', None)
        fork = source.fork_node(auth=Auth(forker), title=new_title)
        fork.save()
        return fork
class IdentifierFactory(DjangoModelFactory):
    """Factory for external Identifiers; category defaults to 'carpid'."""
    class Meta:
        model = models.Identifier
    referent = factory.SubFactory(RegistrationFactory)
    value = factory.Sequence(lambda n: 'carp:/2460{}'.format(n))

    @classmethod
    def _create(cls, *args, **kwargs):
        kwargs.setdefault('category', 'carpid')
        return super(IdentifierFactory, cls)._create(*args, **kwargs)
class NodeRelationFactory(DjangoModelFactory):
    """Factory for a parent/child NodeRelation between two fresh nodes."""
    class Meta:
        model = models.NodeRelation
    child = factory.SubFactory(NodeFactory)
    parent = factory.SubFactory(NodeFactory)
class ExternalAccountFactory(DjangoModelFactory):
    """Factory for ExternalAccounts tied to the 'mock2' provider below."""
    class Meta:
        model = models.ExternalAccount
    oauth_key = 'some-silly-key'
    oauth_secret = 'some-super-secret'
    provider = 'mock2'
    provider_id = factory.Sequence(lambda n: 'user-{0}'.format(n))
    provider_name = 'Fake Provider'
    display_name = factory.Sequence(lambda n: 'user-{0}'.format(n))
    profile_url = 'http://wutwut.com/'
    refresh_token = 'some-sillier-key'
class MockOAuth2Provider(models.ExternalProvider):
    """Concrete ExternalProvider used to exercise the OAuth2 flow in tests."""
    name = "Mock OAuth 2.0 Provider"
    short_name = "mock2"
    client_id = "mock2_client_id"
    client_secret = "mock2_client_secret"
    auth_url_base = "https://mock2.com/auth"
    callback_url = "https://mock2.com/callback"
    auto_refresh_url = "https://mock2.com/callback"
    refresh_time = 300
    expiry_time = 9001

    def handle_callback(self, response):
        # Minimal payload: only a provider_id is returned for the callback.
        return {
            'provider_id': 'mock_provider_id'
        }
class NotificationSubscriptionFactory(DjangoModelFactory):
    """Factory for NotificationSubscription records (no extra defaults)."""
    class Meta:
        model = models.NotificationSubscription
def make_node_lineage():
    """Create a four-deep chain of nodes and return their guids, root first."""
    lineage = [NodeFactory()]
    for _ in range(3):
        lineage.append(NodeFactory(parent=lineage[-1]))
    return [node._id for node in lineage]
class NotificationDigestFactory(DjangoModelFactory):
    """Factory for notification digest entries."""
    timestamp = FuzzyDateTime(datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC))
    node_lineage = FuzzyAttribute(fuzzer=make_node_lineage)
    user = factory.SubFactory(UserFactory)
    send_type = FuzzyChoice(choices=NOTIFICATION_TYPES.keys())
    # NOTE(review): generated once at class-definition time, so every digest
    # shares the same message/event text — confirm intended.
    message = fake.text(max_nb_chars=2048)
    event = fake.text(max_nb_chars=50)

    class Meta:
        model = models.NotificationDigest
class ConferenceFactory(DjangoModelFactory):
    """Factory for active meetings/conferences."""
    class Meta:
        model = models.Conference
    endpoint = factory.Sequence(lambda n: 'conference{0}'.format(n))
    name = factory.Faker('catch_phrase')
    active = True
    is_meeting = True

    @factory.post_generation
    def admins(self, create, extracted, **kwargs):
        # Use caller-supplied admins, or default to one fresh user.
        self.admins = extracted or [UserFactory()]
class SessionFactory(DjangoModelFactory):
    """Factory for Session objects, optionally pre-authenticated as ``user``."""
    class Meta:
        model = models.Session

    @classmethod
    def _build(cls, target_class, *args, **kwargs):
        auth_user = kwargs.pop('user', None)
        session = target_class(*args, **kwargs)
        if auth_user:
            # Mirror what a real login stores on the session.
            session.data['auth_user_username'] = auth_user.username
            session.data['auth_user_id'] = auth_user._primary_key
            session.data['auth_user_fullname'] = auth_user.fullname
        return session

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        session = cls._build(target_class, *args, **kwargs)
        session.save()
        return session
class ArchiveJobFactory(DjangoModelFactory):
    """Factory for ArchiveJob records (no extra defaults)."""
    class Meta:
        model = models.ArchiveJob
class ActionFactory(DjangoModelFactory):
    """Factory for review Actions with randomized (but valid) workflow states."""
    class Meta:
        model = models.Action
    trigger = FuzzyChoice(choices=workflow.Triggers.values())
    comment = factory.Faker('text')
    from_state = FuzzyChoice(choices=workflow.States.values())
    to_state = FuzzyChoice(choices=workflow.States.values())
    target = factory.SubFactory(PreprintFactory)
    creator = factory.SubFactory(AuthUserFactory)
    is_deleted = False
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ImageProcessingOperations(object):
    """ImageProcessingOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.agrifood.farming.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _create_rasterize_job_initial(
        self,
        job_id,  # type: str
        job=None,  # type: Optional["_models.ImageProcessingRasterizeJob"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ImageProcessingRasterizeJob"
        # Initial PUT of the long-running rasterize job; the service replies
        # 202 Accepted and the LRO poller tracks completion.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImageProcessingRasterizeJob"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-31-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_rasterize_job_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'jobId': self._serialize.url("job_id", job_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        if job is not None:
            body_content = self._serialize.body(job, 'ImageProcessingRasterizeJob')
        else:
            # An empty body is allowed; the job parameters are optional.
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = self._deserialize('ImageProcessingRasterizeJob', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_rasterize_job_initial.metadata = {'url': '/image-processing/rasterize/{jobId}'}  # type: ignore

    def begin_create_rasterize_job(
        self,
        job_id,  # type: str
        job=None,  # type: Optional["_models.ImageProcessingRasterizeJob"]
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ImageProcessingRasterizeJob"]
        """Create a ImageProcessing Rasterize job.
        :param job_id: JobId provided by user.
        :type job_id: str
        :param job: Job parameters supplied by user.
        :type job: ~azure.agrifood.farming.models.ImageProcessingRasterizeJob
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be LROBasePolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ImageProcessingRasterizeJob or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.agrifood.farming.models.ImageProcessingRasterizeJob]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImageProcessingRasterizeJob"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request now. cls=lambda keeps
            # the raw pipeline response for the poller.
            raw_result = self._create_rasterize_job_initial(
                job_id=job_id,
                job=job,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; remove them so they
        # are not replayed on polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ImageProcessingRasterizeJob', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'jobId': self._serialize.url("job_id", job_id, 'str'),
        }
        if polling is True: polling_method = LROBasePolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_rasterize_job.metadata = {'url': '/image-processing/rasterize/{jobId}'}  # type: ignore

    def get_rasterize_job(
        self,
        job_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ImageProcessingRasterizeJob"
        """Get ImageProcessing Rasterize job's details.
        :param job_id: ID of the job.
        :type job_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImageProcessingRasterizeJob, or the result of cls(response)
        :rtype: ~azure.agrifood.farming.models.ImageProcessingRasterizeJob
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImageProcessingRasterizeJob"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-31-preview"
        accept = "application/json"
        # Construct URL
        url = self.get_rasterize_job.metadata['url']  # type: ignore
        path_format_arguments = {
            'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'jobId': self._serialize.url("job_id", job_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = self._deserialize('ImageProcessingRasterizeJob', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_rasterize_job.metadata = {'url': '/image-processing/rasterize/{jobId}'}  # type: ignore
| |
import datetime
from django.contrib.admin.util import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
    """
    Generates an individual page index link in a paginated list.
    """
    if i == DOT:
        return u'... '
    if i == cl.page_num:
        return mark_safe(u'<span class="this-page">%d</span> ' % (i + 1))
    end_class = ' class="end"' if i == cl.paginator.num_pages - 1 else ''
    href = escape(cl.get_query_string({PAGE_VAR: i}))
    return mark_safe(u'<a href="%s"%s>%d</a> ' % (href, end_class, i + 1))
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
    """
    Generates the series of links to the pages in a paginated list.
    """
    paginator, page_num = cl.paginator, cl.page_num
    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 3
        ON_ENDS = 2
        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy
        if paginator.num_pages <= 10:
            page_range = range(paginator.num_pages)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                # Leading pages, a gap, then the window before the current page.
                page_range.extend(range(0, ON_EACH_SIDE - 1))
                page_range.append(DOT)
                page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
            else:
                page_range.extend(range(0, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                # Window after the current page, a gap, then the trailing pages.
                page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
                page_range.append(DOT)
                page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
            else:
                page_range.extend(range(page_num + 1, paginator.num_pages))
    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        'cl': cl,
        'pagination_required': pagination_required,
        'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
        'page_range': page_range,
        'ALL_VAR': ALL_VAR,
        '1': 1,
    }
def result_headers(cl):
    """
    Generates the list column headers.

    Yields one dict per column in cl.list_display with its caption text,
    whether/how it is sortable, its current sort state, and the URLs that
    change the ordering.
    """
    ordering_field_columns = cl.get_ordering_field_columns()
    for i, field_name in enumerate(cl.list_display):
        text, attr = label_for_field(field_name, cl.model,
            model_admin = cl.model_admin,
            return_attr = True
        )
        if attr:
            # Potentially not sortable
            # if the field is the action checkbox: no sorting and special class
            if field_name == 'action_checkbox':
                yield {
                    "text": text,
                    "class_attrib": mark_safe(' class="action-checkbox-column"'),
                    "sortable": False,
                }
                continue
            admin_order_field = getattr(attr, "admin_order_field", None)
            if not admin_order_field:
                # Not sortable
                yield {
                    "text": text,
                    "sortable": False,
                }
                continue
        # OK, it is sortable if we got this far
        th_classes = ['sortable']
        order_type = ''
        new_order_type = 'asc'
        sort_priority = 0
        # NOTE(review): local name shadows the builtin sorted() below.
        sorted = False
        # Is it currently being sorted on?
        if i in ordering_field_columns:
            sorted = True
            order_type = ordering_field_columns.get(i).lower()
            # Relies on dict.keys() returning a list (Python 2 only).
            sort_priority = ordering_field_columns.keys().index(i) + 1
            th_classes.append('sorted %sending' % order_type)
            # Clicking a sorted column toggles its direction.
            new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
        # build new ordering param
        o_list_primary = [] # URL for making this field the primary sort
        o_list_remove = [] # URL for removing this field from sort
        o_list_toggle = [] # URL for toggling order type for this field
        make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
        for j, ot in ordering_field_columns.items():
            if j == i: # Same column
                param = make_qs_param(new_order_type, j)
                # We want clicking on this header to bring the ordering to the
                # front
                o_list_primary.insert(0, param)
                o_list_toggle.append(param)
                # o_list_remove - omit
            else:
                param = make_qs_param(ot, j)
                o_list_primary.append(param)
                o_list_toggle.append(param)
                o_list_remove.append(param)
        if i not in ordering_field_columns:
            o_list_primary.insert(0, make_qs_param(new_order_type, i))
        yield {
            "text": text,
            "sortable": True,
            "sorted": sorted,
            "ascending": order_type == "asc",
            "sort_priority": sort_priority,
            "url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
            "url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
            "url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
            "class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')
        }
def _boolean_icon(field_val):
    """Return an <img> tag showing the yes/no/unknown icon for ``field_val``."""
    icon_name = {True: 'yes', False: 'no', None: 'unknown'}[field_val]
    icon_url = static('admin/img/icon-%s.gif' % icon_name)
    return mark_safe(u'<img src="%s" alt="%s" />' % (icon_url, field_val))
def items_for_result(cl, result, form):
    """
    Generates the actual list of data.

    Yields one <td>/<th> cell (as safe HTML) per column in cl.list_display
    for the given result row, plus a trailing cell holding the hidden pk
    widget when a list_editable form is present.
    """
    first = True
    pk = cl.lookup_opts.pk.attname
    for field_name in cl.list_display:
        row_class = ''
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except (AttributeError, ObjectDoesNotExist):
            result_repr = EMPTY_CHANGELIST_VALUE
        else:
            if f is None:
                # Non-model value (callable / ModelAdmin attribute).
                if field_name == u'action_checkbox':
                    row_class = ' class="action-checkbox"'
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean:
                    allow_tags = True
                result_repr = display_for_value(value, boolean)
                # Strip HTML tags in the resulting text, except if the
                # function has an "allow_tags" attribute set to True.
                if not allow_tags:
                    result_repr = escape(result_repr)
                else:
                    result_repr = mark_safe(result_repr)
                if isinstance(value, (datetime.date, datetime.time)):
                    row_class = ' class="nowrap"'
            else:
                if isinstance(f.rel, models.ManyToOneRel):
                    field_val = getattr(result, f.name)
                    if field_val is None:
                        result_repr = EMPTY_CHANGELIST_VALUE
                    else:
                        result_repr = escape(field_val)
                else:
                    result_repr = display_for_field(value, f)
                if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
                    row_class = ' class="nowrap"'
        if force_unicode(result_repr) == '':
            result_repr = mark_safe(' ')
        # If list_display_links not defined, add the link tag to the first field
        if (first and not cl.list_display_links) or field_name in cl.list_display_links:
            table_tag = {True:'th', False:'td'}[first]
            first = False
            url = cl.url_for_result(result)
            # Convert the pk to something that can be used in Javascript.
            # Problem cases are long ints (23L) and non-ASCII strings.
            if cl.to_field:
                attr = str(cl.to_field)
            else:
                attr = pk
            value = result.serializable_value(attr)
            # repr() yields a quoted literal; [1:] drops the leading u prefix.
            result_id = repr(force_unicode(value))[1:]
            yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' % \
                (table_tag, row_class, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
        else:
            # By default the fields come from ModelAdmin.list_editable, but if we pull
            # the fields out of the form instead of list_editable custom admins
            # can provide fields on a per request basis
            if (form and field_name in form.fields and not (
                field_name == cl.model._meta.pk.name and
                form[cl.model._meta.pk.name].is_hidden)):
                bf = form[field_name]
                result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
            else:
                result_repr = conditional_escape(result_repr)
            yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))
    if form and not form[cl.model._meta.pk.name].is_hidden:
        yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))
class ResultList(list):
    """
    List subclass that carries the form used to render a list_editable
    changelist row, so error reporting can reach the template.  Kept to
    maintain backwards compatibility with existing admin templates.
    """
    def __init__(self, form, *items):
        super(ResultList, self).__init__(*items)
        self.form = form
def results(cl):
    """Yield one ResultList per row, pairing rows with formset forms if any."""
    if not cl.formset:
        for res in cl.result_list:
            yield ResultList(None, items_for_result(cl, res, None))
        return
    for res, form in zip(cl.result_list, cl.formset.forms):
        yield ResultList(form, items_for_result(cl, res, form))
def result_hidden_fields(cl):
    """Yield the rendered hidden primary-key widgets of each list_editable form."""
    if not cl.formset:
        return
    pk_name = cl.model._meta.pk.name
    for res, form in zip(cl.result_list, cl.formset.forms):
        pk_field = form[pk_name]
        if pk_field.is_hidden:
            yield mark_safe(force_unicode(pk_field))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
    """
    Displays the headers and data list together
    """
    headers = list(result_headers(cl))
    # Count how many columns are actively sorted (drives multi-sort UI).
    num_sorted_fields = sum(1 for h in headers if h['sortable'] and h['sorted'])
    return {
        'cl': cl,
        'result_hidden_fields': list(result_hidden_fields(cl)),
        'result_headers': headers,
        'num_sorted_fields': num_sorted_fields,
        'results': list(results(cl)),
    }
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
    """
    Displays the date hierarchy for date drill-down functionality.

    Depending on which of year/month/day are present in the query string,
    returns the context for the matching drill-down level, including a
    "back" link to the level above.  Returns None (renders nothing) when
    the changelist defines no date_hierarchy.
    """
    if cl.date_hierarchy:
        field_name = cl.date_hierarchy
        year_field = '%s__year' % field_name
        month_field = '%s__month' % field_name
        day_field = '%s__day' % field_name
        field_generic = '%s__' % field_name
        year_lookup = cl.params.get(year_field)
        month_lookup = cl.params.get(month_field)
        day_lookup = cl.params.get(day_field)
        # Builds a query string with the given lookups while dropping any
        # existing filters on this date field.
        link = lambda d: cl.get_query_string(d, [field_generic])
        if not (year_lookup or month_lookup or day_lookup):
            # select appropriate start level
            date_range = cl.query_set.aggregate(first=models.Min(field_name),
                                                last=models.Max(field_name))
            if date_range['first'] and date_range['last']:
                # When all data falls inside a single year (or month), skip
                # the broader levels and start drilled-down.
                if date_range['first'].year == date_range['last'].year:
                    year_lookup = date_range['first'].year
                    if date_range['first'].month == date_range['last'].month:
                        month_lookup = date_range['first'].month
        if year_lookup and month_lookup and day_lookup:
            # Day level: deepest level, nothing further to drill into.
            day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup, month_field: month_lookup}),
                    'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
                },
                'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
            }
        elif year_lookup and month_lookup:
            # Month level: list the days that actually have records.
            days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup}),
                    'title': str(year_lookup)
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
                    'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
                } for day in days]
            }
        elif year_lookup:
            # Year level: list the months that actually have records.
            months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
            return {
                'show' : True,
                'back': {
                    'link' : link({}),
                    'title': _('All dates')
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month.month}),
                    'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
                } for month in months]
            }
        else:
            # Top level: list every year that has records.
            years = cl.query_set.dates(field_name, 'year')
            return {
                'show': True,
                'choices': [{
                    'link': link({year_field: str(year.year)}),
                    'title': str(year.year),
                } for year in years]
            }
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
    """
    Displays a search form for searching the list.
    """
    # Only mention the result count when filters/search narrowed it down.
    show_result_count = cl.result_count != cl.full_result_count
    return {
        'cl': cl,
        'show_result_count': show_result_count,
        'search_var': SEARCH_VAR,
    }
@register.simple_tag
def admin_list_filter(cl, spec):
    """Render one list-filter sidebar section using the spec's own template."""
    context = Context({
        'title': spec.title,
        'choices': list(spec.choices(cl)),
        'spec': spec,
    })
    return get_template(spec.template).render(context)
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
    """
    Track the number of times the action field has been rendered on the page,
    so we know which value to use.
    """
    previous_index = context.get('action_index', -1)
    context['action_index'] = previous_index + 1
    return context
| |
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .aftonbladet import AftonbladetIE
from .anitube import AnitubeIE
from .aol import AolIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
)
from .collegehumor import CollegeHumorIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .condenast import CondeNastIE
from .criterion import CriterionIE
from .crunchyroll import CrunchyrollIE
from .cspan import CSpanIE
from .d8 import D8IE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .eitb import EitbIE
from .elpais import ElPaisIE
from .engadget import EngadgetIE
from .escapist import EscapistIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .fourtube import FourTubeIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gamespot import GameSpotIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .hark import HarkIE
from .helsinki import HelsinkiIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jukebox import JukeboxIE
from .justintv import JustinTVIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .la7 import LA7IE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import LivestreamIE, LivestreamOriginalIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mixcloud import MixcloudIE
from .mpora import MporaIE
from .mofosex import MofosexIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motorsport import MotorsportIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVIggyIE,
)
from .musicplayon import MusicPlayOnIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
)
from .ndr import NDRIE
from .ndtv import NDTVIE
from .newgrounds import NewgroundsIE
from .nfb import NFBIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .ntv import NTVIE
from .oe1 import OE1IE
from .ooyala import OoyalaIE
from .orf import ORFIE
from .parliamentliveuk import ParliamentLiveUKIE
from .pbs import PBSIE
from .photobucket import PhotobucketIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .radiofrance import RadioFranceIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtlnow import RTLnowIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .savefrom import SaveFromIE
from .servingsys import ServingSysIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .sohu import SohuIE
from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
from .southparkstudios import (
SouthParkStudiosIE,
SouthparkDeIE,
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .spike import SpikeIE
from .stanfordoc import StanfordOpenClassroomIE
from .statigram import StatigramIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theplatform import ThePlatformIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videopremium import VideoPremiumIE
from .videoweed import VideoWeedIE
from .vimeo import (
VimeoIE,
VimeoChannelIE,
VimeoUserIE,
VimeoAlbumIE,
VimeoGroupsIE,
VimeoReviewIE,
)
from .vine import VineIE
from .viki import VikiIE
from .vk import VKIE
from .vube import VubeIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wdr import (
WDRIE,
WDRMausIE,
)
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .xbef import XBefIE
from .xhamster import XHamsterIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .yahoo import (
YahooIE,
YahooNewsIE,
YahooSearchIE,
)
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTopListIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zdf import ZDFIE
# Collect every extractor class imported above by naming convention:
# anything ending in "IE", except GenericIE which must be tried last.
_ALL_CLASSES = [
    klass
    for name, klass in globals().items()
    if name.endswith('IE') and name != 'GenericIE'
]
# GenericIE is the catch-all fallback, so it goes at the very end.
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    instances = []
    for extractor_class in _ALL_CLASSES:
        instances.append(extractor_class())
    return instances
def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name.

    Raises KeyError if no extractor named ``<ie_name>IE`` exists.
    """
    return globals()['%sIE' % ie_name]
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import sh
from stackalytics.openstack.common import log as logging
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
class Vcs(object):
    """Base interface for version-control backends used by the processor.

    On construction the sources root folder is created if missing, or
    validated for write access if it already exists.  The remaining
    methods are no-op hooks that concrete backends override.
    """
    def __init__(self, repo, sources_root):
        self.repo = repo
        self.sources_root = sources_root
        if os.path.exists(sources_root):
            if not os.access(sources_root, os.W_OK):
                raise Exception('Sources root folder %s is not writable' %
                                sources_root)
        else:
            os.mkdir(sources_root)
    def fetch(self):
        pass
    def get_release_index(self):
        pass
    def log(self, branch, head_commit_id):
        pass
    def get_last_id(self, branch):
        pass
# (field name, `git log` pretty-format placeholder) pairs parsed per commit.
GIT_LOG_PARAMS = [
    ('commit_id', '%H'),
    ('date', '%at'),
    ('author_name', '%an'),
    ('author_email', '%ae'),
    ('subject', '%s'),
    ('message', '%b'),
]
# Pretty format: one "name:value" line per field, terminated by "diff_stat:".
GIT_LOG_FORMAT = ''.join([(r[0] + ':' + r[1] + '%n')
                          for r in GIT_LOG_PARAMS]) + 'diff_stat:'
# Matches `--shortstat` output such as
# " 3 files changed, 10 insertions(+), 2 deletions(-)".
# Raw strings fix the invalid escape sequences ('\d', '\s') that plain
# literals produced; the resulting regexes are unchanged.
DIFF_STAT_PATTERN = (r'[^\d]+(\d+)\s+[^\s]*\s+changed'
                     r'(,\s+(\d+)\s+([^\d\s]*)\s+(\d+)?)?')
GIT_LOG_PATTERN = re.compile(''.join([(r[0] + ':(.*?)\n')
                                      for r in GIT_LOG_PARAMS]) +
                             'diff_stat:' + DIFF_STAT_PATTERN,
                             re.DOTALL)
# Patterns pulling bug / blueprint / Gerrit change ids out of commit messages.
MESSAGE_PATTERNS = {
    'bug_id': re.compile(r'bug[\s#:]*(?P<id>\d+)', re.IGNORECASE),
    'blueprint_id': re.compile(r'\b(?:blueprint|bp)\b[ \t]*[#:]?[ \t]*'
                               r'(?P<id>[a-z0-9-]+)', re.IGNORECASE),
    'change_id': re.compile(r'Change-Id: (?P<id>I[0-9a-f]{40})', re.IGNORECASE),
}
class Git(Vcs):
    """Git-backed Vcs implementation driven through the `sh` git wrapper.

    NOTE(review): methods navigate via os.chdir(), so the process-wide
    working directory changes as a side effect of every call.
    """
    def __init__(self, repo, sources_root):
        super(Git, self).__init__(repo, sources_root)
        uri = self.repo['uri']
        # Derive the local clone folder from the last uri path component,
        # e.g. ".../nova.git" -> "<sources_root>/nova".
        match = re.search(r'([^\/]+)\.git$', uri)
        if match:
            self.folder = os.path.normpath(self.sources_root + '/' +
                                           match.group(1))
        else:
            raise Exception('Unexpected uri %s for git' % uri)
        # Maps commit id -> release name; filled by get_release_index().
        self.release_index = {}
    def _checkout(self, branch):
        # Check out the remote-tracking branch.  Returns False (after
        # logging) instead of raising when git fails, so callers can skip.
        try:
            sh.git('checkout', 'origin/' + branch)
            return True
        except sh.ErrorReturnCode as e:
            LOG.error('Unable to checkout branch %(branch)s from repo '
                      '%(uri)s. Ignore it',
                      {'branch': branch, 'uri': self.repo['uri']})
            LOG.exception(e)
            return False
    def fetch(self):
        """Clone the repo if needed, otherwise fetch updates.

        Re-clones from scratch when the cloned origin differs from the
        configured uri, and refreshes the commit->release index at the end.
        """
        LOG.debug('Fetching repo uri %s' % self.repo['uri'])
        if os.path.exists(self.folder):
            os.chdir(self.folder)
            uri = str(sh.git('config', '--get', 'remote.origin.url')).strip()
            if uri != self.repo['uri']:
                # Configured uri changed: drop the stale clone entirely.
                LOG.debug('Repo uri %(uri)s differs from cloned %(old)s',
                          {'uri': self.repo['uri'], 'old': uri})
                os.chdir('..')
                shutil.rmtree(self.folder)
        if not os.path.exists(self.folder):
            os.chdir(self.sources_root)
            try:
                sh.git('clone', self.repo['uri'])
            except sh.ErrorReturnCode as e:
                LOG.error('Unable to clone git repo %s. Ignore it',
                          self.repo['uri'])
                LOG.exception(e)
            os.chdir(self.folder)
        else:
            os.chdir(self.folder)
            try:
                sh.git('fetch')
            except sh.ErrorReturnCode as e:
                LOG.error('Unable to fetch git repo %s. Ignore it',
                          self.repo['uri'])
                LOG.exception(e)
        self.get_release_index()
    def get_release_index(self):
        """Build (once, lazily) and return the commit id -> release mapping."""
        if not os.path.exists(self.folder):
            return {}
        LOG.debug('Get release index for repo uri: %s', self.repo['uri'])
        os.chdir(self.folder)
        if not self.release_index:
            for release in self.repo['releases']:
                release_name = release['release_name'].lower()
                if 'branch' in release:
                    branch = release['branch']
                else:
                    branch = 'master'
                if not self._checkout(branch):
                    continue
                # Either a bounded tag range or everything up to tag_to.
                if 'tag_from' in release:
                    tag_range = release['tag_from'] + '..' + release['tag_to']
                else:
                    tag_range = release['tag_to']
                git_log_iterator = sh.git('log', '--pretty=%H', tag_range,
                                          _tty_out=False)
                for commit_id in git_log_iterator:
                    self.release_index[commit_id.strip()] = release_name
        return self.release_index
    def log(self, branch, head_commit_id):
        """Yield parsed commit records from `git log` on ``branch``.

        Starts after ``head_commit_id`` when given, otherwise walks from
        HEAD.  Stops silently if the branch cannot be checked out.
        """
        LOG.debug('Parsing git log for repo uri %s', self.repo['uri'])
        os.chdir(self.folder)
        if not self._checkout(branch):
            return
        commit_range = 'HEAD'
        if head_commit_id:
            commit_range = head_commit_id + '..HEAD'
        output = sh.git('log', '--pretty=%s' % GIT_LOG_FORMAT, '--shortstat',
                        '-M', '--no-merges', commit_range, _tty_out=False,
                        _decode_errors='ignore')
        for rec in re.finditer(GIT_LOG_PATTERN, str(output)):
            i = 1
            commit = {}
            # Positional groups 1..len(GIT_LOG_PARAMS) are the log fields.
            for param in GIT_LOG_PARAMS:
                commit[param[0]] = unicode(rec.group(i), 'utf8')
                i += 1
            if not utils.check_email_validity(commit['author_email']):
                continue
            # Remaining groups come from DIFF_STAT_PATTERN (--shortstat).
            commit['files_changed'] = int(rec.group(i))
            i += 1
            lines_changed_group = rec.group(i)
            i += 1
            lines_changed = rec.group(i)
            i += 1
            deleted_or_inserted = rec.group(i)
            i += 1
            lines_deleted = rec.group(i)
            i += 1
            if lines_changed_group: # there inserted or deleted lines
                if not lines_deleted:
                    # Only one counter present; decide by its label word.
                    if deleted_or_inserted[0] == 'd': # deleted
                        lines_deleted = lines_changed
                        lines_changed = 0
            commit['lines_added'] = int(lines_changed or 0)
            commit['lines_deleted'] = int(lines_deleted or 0)
            # Extract bug / blueprint / change ids from the message body.
            for pattern_name, pattern in MESSAGE_PATTERNS.iteritems():
                collection = set()
                for item in re.finditer(pattern, commit['message']):
                    collection.add(item.group('id'))
                commit[pattern_name] = list(collection)
            commit['date'] = int(commit['date'])
            commit['module'] = self.repo['module']
            commit['branches'] = set([branch])
            if commit['commit_id'] in self.release_index:
                commit['release'] = self.release_index[commit['commit_id']]
            else:
                commit['release'] = None
            # Qualify blueprint names with the module they belong to.
            if 'blueprint_id' in commit:
                commit['blueprint_id'] = [(commit['module'] + ':' + bp_name)
                                          for bp_name
                                          in commit['blueprint_id']]
            yield commit
    def get_last_id(self, branch):
        """Return the head commit id of ``branch``, or None on checkout failure."""
        LOG.debug('Get head commit for repo uri: %s', self.repo['uri'])
        os.chdir(self.folder)
        if not self._checkout(branch):
            return None
        return str(sh.git('rev-parse', 'HEAD')).strip()
def get_vcs(repo, sources_root):
    """Factory returning the VCS handler for ``repo``.

    URIs ending in ``.git`` get a Git handler; anything else falls back to
    the no-op Vcs base implementation.

    :param repo: repo description dict; must contain 'uri'
    :param sources_root: local folder under which sources are cloned
    """
    uri = repo['uri']
    LOG.debug('Factory is asked for VCS uri: %s', uri)
    match = re.search(r'\.git$', uri)
    if match:
        return Git(repo, sources_root)
    else:
        LOG.warning('Unsupported VCS, fallback to dummy')
        # Bug fix: the fallback previously passed `uri` as the sources root
        # (Vcs(repo, uri)), making the dummy Vcs try to create/validate a
        # directory named after the URI instead of the configured root.
        return Vcs(repo, sources_root)
| |
"""Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import op_def_library
def abort(error_msg=None, name=None):
    r"""Raise an exception to abort the process when called.

    Returns nothing but an exception.

    Args:
      error_msg: An optional `string`. Defaults to `""`.
        A string which is the message associated with the exception.
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    return _op_def_lib.apply_op("Abort", name=name, error_msg=error_msg)
def control_trigger(name=None):
    r"""Does nothing. Serves as a control trigger for scheduling. Only useful as a
    placeholder for control edges.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    # Dispatch to the registered "ControlTrigger" op.
    return _op_def_lib.apply_op("ControlTrigger", name=name)
def enter(data, frame_name, is_constant=None, parallel_iterations=None,
          name=None):
    r"""Creates or finds a child frame, and makes `data` available to the child frame.

    This op is used together with `Exit` to create loops in the graph.
    The unique `frame_name` is used by the `Executor` to identify frames. If
    `is_constant` is true, `output` is a constant in the child frame; otherwise
    it may be changed in the child frame. At most `parallel_iterations` iterations
    are run in parallel in the child frame.

    Args:
      data: A `Tensor`. The tensor to be made available to the child frame.
      frame_name: A `string`. The name of the child frame.
      is_constant: An optional `bool`. Defaults to `False`.
        If true, the output is constant within the child frame.
      parallel_iterations: An optional `int`. Defaults to `10`.
        The number of iterations allowed to run in parallel.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `data`. The same tensor as `data`.
    """
    return _op_def_lib.apply_op("Enter",
                                name=name,
                                data=data,
                                frame_name=frame_name,
                                parallel_iterations=parallel_iterations,
                                is_constant=is_constant)
def _exit(data, name=None):
    r"""Exits the current frame to its parent frame.

    Exit makes its input `data` available to the parent frame.

    Args:
      data: A `Tensor`. The tensor to be made available to the parent frame.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `data`. The same tensor as `data`.
    """
    return _op_def_lib.apply_op("Exit", name=name, data=data)
def loop_cond(input, name=None):
    r"""Forwards the input to the output.

    This operator represents the loop termination condition used by the
    "pivot" switches of a loop.

    Args:
      input: A `Tensor` of type `bool`.
        A boolean scalar, representing the branch predicate of the Switch op.
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `bool`. The same tensor as `input`.
    """
    return _op_def_lib.apply_op("LoopCond", name=name, input=input)
def _merge(inputs, name=None):
    r"""Forwards the value of an available tensor from `inputs` to `output`.

    `Merge` waits for at least one of the tensors in `inputs` to become available.
    It is usually combined with `Switch` to implement branching.
    `Merge` forwards the first tensor to become available to `output`, and sets
    `value_index` to its index in `inputs`.
    It is an error if more than one tensor in `inputs` is available.

    Args:
      inputs: A list of at least 1 `Tensor` objects of the same type.
        The input tensors, exactly one of which will become available.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (output, value_index).
      output: A `Tensor`. Has the same type as `inputs`. Will be set to the available input tensor.
      value_index: A `Tensor` of type `int32`. The index of the chosen input tensor in `inputs`.
    """
    return _op_def_lib.apply_op("Merge", name=name, inputs=inputs)
def next_iteration(data, name=None):
    r"""Makes its input available to the next iteration.

    Args:
      data: A `Tensor`. The tensor to be made available to the next iteration.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `data`. The same tensor as `data`.
    """
    return _op_def_lib.apply_op("NextIteration", name=name, data=data)
def no_op(name=None):
    r"""Does nothing. Only useful as a placeholder for control edges.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created Operation.
    """
    # Dispatch to the registered "NoOp" op.
    return _op_def_lib.apply_op("NoOp", name=name)
def ref_enter(data, frame_name, is_constant=None, parallel_iterations=None,
              name=None):
    r"""Creates or finds a child frame, and makes `data` available to the child frame.

    The unique `frame_name` is used by the `Executor` to identify frames. If
    `is_constant` is true, `output` is a constant in the child frame; otherwise
    it may be changed in the child frame. At most `parallel_iterations` iterations
    are run in parallel in the child frame.

    Args:
      data: A mutable `Tensor`.
        The tensor to be made available to the child frame.
      frame_name: A `string`. The name of the child frame.
      is_constant: An optional `bool`. Defaults to `False`.
        If true, the output is constant within the child frame.
      parallel_iterations: An optional `int`. Defaults to `10`.
        The number of iterations allowed to run in parallel.
      name: A name for the operation (optional).

    Returns:
      A mutable `Tensor`. Has the same type as `data`.
      The same tensor as `data`.
    """
    return _op_def_lib.apply_op("RefEnter",
                                name=name,
                                data=data,
                                frame_name=frame_name,
                                parallel_iterations=parallel_iterations,
                                is_constant=is_constant)
def _ref_exit(data, name=None):
    r"""Exits the current frame to its parent frame.

    Exit makes its input `data` available to the parent frame.

    Args:
      data: A mutable `Tensor`.
        The tensor to be made available to the parent frame.
      name: A name for the operation (optional).

    Returns:
      A mutable `Tensor`. Has the same type as `data`.
      The same tensor as `data`.
    """
    return _op_def_lib.apply_op("RefExit", name=name, data=data)
def _ref_merge(inputs, name=None):
    r"""Forwards the value of an available tensor from `inputs` to `output`.

    `Merge` waits for at least one of the tensors in `inputs` to become available.
    It is usually combined with `Switch` to implement branching.
    `Merge` forwards the first tensor to become available to `output`, and sets
    `value_index` to its index in `inputs`.
    It is an error if more than one tensor in `inputs` is available.

    Args:
      inputs: A list of at least 1 mutable `Tensor` objects of the same type.
        The input tensors, exactly one of which will become available.
      name: A name for the operation (optional).

    Returns:
      A tuple of `Tensor` objects (output, value_index).
      output: A mutable `Tensor`. Has the same type as `inputs`. Will be set to the available input tensor.
      value_index: A `Tensor` of type `int32`. The index of the chosen input tensor in `inputs`.
    """
    return _op_def_lib.apply_op("RefMerge", name=name, inputs=inputs)
def ref_next_iteration(data, name=None):
  r"""Make `data` available to the next loop iteration.

  Args:
    data: A mutable `Tensor` to carry into the next iteration.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `data` (the same tensor).
  """
  result = _op_def_lib.apply_op("RefNextIteration", name=name, data=data)
  return result
def ref_select(index, inputs, name=None):
  r"""Forward the `index`-th element of `inputs` to the output.

  Args:
    index: A scalar `int32` `Tensor` selecting which input is forwarded.
    inputs: A list of at least 1 mutable `Tensor` objects of the same type;
      the selected one is forwarded to the output.
    name: Optional name for the operation.

  Returns:
    A mutable `Tensor` of the same type as `inputs`: the forwarded tensor.
  """
  result = _op_def_lib.apply_op("RefSelect", name=name, index=index,
                                inputs=inputs)
  return result
def ref_switch(data, pred, name=None):
  r"""Route the ref tensor `data` to one of two outputs based on `pred`.

  When `pred` is true, `data` goes to `output_true`; otherwise it goes to
  `output_false`. See also `Switch` and `Merge`.

  Args:
    data: A mutable `Tensor` to be forwarded to the selected output.
    pred: A scalar `bool` `Tensor` choosing the output port.
    name: Optional name for the operation.

  Returns:
    A tuple `(output_false, output_true)` of mutable `Tensor`s with the
    same type as `data`; exactly one of them receives the data.
  """
  result = _op_def_lib.apply_op("RefSwitch", name=name, data=data, pred=pred)
  return result
def _switch(data, pred, name=None):
  r"""Route `data` to one of two outputs based on `pred`.

  When `pred` is true, `data` goes to `output_true`; otherwise it goes to
  `output_false`. See also `RefSwitch` and `Merge`.

  Args:
    data: A `Tensor` to be forwarded to the selected output.
    pred: A scalar `bool` `Tensor` choosing the output port.
    name: Optional name for the operation.

  Returns:
    A tuple `(output_false, output_true)` of `Tensor`s with the same type
    as `data`; exactly one of them receives the data.
  """
  result = _op_def_lib.apply_op("Switch", name=name, data=data, pred=pred)
  return result
def _InitOpDefLibrary():
  """Parse the embedded OpDef protos, register them, and build a library.

  Reads the ASCII-serialized `OpList` stored on this function
  (`_InitOpDefLibrary.op_list_ascii`), registers it with the op registry,
  and returns an `OpDefLibrary` populated with those ops.
  """
  parsed_ops = op_def_pb2.OpList()
  text_format.Merge(_InitOpDefLibrary.op_list_ascii, parsed_ops)
  op_def_registry.register_op_list(parsed_ops)
  library = op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
_InitOpDefLibrary.op_list_ascii = """op {
name: "Abort"
attr {
name: "error_msg"
type: "string"
default_value {
s: ""
}
}
}
op {
name: "ControlTrigger"
}
op {
name: "Enter"
input_arg {
name: "data"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "frame_name"
type: "string"
}
attr {
name: "is_constant"
type: "bool"
default_value {
b: false
}
}
attr {
name: "parallel_iterations"
type: "int"
default_value {
i: 10
}
}
}
op {
name: "Exit"
input_arg {
name: "data"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "LoopCond"
input_arg {
name: "input"
type: DT_BOOL
}
output_arg {
name: "output"
type: DT_BOOL
}
}
op {
name: "Merge"
input_arg {
name: "inputs"
type_attr: "T"
number_attr: "N"
}
output_arg {
name: "output"
type_attr: "T"
}
output_arg {
name: "value_index"
type: DT_INT32
}
attr {
name: "T"
type: "type"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 1
}
}
op {
name: "NextIteration"
input_arg {
name: "data"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "NoOp"
}
op {
name: "RefEnter"
input_arg {
name: "data"
type_attr: "T"
is_ref: true
}
output_arg {
name: "output"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
attr {
name: "frame_name"
type: "string"
}
attr {
name: "is_constant"
type: "bool"
default_value {
b: false
}
}
attr {
name: "parallel_iterations"
type: "int"
default_value {
i: 10
}
}
}
op {
name: "RefExit"
input_arg {
name: "data"
type_attr: "T"
is_ref: true
}
output_arg {
name: "output"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
}
op {
name: "RefMerge"
input_arg {
name: "inputs"
type_attr: "T"
number_attr: "N"
is_ref: true
}
output_arg {
name: "output"
type_attr: "T"
is_ref: true
}
output_arg {
name: "value_index"
type: DT_INT32
}
attr {
name: "T"
type: "type"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 1
}
}
op {
name: "RefNextIteration"
input_arg {
name: "data"
type_attr: "T"
is_ref: true
}
output_arg {
name: "output"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
}
op {
name: "RefSelect"
input_arg {
name: "index"
type: DT_INT32
}
input_arg {
name: "inputs"
type_attr: "T"
number_attr: "N"
is_ref: true
}
output_arg {
name: "output"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 1
}
}
op {
name: "RefSwitch"
input_arg {
name: "data"
type_attr: "T"
is_ref: true
}
input_arg {
name: "pred"
type: DT_BOOL
}
output_arg {
name: "output_false"
type_attr: "T"
is_ref: true
}
output_arg {
name: "output_true"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
}
op {
name: "Switch"
input_arg {
name: "data"
type_attr: "T"
}
input_arg {
name: "pred"
type: DT_BOOL
}
output_arg {
name: "output_false"
type_attr: "T"
}
output_arg {
name: "output_true"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
"""
_op_def_lib = _InitOpDefLibrary()
| |
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2017 AT&T Intellectual Property.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import copy
import yaml
from oslo_utils import strutils
import six
from six.moves.urllib import parse
def getid(obj):
    """Return ``obj.id`` when present, otherwise ``obj`` itself.

    Abstracts the common pattern of allowing either an object or that
    object's ID as a parameter when dealing with relationships.
    """
    # Sentinel keeps behavior identical even when ``obj.id`` is None.
    _missing = object()
    value = getattr(obj, 'id', _missing)
    return obj if value is _missing else value
def prepare_query_string(params):
    """Convert dict params to a query string (empty string for no params).

    Keys are sorted first so the encoded string is deterministic across
    Python versions and dict orderings.
    """
    if not params:
        return ''
    ordered = sorted(params.items(), key=lambda item: item[0])
    return '?%s' % parse.urlencode(ordered)
def get_url_with_filter(url, filters):
    """Return ``url`` with ``filters`` appended as an encoded query string."""
    return "%s%s" % (url, prepare_query_string(filters))
class Resource(object):
    """Base class for OpenStack resources (tenant, user, etc.).

    This is pretty much just a bag for attributes: every key of the
    ``info`` dict is mirrored onto the instance, and missing attributes
    trigger a one-shot lazy load via the manager (see ``__getattr__``).

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes
    instances unhashable on Python 3 -- confirm no caller uses resources
    as dict keys or set members.
    """

    # Subclasses set HUMAN_ID = True to expose a slugified `human_id`
    # derived from the attribute named by NAME_ATTR.
    HUMAN_ID = False
    NAME_ATTR = 'name'

    def __init__(self, manager, info, loaded=False):
        """Populate and bind to a manager.

        :param manager: BaseManager object
        :param info: dictionary representing resource attributes
        :param loaded: prevent lazy-loading if set to True
        """
        self.manager = manager
        self._info = info or {}
        # NOTE(review): _add_details would raise on info=None; callers
        # appear to always pass a dict -- confirm.
        self._add_details(info)
        self._loaded = loaded

    def __repr__(self):
        # Show all public attributes except the manager backreference.
        reprkeys = sorted(k
                          for k in self.__dict__.keys()
                          if k[0] != '_' and k != 'manager')
        info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
        return "<%s %s>" % (self.__class__.__name__, info)

    @property
    def api_version(self):
        # Delegates to the owning manager's API version.
        return self.manager.api_version

    @property
    def human_id(self):
        """Human-readable ID which can be used for bash completion."""
        if self.HUMAN_ID:
            name = getattr(self, self.NAME_ATTR, None)
            if name is not None:
                return strutils.to_slug(name)
        return None

    def _add_details(self, info):
        # Mirror each key from `info` onto the instance and keep the raw
        # value in the `_info` cache as well.
        for (k, v) in info.items():
            try:
                setattr(self, k, v)
                self._info[k] = v
            except AttributeError:
                # In this case we already defined the attribute on the class
                pass

    def __getattr__(self, k):
        # Only called when normal attribute lookup fails.  Fetch the full
        # resource once, then retry; the retried call lands in the `else`
        # branch (or raises if the attribute still isn't present).
        if k not in self.__dict__:
            # NOTE(bcwaldon): disallow lazy-loading if already loaded once
            if not self.is_loaded():
                self.get()
                return self.__getattr__(k)
            raise AttributeError(k)
        else:
            return self.__dict__[k]

    def get(self):
        """Support for lazy loading details.

        Some clients, such as novaclient have the option to lazy load the
        details, details which can be loaded with this function.
        """
        # set_loaded() first ... so if we have to bail, we know we tried.
        self.set_loaded(True)
        if not hasattr(self.manager, 'get'):
            return
        new = self.manager.get(self.id)
        if new:
            self._add_details(new._info)

    def __eq__(self, other):
        if not isinstance(other, Resource):
            return NotImplemented
        # two resources of different types are not equal
        if not isinstance(other, self.__class__):
            return False
        # Prefer comparing server-side IDs; fall back to raw attributes.
        if hasattr(self, 'id') and hasattr(other, 'id'):
            return self.id == other.id
        return self._info == other._info

    def __ne__(self, other):
        # Using not of '==' implementation because the not of
        # __eq__, when it returns NotImplemented, is returning False.
        return not self == other

    def is_loaded(self):
        # True once details have been fetched (or supplied up front).
        return self._loaded

    def set_loaded(self, val):
        self._loaded = val

    def set_info(self, key, value):
        # Update the raw info cache without touching instance attributes.
        self._info[key] = value

    def to_dict(self):
        # Deep copy so callers cannot mutate internal state.
        return copy.deepcopy(self._info)
class Manager(object):
    """Manager for API service.

    Managers interact with a particular type of API (buckets, revisions,
    etc.) and provide CRUD operations for them.  Responses are parsed as
    YAML and wrapped in ``resource_class`` instances.
    """

    # Resource subclass produced by the CRUD helpers; set by subclasses.
    resource_class = None

    def __init__(self, api):
        # `api` is the client facade exposing `.client` (HTTP transport)
        # and `.api_version`.
        self.api = api

    @property
    def client(self):
        # HTTP client used to issue all requests.
        return self.api.client

    @property
    def api_version(self):
        return self.api.api_version

    def _to_dict(self, body, many=False):
        """Convert YAML-formatted response body into dict or list.

        :param body: YAML-formatted response body to convert.
        :param many: Controls whether to return list or dict. If True, returns
            list, else dict. False by default.
        :rtype: dict or list, or None when the body is not valid YAML
        """
        try:
            return (
                list(yaml.safe_load_all(body))
                if many else yaml.safe_load(body)
            )
        except yaml.YAMLError:
            # NOTE(review): parse failures are reported as None rather
            # than raised; callers must cope with a None body.
            return None

    def _list(self, url, response_key=None, obj_class=None, body=None,
              filters=None):
        # List resources; supplying a request body switches GET -> POST.
        if filters:
            url = get_url_with_filter(url, filters)
        if body:
            resp, body = self.api.client.post(url, body=body)
        else:
            resp, body = self.api.client.get(url)
        body = self._to_dict(body, many=True)
        if obj_class is None:
            obj_class = self.resource_class
        # Unwrap the payload when the API nests it under `response_key`.
        if response_key is not None:
            data = body[response_key]
        else:
            data = body
        # Skip empty/None entries in the returned collection.
        items = [obj_class(self, res, loaded=True)
                 for res in data if res]
        return items

    def _get(self, url, response_key=None, filters=None):
        # Fetch a single resource, optionally unwrapping `response_key`.
        if filters:
            url = get_url_with_filter(url, filters)
        resp, body = self.api.client.get(url)
        body = self._to_dict(body)
        if response_key is not None:
            content = body[response_key]
        else:
            content = body
        return self.resource_class(self, content, loaded=True)

    def _create(self, url, data, response_key=None):
        # Strings are sent as a raw request body; other payloads are
        # serialized by the client via `data=`.
        if isinstance(data, six.string_types):
            resp, body = self.api.client.post(url, body=data)
        else:
            resp, body = self.api.client.post(url, data=data)
        body = self._to_dict(body)
        if body:
            if response_key:
                return self.resource_class(self, body[response_key])
            else:
                return self.resource_class(self, body)
        else:
            # Empty or unparseable response: return it as-is.
            return body

    def _delete(self, url):
        # Returns the (possibly None) parsed response body.
        resp, body = self.api.client.delete(url)
        body = self._to_dict(body)
        return body

    def _update(self, url, data, response_key=None):
        # Same body-vs-data convention as _create, but using PUT.
        if isinstance(data, six.string_types):
            resp, body = self.api.client.put(url, body=data)
        else:
            resp, body = self.api.client.put(url, data=data)
        body = self._to_dict(body)
        if body:
            if response_key:
                return self.resource_class(self, body[response_key])
            else:
                return self.resource_class(self, body)
        else:
            return body
| |
# Compatibility with Python 2
from __future__ import print_function
from scipy import sparse
import numpy as np
def write_int(f, x, name, *args):
    """Emit a C statement assigning integer ``x`` to ``name``.

    Each extra positional argument becomes a ``->`` dereference prefix
    (e.g. ``write_int(f, 3, "n", "data")`` emits ``data->n = 3;``).
    Without prefixes a new ``c_int`` variable is declared instead.

    NOTE(review): a one-element tuple passed as a prefix is flattened by
    %-formatting; `write_mat_sparse` relies on this, so keep %-style.
    """
    if any(args):
        prefix = "".join("%s->" % a for a in args)
        f.write(prefix + "%s = %i;\n" % (name, x))
    else:
        f.write("c_int %s = %i;\n" % (name, x))
def write_float(f, x, name, *args):
    """Emit a C statement assigning float ``x`` (20 decimals) to ``name``.

    Extra positional arguments become ``->`` dereference prefixes; with
    no prefixes a new ``c_float`` variable is declared instead.
    """
    if any(args):
        prefix = "".join("%s->" % a for a in args)
        f.write(prefix + "%s = %.20f;\n" % (name, x))
    else:
        f.write("c_float %s = %.20f;\n" % (name, x))
def write_vec_int(f, x, name, *args):
    """Emit C code allocating and filling a ``c_int`` array ``name``.

    Extra positional arguments become ``->`` dereference prefixes; with
    no prefixes a fresh pointer variable is declared.
    """
    length = len(x)
    deref = "".join("%s->" % a for a in args)
    if any(args):
        f.write(deref)
    else:
        f.write("c_int * ")
    f.write("%s = (c_int*) c_malloc(%i * sizeof(c_int));\n" % (name, length))
    for i, value in enumerate(x):
        f.write(deref)
        f.write("%s[%i] = " % (name, i))
        f.write("%i;\n" % value)
def write_vec_float(f, x, name, *args):
    """Emit C code allocating and filling a ``c_float`` array ``name``.

    Infinite entries are written as ``OSQP_INFTY`` / ``-OSQP_INFTY``;
    everything else uses 20 decimal places.  Extra positional arguments
    become ``->`` dereference prefixes; with no prefixes a fresh pointer
    variable is declared.
    """
    length = len(x)
    deref = "".join("%s->" % a for a in args)
    if any(args):
        f.write(deref)
    else:
        f.write("c_float * ")
    f.write("%s = (c_float*) c_malloc(%i * sizeof(c_float));\n" % (name, length))
    for i, value in enumerate(x):
        f.write(deref)
        f.write("%s[%i] = " % (name, i))
        if value == np.inf:
            f.write("OSQP_INFTY;\n")
        elif value == -np.inf:
            f.write("-OSQP_INFTY;\n")
        else:
            f.write("%.20f;\n" % value)
def clean_vec(f, name, *args):
    """Emit ``c_free(...)`` for the named vector, with optional prefixes."""
    pieces = ["c_free("]
    if any(args):
        pieces.extend("%s->" % a for a in args)
    pieces.append("%s);\n" % name)
    f.write("".join(pieces))
def write_mat_sparse(f, A, name, *args):
    """Emit C code allocating and filling a ``csc`` sparse matrix ``name``.

    ``A`` is expected to be a scipy CSC sparse matrix (this code reads
    ``.shape``, ``.nnz``, ``.data``, ``.indices`` and ``.indptr`` --
    TODO confirm callers always pass CSC, not CSR).  Extra positional
    ``args`` are struct names used as ``->`` dereference prefixes.
    """
    m = A.shape[0]
    n = A.shape[1]
    f.write("\n// Matrix " + name + "\n")
    f.write("//")
    f.write("-"*(len("Matrix ") + len(name)) + "\n")
    # Allocate Matrix
    if any(args):
        for arg in args:
            f.write("%s->" % arg)
    else:
        f.write("csc * ")
    f.write(name + " = (csc*) c_malloc(sizeof(csc));\n")
    # Write dimensions and number of nonzeros.
    # NOTE(review): `args` (a tuple) is passed to write_int as ONE
    # positional argument; it renders correctly only because %-formatting
    # unpacks a one-element tuple.  Two or more prefixes would produce
    # wrong output -- confirm callers never pass more than one.
    if any(args):
        write_int(f, m, "m", args, name)
        write_int(f, n, "n", args, name)
        write_int(f, -1, "nz", args, name)
        write_int(f, A.nnz, "nzmax", args, name)
    else:
        write_int(f, m, "m", name)
        write_int(f, n, "n", name)
        write_int(f, -1, "nz", name)
        write_int(f, A.nnz, "nzmax", name)
    # Nonzero values array `x` (NULL for an empty matrix).
    for arg in args:
        f.write("%s->" % arg)
    if min(m, n) == 0:
        f.write("%s->x = OSQP_NULL;\n" % name)
    else:
        f.write("%s->" % name)
        f.write("x = (c_float*) c_malloc(%i * sizeof(c_float));\n" % A.nnz)
        for i in range(A.nnz):
            for arg in args:
                f.write("%s->" % arg)
            f.write("%s->" % name)
            f.write("x[%i] = %.20f;\n" % (i, A.data[i]))
    # Row index array `i` (NULL for an empty matrix).
    for arg in args:
        f.write("%s->" % arg)
    if min(m, n) == 0:
        f.write("%s->i = OSQP_NULL;\n" % name)
    else:
        f.write("%s->" % name)
        f.write("i = (c_int*) c_malloc(%i * sizeof(c_int));\n" % A.nnz)
        for i in range(A.nnz):
            for arg in args:
                f.write("%s->" % arg)
            f.write("%s->" % name)
            f.write("i[%i] = %i;\n" % (i, A.indices[i]))
    # Column pointer array `p` (always n+1 entries, even when empty).
    for arg in args:
        f.write("%s->" % arg)
    f.write("%s->" % name)
    f.write("p = (c_int*) c_malloc((%i + 1) * sizeof(c_int));\n" % n)
    for i in range(A.shape[1] + 1):
        for arg in args:
            f.write("%s->" % arg)
        f.write("%s->" % name)
        f.write("p[%i] = %i;\n" % (i, A.indptr[i]))
    f.write("\n")
def clean_mat(f, name, *args):
    """Emit ``c_free`` calls releasing a csc matrix: x, i, p, then the struct.

    Extra positional arguments become ``->`` dereference prefixes.
    """
    deref = "".join("%s->" % a for a in args)
    # Free the data (x), row index (i) and column pointer (p) arrays.
    for field in ("x", "i", "p"):
        f.write("c_free(")
        if any(args):
            f.write(deref)
        f.write("%s->%s);\n" % (name, field))
    # Finally free the matrix struct itself.
    f.write("c_free(")
    if any(args):
        f.write(deref)
    f.write("%s);\n" % name)
def generate_problem_data(P, q, A, l, u, problem_name, sols_data=None):
    """Generate the C header (``<problem_name>/data.h``) with test problem data.

    Emits the QP problem data (P, q, A, l, u) plus any additional
    solution vectors/scalars given in ``sols_data``, along with the
    matching generate/clean C functions.

    :param P: quadratic cost matrix (scipy sparse)
    :param q: linear cost vector
    :param A: constraint matrix (scipy sparse)
    :param l: lower bound vector
    :param u: upper bound vector
    :param problem_name: directory/prefix used for the generated header
    :param sols_data: optional dict of extra data; values may be status
        strings, numpy arrays, or int/float scalars
    """
    # Fix: the original used a mutable default argument ({}) which is
    # shared across calls; use None and create a fresh dict instead.
    if sols_data is None:
        sols_data = {}

    # Get problem dimension
    n = P.shape[0]
    m = A.shape[0]

    # Fix: `with` guarantees the header file is closed (and flushed)
    # even if one of the writes below raises.
    with open(problem_name + "/data.h", "w") as f:
        # Add definition check
        f.write("#ifndef " + problem_name.upper() + "_DATA_H\n")
        f.write("#define " + problem_name.upper() + "_DATA_H\n")

        # Add Includes
        f.write("#include \"osqp.h\"\n")
        f.write("\n\n")

        # Create additional data structure
        f.write("/* create additional data and solutions structure */\n")
        f.write("typedef struct {\n")
        for key, value in sols_data.items():
            if isinstance(value, str):
                # Status test get from C code
                f.write("c_int %s;\n" % key)
            elif isinstance(value, np.ndarray):
                # NOTE(review): numpy integer scalars are not `int`
                # subclasses on Python 3, so integer arrays may match
                # neither branch and emit no field -- confirm against
                # np.integer if this matters.
                if isinstance(value.flatten(order='F')[0], int):
                    f.write("c_int * %s;\n" % key)
                elif isinstance(value.flatten(order='F')[0], float):
                    f.write("c_float * %s;\n" % key)
            else:
                if isinstance(value, int):
                    f.write("c_int %s;\n" % key)
                elif isinstance(value, float):
                    f.write("c_float %s;\n" % key)
        f.write("} %s_sols_data;\n\n" % problem_name)

        # prototypes
        f.write("/* function prototypes */\n")
        f.write("OSQPData * generate_problem_%s();\n" % problem_name)
        f.write("void clean_problem_%s(OSQPData * data);\n" % problem_name)
        f.write("%s_sols_data * generate_problem_%s_sols_data();\n" % (problem_name, problem_name))
        f.write("void clean_problem_%s_sols_data(%s_sols_data * data);\n" % (problem_name, problem_name))
        f.write("\n\n")

        # Generate QP problem data
        f.write("/* function to generate QP problem data */\n")
        f.write("OSQPData * generate_problem_%s(){\n\n" % problem_name)
        f.write("OSQPData * data = (OSQPData *)c_malloc(sizeof(OSQPData));\n\n")

        # Write problem dimensions
        f.write("// Problem dimensions\n")
        write_int(f, n, "n", "data")
        write_int(f, m, "m", "data")
        f.write("\n")

        # Write problem vectors
        f.write("// Problem vectors\n")
        write_vec_float(f, l, "l", "data")
        write_vec_float(f, u, "u", "data")
        write_vec_float(f, q, "q", "data")
        f.write("\n")

        # Write matrices A and P
        write_mat_sparse(f, A, "A", "data")
        write_mat_sparse(f, P, "P", "data")

        # Return data and end function
        f.write("return data;\n\n")
        f.write("}\n\n")

        # Function to clean the problem data structure
        f.write("/* function to clean problem data structure */\n")
        f.write("void clean_problem_%s(OSQPData * data){\n\n" % problem_name)
        f.write("// Clean vectors\n")
        clean_vec(f, "l", "data")
        clean_vec(f, "u", "data")
        clean_vec(f, "q", "data")
        f.write("\n")
        f.write("//Clean Matrices\n")
        clean_mat(f, "A", "data")
        clean_mat(f, "P", "data")
        f.write("\n")
        f.write("c_free(data);\n\n")
        f.write("}\n\n")

        # Additional problem data for solutions
        f.write("/* function to define solutions and additional data struct */\n")
        f.write("%s_sols_data * generate_problem_%s_sols_data(){\n\n" % (problem_name, problem_name))
        f.write("%s_sols_data * data = (%s_sols_data *)c_malloc(sizeof(%s_sols_data));\n\n" % (problem_name, problem_name, problem_name))
        for key, value in sols_data.items():
            if isinstance(value, str):
                # Map the Python status string onto the OSQP status macro.
                if value == 'optimal':
                    f.write("data->%s = %s;\n" % (key, 'OSQP_SOLVED'))
                elif value == 'optimal_inaccurate':
                    f.write("data->%s = %s;\n" % (key, 'OSQP_SOLVED_INACCURATE'))
                elif value == 'primal_infeasible':
                    f.write("data->%s = %s;\n" % (key, 'OSQP_PRIMAL_INFEASIBLE'))
                elif value == 'primal_infeasible_inaccurate':
                    f.write("data->%s = %s;\n" %
                            (key, 'OSQP_PRIMAL_INFEASIBLE_INACCURATE'))
                elif value == 'dual_infeasible':
                    f.write("data->%s = %s;\n" % (key, 'OSQP_DUAL_INFEASIBLE'))
                elif value == 'dual_infeasible_inaccurate':
                    f.write("data->%s = %s;\n" % (key, 'OSQP_DUAL_INFEASIBLE_INACCURATE'))
            # Arrays are written element by element; scalars directly.
            # (Strings fall through here too but match neither branch.)
            if type(value) is np.ndarray:
                if isinstance(value.flatten(order='F')[0], int):
                    write_vec_int(f, value.flatten(order='F'), key, "data")
                elif isinstance(value.flatten(order='F')[0], float):
                    write_vec_float(f, value.flatten(order='F'), key, "data")
            else:
                if isinstance(value, int):
                    write_int(f, value, key, "data")
                elif isinstance(value, float):
                    write_float(f, value, key, "data")
        f.write("\nreturn data;\n\n")
        f.write("}\n\n")

        # Clean additional problem data for solutions
        f.write("/* function to clean solutions and additional data struct */\n")
        f.write("void clean_problem_%s_sols_data(%s_sols_data * data){\n\n" % (problem_name, problem_name))
        for key, value in sols_data.items():
            # Only arrays were heap-allocated above.
            if type(value) is np.ndarray:
                clean_vec(f, key, "data")
        f.write("\nc_free(data);\n\n")
        f.write("}\n\n")

        f.write("#endif\n")
def generate_data(problem_name, sols_data):
    """Generate the C header (``<problem_name>/data.h``) with test data vectors.

    Like :func:`generate_problem_data` but without the QP problem itself:
    only the additional vectors/scalars/matrices in ``sols_data`` are
    emitted, along with their generate/clean C functions.

    :param problem_name: directory/prefix used for the generated header
    :param sols_data: dict of data; values may be status strings, scipy
        sparse matrices, numpy arrays, or int/float scalars
    """
    # Fix: `with` guarantees the header file is closed (and flushed)
    # even if one of the writes below raises.
    with open(problem_name + "/data.h", "w") as f:
        # Add definition check
        f.write("#ifndef " + problem_name.upper() + "_DATA_H\n")
        f.write("#define " + problem_name.upper() + "_DATA_H\n")

        # Add Includes
        f.write("#include \"osqp.h\"\n")
        f.write("\n\n")

        # Create data structure
        f.write("/* create data and solutions structure */\n")
        f.write("typedef struct {\n")
        for key, value in sols_data.items():
            if isinstance(value, str):
                # Status test get from C code
                f.write("c_int %s;\n" % key)
            elif sparse.issparse(value):  # Sparse matrix
                f.write("csc * %s;\n" % key)
            elif isinstance(value, np.ndarray):
                # NOTE(review): numpy integer scalars are not `int`
                # subclasses on Python 3 -- integer arrays may match
                # neither branch; confirm against np.integer if needed.
                if isinstance(value.flatten(order='F')[0], int):
                    f.write("c_int * %s;\n" % key)
                elif isinstance(value.flatten(order='F')[0], float):
                    f.write("c_float * %s;\n" % key)
            else:
                if isinstance(value, int):
                    f.write("c_int %s;\n" % key)
                elif isinstance(value, float):
                    f.write("c_float %s;\n" % key)
        f.write("} %s_sols_data;\n\n" % problem_name)

        # prototypes
        f.write("/* function prototypes */\n")
        f.write("%s_sols_data * generate_problem_%s_sols_data();\n" % (problem_name, problem_name))
        f.write("void clean_problem_%s_sols_data(%s_sols_data * data);\n" % (problem_name, problem_name))
        f.write("\n\n")

        # Data generator function
        f.write("/* function to define problem data */\n")
        f.write("%s_sols_data * generate_problem_%s_sols_data(){\n\n" % (problem_name, problem_name))
        f.write("%s_sols_data * data = (%s_sols_data *)c_malloc(sizeof(%s_sols_data));\n\n" % (problem_name, problem_name, problem_name))
        for key, value in sols_data.items():
            if isinstance(value, str):
                # Map the Python status string onto the OSQP status macro.
                if value == 'optimal':
                    f.write("data->%s = %s;\n" % (key, 'OSQP_SOLVED'))
                elif value == 'primal_infeasible':
                    f.write("data->%s = %s;\n" % (key, 'OSQP_PRIMAL_INFEASIBLE'))
                elif value == 'dual_infeasible':
                    f.write("data->%s = %s;\n" % (key, 'OSQP_DUAL_INFEASIBLE'))
            elif sparse.issparse(value):  # Sparse matrix
                write_mat_sparse(f, value, key, "data")
            elif type(value) is np.ndarray:
                if isinstance(value.flatten(order='F')[0], int):
                    write_vec_int(f, value.flatten(order='F'), key, "data")
                elif isinstance(value.flatten(order='F')[0], float):
                    write_vec_float(f, value.flatten(order='F'), key, "data")
            else:
                if isinstance(value, int):
                    write_int(f, value, key, "data")
                elif isinstance(value, float):
                    write_float(f, value, key, "data")
        f.write("\nreturn data;\n\n")
        f.write("}\n\n")

        # Data cleaner function
        f.write("/* function to clean data struct */\n")
        f.write("void clean_problem_%s_sols_data(%s_sols_data * data){\n\n" % (problem_name, problem_name))
        for key, value in sols_data.items():
            # Only matrices and arrays were heap-allocated above.
            if sparse.issparse(value):  # Sparse matrix
                clean_mat(f, key, "data")
            elif type(value) is np.ndarray:
                clean_vec(f, key, "data")
        f.write("\nc_free(data);\n\n")
        f.write("}\n\n")

        f.write("#endif\n")
| |
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""Routines to generate WSGI responses"""
############################################################
## Headers
############################################################
import warnings
class HeaderDict(dict):
    """Response headers as a dictionary with case-insensitive keys.

    Keys are normalized (lowercased, stripped) on every access.  The
    ``add(key, value)`` method sets the key or, if already present, turns
    the stored value into a list and appends to it.  ``headeritems()`` is
    like ``items()`` but unpacks list values into multiple (key, value)
    pairs with values coerced to strings, as needed for WSGI.
    """

    def normalize(self, key):
        """Return the canonical (lowercase, stripped) form of ``key``."""
        return str(key).lower().strip()

    def __getitem__(self, key):
        return dict.__getitem__(self, self.normalize(key))

    def __setitem__(self, key, value):
        dict.__setitem__(self, self.normalize(key), value)

    def __delitem__(self, key):
        dict.__delitem__(self, self.normalize(key))

    def __contains__(self, key):
        return dict.__contains__(self, self.normalize(key))

    # Backwards-compatible alias kept from the dict-era API.
    has_key = __contains__

    def get(self, key, failobj=None):
        return dict.get(self, self.normalize(key), failobj)

    def setdefault(self, key, failobj=None):
        return dict.setdefault(self, self.normalize(key), failobj)

    def pop(self, key):
        return dict.pop(self, self.normalize(key))

    def update(self, other):
        # Route through __setitem__ so every key gets normalized.
        for key in other:
            self[key] = other[key]

    def add(self, key, value):
        """Set ``key``, or append to its current value(s) if already set."""
        key = self.normalize(key)
        if key not in self:
            self[key] = value
        elif isinstance(self[key], list):
            self[key].append(value)
        else:
            self[key] = [self[key], value]

    def headeritems(self):
        """Like ``items()``, but list values become multiple pairs."""
        result = []
        for key, value in self.items():
            if isinstance(value, list):
                result.extend((key, str(v)) for v in value)
            else:
                result.append((key, str(value)))
        return result

    @classmethod
    def fromlist(cls, seq):
        """Build a HeaderDict from an iterable of (name, value) pairs."""
        headers = cls()
        for name, value in seq:
            headers.add(name, value)
        return headers
def has_header(headers, name):
    """Return True if a header named ``name`` (case-insensitive) is present."""
    wanted = name.lower()
    for header, value in headers:
        if header.lower() == wanted:
            return True
    return False
def header_value(headers, name):
    """Return the header's value, or None if no such header.

    If a header appears more than once, all values are joined with ','.
    This is consistent with RFC 2616 section 4.2, which states that
    multiple header fields MUST be combinable into one
    "field-name: field-value" pair by appending each subsequent
    field-value, separated by commas.  Note that the original netscape
    usage of 'Set-Cookie', especially in MSIE which contains an
    'expires' date, is not compatible with this concatenation method.
    """
    wanted = name.lower()
    matches = [value for header, value in headers
               if header.lower() == wanted]
    return ','.join(matches) if matches else None
def remove_header(headers, name):
    """Remove every header named ``name`` from ``headers`` (in place).

    Returns the value of the LAST matching header, or None if no header
    was found.
    """
    wanted = name.lower()
    result = None
    found = False
    # Walk backwards so deletions never shift indices we have yet to visit;
    # the first hit going backwards is the last match in list order.
    for idx in range(len(headers) - 1, -1, -1):
        if headers[idx][0].lower() == wanted:
            if not found:
                result = headers[idx][1]
                found = True
            del headers[idx]
    return result
def replace_header(headers, name, value):
    """Replace the first occurrence of header ``name`` with ``value``.

    Raises AssertionError if the header occurs more than once (one should
    not use this with ``set-cookie`` or other repeatable headers).  This
    is _not_ the same as remove_header followed by append: the two-step
    form is not atomic in a threaded environment.  If the header is not
    present it is appended.  Returns the previous value, if any.

    Fixes over the original: a falsy previous value (e.g. '') no longer
    defeats the duplicate check or causes a spurious append, and the
    duplicate check raises explicitly so it survives ``python -O``.
    """
    name = name.lower()
    result = None
    found = False
    for i, (key, old_value) in enumerate(headers):
        if key.lower() == name:
            if found:
                # Same exception type callers previously caught from the
                # bare `assert`, but not stripped under -O.
                raise AssertionError(
                    "two values for the header '%s' found" % name)
            found = True
            result = old_value
            headers[i] = (name, value)
    if not found:
        headers.append((name, value))
    return result
############################################################
## Deprecated methods
############################################################
def error_body_response(error_code, message, __warn=True):
    """Return a standard HTML response page for an HTTP error.

    **Note:** Deprecated
    """
    if __warn:
        warnings.warn(
            'wsgilib.error_body_response is deprecated; use the '
            'wsgi_application method on an HTTPException object '
            'instead', DeprecationWarning, 2)
    template = '''\
<html>
  <head>
    <title>%(error_code)s</title>
  </head>
  <body>
    <h1>%(error_code)s</h1>
    %(message)s
  </body>
</html>'''
    return template % {
        'error_code': error_code,
        'message': message,
    }
def error_response(environ, error_code, message,
                   debug_message=None, __warn=True):
    """Return the status, headers, and body of an error response.

    Use like:

    .. code-block:: Python

        status, headers, body = wsgilib.error_response(
            '301 Moved Permanently', 'Moved to <a href="%s">%s</a>'
            % (url, url))
        start_response(status, headers)
        return [body]

    **Note:** Deprecated
    """
    if __warn:
        warnings.warn(
            'wsgilib.error_response is deprecated; use the '
            'wsgi_application method on an HTTPException object '
            'instead', DeprecationWarning, 2)
    # Surface the debug message as an HTML comment only in debug mode.
    if debug_message and environ.get('paste.config', {}).get('debug'):
        message = '%s\n\n<!-- %s -->' % (message, debug_message)
    body = error_body_response(error_code, message, __warn=False)
    headers = [
        ('content-type', 'text/html'),
        ('content-length', str(len(body))),
    ]
    return error_code, headers, body
def error_response_app(error_code, message, debug_message=None,
                       __warn=True):
    """Return a WSGI application that emits the given error response.

    **Note:** Deprecated
    """
    if __warn:
        warnings.warn(
            'wsgilib.error_response_app is deprecated; use the '
            'wsgi_application method on an HTTPException object '
            'instead', DeprecationWarning, 2)

    def application(environ, start_response):
        # Build the response lazily, per request, without re-warning.
        status, headers, body = error_response(
            environ, error_code, message,
            debug_message=debug_message, __warn=False)
        start_response(status, headers)
        return [body]

    return application
| |
#
#
# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Global Configuration data for Ganeti.
This module provides the interface to a special case of cluster
configuration data, which is mostly static and available to all nodes.
"""
import sys
import errno
import logging
from ganeti import compat
from ganeti import errors
from ganeti import constants
from ganeti import utils
from ganeti import netutils
from ganeti import pathutils
# Timeout (seconds) when acquiring the exclusive ssconf lock in WriteFiles
SSCONF_LOCK_TIMEOUT = 10
#: Valid ssconf keys; KeyToFilename rejects anything not listed here
_VALID_KEYS = compat.UniqueFrozenset([
  constants.SS_CLUSTER_NAME,
  constants.SS_CLUSTER_TAGS,
  constants.SS_FILE_STORAGE_DIR,
  constants.SS_SHARED_FILE_STORAGE_DIR,
  constants.SS_GLUSTER_STORAGE_DIR,
  constants.SS_MASTER_CANDIDATES,
  constants.SS_MASTER_CANDIDATES_IPS,
  constants.SS_MASTER_CANDIDATES_CERTS,
  constants.SS_MASTER_IP,
  constants.SS_MASTER_NETDEV,
  constants.SS_MASTER_NETMASK,
  constants.SS_MASTER_NODE,
  constants.SS_NODE_LIST,
  constants.SS_NODE_PRIMARY_IPS,
  constants.SS_NODE_SECONDARY_IPS,
  constants.SS_NODE_VM_CAPABLE,
  constants.SS_OFFLINE_NODES,
  constants.SS_ONLINE_NODES,
  constants.SS_PRIMARY_IP_FAMILY,
  constants.SS_INSTANCE_LIST,
  constants.SS_RELEASE_VERSION,
  constants.SS_HYPERVISOR_LIST,
  constants.SS_MAINTAIN_NODE_HEALTH,
  constants.SS_UID_POOL,
  constants.SS_NODEGROUPS,
  constants.SS_NETWORKS,
  constants.SS_HVPARAMS_XEN_PVM,
  constants.SS_HVPARAMS_XEN_FAKE,
  constants.SS_HVPARAMS_XEN_HVM,
  constants.SS_HVPARAMS_XEN_KVM,
  constants.SS_HVPARAMS_XEN_CHROOT,
  constants.SS_HVPARAMS_XEN_LXC,
  constants.SS_ENABLED_USER_SHUTDOWN,
  ])
#: Maximum size for ssconf files (both when reading and writing)
_MAX_SIZE = 128 * 1024
def ReadSsconfFile(filename):
  """Reads an ssconf file and verifies its size.
  @type filename: string
  @param filename: Path to file
  @rtype: string
  @return: File contents without newlines at the end
  @raise RuntimeError: When the file size exceeds L{_MAX_SIZE}
  """
  stat_helper = utils.FileStatHelper()
  # Reading is capped at _MAX_SIZE; the stat callback records the real size
  # so oversized files can be rejected instead of silently truncated.
  contents = utils.ReadFile(filename, size=_MAX_SIZE, preread=stat_helper)
  actual_size = stat_helper.st.st_size
  if actual_size > _MAX_SIZE:
    raise RuntimeError("File '%s' has a size of %s bytes (up to %s allowed)" %
                       (filename, actual_size, _MAX_SIZE))
  return contents.rstrip("\n")
class SimpleStore(object):
  """Interface to static cluster data.
  This is different from the config.ConfigWriter and
  SimpleConfigReader classes in that it holds data that will always be
  present, even on nodes which don't have all the cluster data.
  Other particularities of the datastore:
    - keys are restricted to predefined values
  """
  def __init__(self, cfg_location=None, _lockfile=pathutils.SSCONF_LOCK_FILE):
    # _lockfile is overridable for tests; normal callers use the default
    if cfg_location is None:
      self._cfg_dir = pathutils.DATA_DIR
    else:
      self._cfg_dir = cfg_location
    self._lockfile = _lockfile
  def KeyToFilename(self, key):
    """Convert a given key into filename.
    @raise errors.ProgrammerError: when the key is not in L{_VALID_KEYS}
    """
    if key not in _VALID_KEYS:
      raise errors.ProgrammerError("Invalid key requested from SSConf: '%s'"
                                   % str(key))
    filename = self._cfg_dir + "/" + constants.SSCONF_FILEPREFIX + key
    return filename
  def _ReadFile(self, key, default=None):
    """Generic routine to read keys.
    This will read the file which holds the value requested. Errors
    will be changed into ConfigurationErrors.
    """
    filename = self.KeyToFilename(key)
    try:
      return ReadSsconfFile(filename)
    except EnvironmentError, err:
      # A missing file is acceptable when the caller supplied a default;
      # every other I/O error becomes a ConfigurationError
      if err.errno == errno.ENOENT and default is not None:
        return default
      raise errors.ConfigurationError("Can't read ssconf file %s: %s" %
                                      (filename, str(err)))
  def ReadAll(self):
    """Reads all keys and returns their values.
    @rtype: dict
    @return: Dictionary, ssconf key as key, value as value
    """
    result = []
    for key in _VALID_KEYS:
      try:
        value = self._ReadFile(key)
      except errors.ConfigurationError:
        # Ignore non-existing files
        pass
      else:
        result.append((key, value))
    return dict(result)
  def WriteFiles(self, values, dry_run=False):
    """Writes ssconf files used by external scripts.
    @type values: dict
    @param values: Dictionary of (name, value)
    @type dry_run: boolean
    @param dry_run: Whether to perform a dry run
    """
    ssconf_lock = utils.FileLock.Open(self._lockfile)
    # Get lock while writing files
    ssconf_lock.Exclusive(blocking=True, timeout=SSCONF_LOCK_TIMEOUT)
    try:
      for name, value in values.iteritems():
        # Sequences are stored one item per line
        if isinstance(value, (list, tuple)):
          value = "\n".join(value)
        # Ensure a trailing newline so ReadSsconfFile's rstrip round-trips
        if value and not value.endswith("\n"):
          value += "\n"
        if len(value) > _MAX_SIZE:
          msg = ("Value '%s' has a length of %s bytes, but only up to %s are"
                 " allowed" % (name, len(value), _MAX_SIZE))
          raise errors.ConfigurationError(msg)
        utils.WriteFile(self.KeyToFilename(name), data=value,
                        mode=constants.SS_FILE_PERMS,
                        dry_run=dry_run)
    finally:
      ssconf_lock.Unlock()
  def GetFileList(self):
    """Return the list of all config files.
    This is used for computing node replication data.
    """
    return [self.KeyToFilename(key) for key in _VALID_KEYS]
  def GetClusterName(self):
    """Get the cluster name.
    """
    return self._ReadFile(constants.SS_CLUSTER_NAME)
  def GetFileStorageDir(self):
    """Get the file storage dir.
    """
    return self._ReadFile(constants.SS_FILE_STORAGE_DIR)
  def GetSharedFileStorageDir(self):
    """Get the shared file storage dir.
    """
    return self._ReadFile(constants.SS_SHARED_FILE_STORAGE_DIR)
  def GetGlusterStorageDir(self):
    """Get the Gluster storage dir.
    """
    return self._ReadFile(constants.SS_GLUSTER_STORAGE_DIR)
  def GetMasterCandidates(self):
    """Return the list of master candidates.
    """
    data = self._ReadFile(constants.SS_MASTER_CANDIDATES)
    nl = data.splitlines(False)
    return nl
  def GetMasterCandidatesIPList(self):
    """Return the list of master candidates' primary IP.
    """
    data = self._ReadFile(constants.SS_MASTER_CANDIDATES_IPS)
    nl = data.splitlines(False)
    return nl
  def GetMasterCandidatesCertMap(self):
    """Returns the map of master candidate UUIDs to ssl cert.
    @rtype: dict of string to string
    @return: dictionary mapping the master candidates' UUIDs
      to their SSL certificate digests
    """
    data = self._ReadFile(constants.SS_MASTER_CANDIDATES_CERTS)
    lines = data.splitlines(False)
    certs = {}
    # Each line has the form "<node_uuid>=<cert_digest>"
    for line in lines:
      (node_uuid, cert_digest) = line.split("=")
      certs[node_uuid] = cert_digest
    return certs
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.
    """
    return self._ReadFile(constants.SS_MASTER_IP)
  def GetMasterNetdev(self):
    """Get the netdev to which we'll add the master ip.
    """
    return self._ReadFile(constants.SS_MASTER_NETDEV)
  def GetMasterNetmask(self):
    """Get the master netmask.
    """
    try:
      return self._ReadFile(constants.SS_MASTER_NETMASK)
    except errors.ConfigurationError:
      # No netmask file: fall back to the default prefix length of the
      # cluster's primary IP family
      family = self.GetPrimaryIPFamily()
      ipcls = netutils.IPAddress.GetClassFromIpFamily(family)
      return ipcls.iplen
  def GetMasterNode(self):
    """Get the hostname of the master node for this cluster.
    """
    return self._ReadFile(constants.SS_MASTER_NODE)
  def GetNodeList(self):
    """Return the list of cluster nodes.
    """
    data = self._ReadFile(constants.SS_NODE_LIST)
    nl = data.splitlines(False)
    return nl
  def GetOnlineNodeList(self):
    """Return the list of online cluster nodes.
    """
    data = self._ReadFile(constants.SS_ONLINE_NODES)
    nl = data.splitlines(False)
    return nl
  def GetNodePrimaryIPList(self):
    """Return the list of cluster nodes' primary IP.
    """
    data = self._ReadFile(constants.SS_NODE_PRIMARY_IPS)
    nl = data.splitlines(False)
    return nl
  def GetNodeSecondaryIPList(self):
    """Return the list of cluster nodes' secondary IP.
    """
    data = self._ReadFile(constants.SS_NODE_SECONDARY_IPS)
    nl = data.splitlines(False)
    return nl
  def GetNodesVmCapable(self):
    """Return the cluster nodes' vm capable value.
    @rtype: dict of string to bool
    @return: mapping of node names to vm capable values
    """
    data = self._ReadFile(constants.SS_NODE_VM_CAPABLE)
    vm_capable = {}
    # Each line has the form "<node>=True" or "<node>=False"
    for line in data.splitlines(False):
      (node_uuid, node_vm_capable) = line.split("=")
      vm_capable[node_uuid] = node_vm_capable == "True"
    return vm_capable
  def GetNodegroupList(self):
    """Return the list of nodegroups.
    """
    data = self._ReadFile(constants.SS_NODEGROUPS)
    nl = data.splitlines(False)
    return nl
  def GetNetworkList(self):
    """Return the list of networks.
    """
    data = self._ReadFile(constants.SS_NETWORKS)
    nl = data.splitlines(False)
    return nl
  def GetClusterTags(self):
    """Return the cluster tags.
    """
    data = self._ReadFile(constants.SS_CLUSTER_TAGS)
    nl = data.splitlines(False)
    return nl
  def GetHypervisorList(self):
    """Return the list of enabled hypervisors.
    """
    data = self._ReadFile(constants.SS_HYPERVISOR_LIST)
    nl = data.splitlines(False)
    return nl
  def GetHvparamsForHypervisor(self, hvname):
    """Return the hypervisor parameters of the given hypervisor.
    @type hvname: string
    @param hvname: name of the hypervisor, must be in C{constants.HYPER_TYPES}
    @rtype: dict of strings
    @returns: dictionary with hypervisor parameters
    """
    data = self._ReadFile(constants.SS_HVPARAMS_PREF + hvname)
    lines = data.splitlines(False)
    hvparams = {}
    # Each line has the form "<key>=<value>"; values stay raw strings here
    for line in lines:
      (key, value) = line.split("=")
      hvparams[key] = value
    return hvparams
  def GetHvparams(self):
    """Return the hypervisor parameters of all hypervisors.
    @rtype: dict of dict of strings
    @returns: dictionary mapping hypervisor names to hvparams
    """
    all_hvparams = {}
    for hv in constants.HYPER_TYPES:
      all_hvparams[hv] = self.GetHvparamsForHypervisor(hv)
    return all_hvparams
  def GetMaintainNodeHealth(self):
    """Return the value of the maintain_node_health option.
    """
    data = self._ReadFile(constants.SS_MAINTAIN_NODE_HEALTH)
    # we rely on the bool serialization here
    return data == "True"
  def GetUidPool(self):
    """Return the user-id pool definition string.
    The separator character is a newline.
    The return value can be parsed using uidpool.ParseUidPool()::
      ss = ssconf.SimpleStore()
      uid_pool = uidpool.ParseUidPool(ss.GetUidPool(), separator="\\n")
    """
    data = self._ReadFile(constants.SS_UID_POOL)
    return data
  def GetPrimaryIPFamily(self):
    """Return the cluster-wide primary address family.
    @raise errors.ConfigurationError: if the file contents are not parseable
    """
    try:
      # Missing file defaults to IPv4
      return int(self._ReadFile(constants.SS_PRIMARY_IP_FAMILY,
                                default=netutils.IP4Address.family))
    except (ValueError, TypeError), err:
      raise errors.ConfigurationError("Error while trying to parse primary IP"
                                      " family: %s" % err)
  def GetEnabledUserShutdown(self):
    """Return whether user shutdown is enabled.
    @rtype: bool
    @return: 'True' if user shutdown is enabled, 'False' otherwise
    """
    return self._ReadFile(constants.SS_ENABLED_USER_SHUTDOWN) == "True"
def WriteSsconfFiles(values, dry_run=False):
  """Update all ssconf files.
  Wrapper around L{SimpleStore.WriteFiles}.
  """
  store = SimpleStore()
  store.WriteFiles(values, dry_run=dry_run)
def GetMasterAndMyself(ss=None):
  """Get the master node and my own hostname.
  This can be either used for a 'soft' check (compared to CheckMaster,
  which exits) or just for computing both at the same time.
  The function does not handle any errors, these should be handled in
  the caller (errors.ConfigurationError, errors.ResolverError).
  @param ss: either a sstore.SimpleConfigReader or a
      sstore.SimpleStore instance
  @rtype: tuple
  @return: a tuple (master node name, my own name)
  """
  if ss is None:
    ss = SimpleStore()
  master_name = ss.GetMasterNode()
  my_name = netutils.Hostname.GetSysName()
  return (master_name, my_name)
def CheckMaster(debug, ss=None):
  """Checks the node setup.
  If this is the master, the function will return. Otherwise it will
  exit with an exit code based on the node status.
  @type debug: boolean
  @param debug: whether to print a message when exiting as non-master
  @param ss: optional store instance, passed through to GetMasterAndMyself
  """
  try:
    master_name, myself = GetMasterAndMyself(ss)
  except errors.ConfigurationError, err:
    print "Cluster configuration incomplete: '%s'" % str(err)
    sys.exit(constants.EXIT_NODESETUP_ERROR)
  except errors.ResolverError, err:
    sys.stderr.write("Cannot resolve my own name (%s)\n" % err.args[0])
    sys.exit(constants.EXIT_NODESETUP_ERROR)
  # Not being the master is not an error as such, but gets its own exit code
  if myself != master_name:
    if debug:
      sys.stderr.write("Not master, exiting.\n")
    sys.exit(constants.EXIT_NOTMASTER)
def VerifyClusterName(name, _cfg_location=None):
  """Verifies cluster name against a local cluster name.
  @type name: string
  @param name: Cluster name
  @raise errors.GenericError: when the given name differs from the locally
    configured cluster name
  """
  sstore = SimpleStore(cfg_location=_cfg_location)
  try:
    local_name = sstore.GetClusterName()
  except errors.ConfigurationError, err:
    # No readable local configuration means we cannot verify; accept the name
    logging.debug("Can't get local cluster name: %s", err)
  else:
    if name != local_name:
      raise errors.GenericError("Current cluster name is '%s'" % local_name)
def VerifyKeys(keys):
  """Raises an exception if unknown ssconf keys are given.
  @type keys: sequence
  @param keys: Key names to verify
  @raise errors.GenericError: When invalid keys were found
  """
  unknown = frozenset(keys) - _VALID_KEYS
  if not unknown:
    return
  raise errors.GenericError("Invalid ssconf keys: %s" %
                            utils.CommaJoin(sorted(unknown)))
| |
from __future__ import absolute_import, division, print_function
import os
import re
import sys
from os.path import isdir, isfile, join
from conda.compat import iteritems, PY3, text_type
from conda.utils import memoized, md5_file
import conda.config as cc
from conda.resolve import MatchSpec
from conda.cli.common import specs_from_url
from . import exceptions
try:
import yaml
# try to import C loader
try:
from yaml import CBaseLoader as BaseLoader
except ImportError:
from yaml import BaseLoader
except ImportError:
sys.exit('Error: could not import yaml (required to read meta.yaml '
'files of conda recipes)')
from conda_build.config import config
from conda_build.utils import comma_join
def ns_cfg():
    """Build the namespace dict used to evaluate meta.yaml selectors."""
    # Remember to update the docs of any of this changes
    plat = cc.subdir
    py = config.CONDA_PY
    np = config.CONDA_NPY
    pl = config.CONDA_PERL
    assert isinstance(py, int), py
    namespace = {
        'linux': plat.startswith('linux-'),
        'linux32': bool(plat == 'linux-32'),
        'linux64': bool(plat == 'linux-64'),
        'arm': plat.startswith('linux-arm'),
        'osx': plat.startswith('osx-'),
        'unix': plat.startswith(('linux-', 'osx-')),
        'win': plat.startswith('win-'),
        'win32': bool(plat == 'win-32'),
        'win64': bool(plat == 'win-64'),
        'pl': pl,
        'py': py,
        'py3k': bool(30 <= py < 40),
        'py2k': bool(20 <= py < 30),
        'py26': bool(py == 26),
        'py27': bool(py == 27),
        'py33': bool(py == 33),
        'py34': bool(py == 34),
        'py35': bool(py == 35),
        'np': np,
        'os': os,
        'environ': os.environ,
    }
    # One selector flag per non-x86 Linux machine type
    for machine in cc.non_x86_linux_machines:
        namespace[machine] = bool(plat == 'linux-%s' % machine)
    # Environment variables are usable in selectors as well
    namespace.update(os.environ)
    return namespace
# Matches "content [selector]" with an optional "#" comment before the
# selector; group 1 = content, group 2 = comment marker, group 3 = selector.
sel_pat = re.compile(r'(.+?)\s*(#.*)?\[(.+)\](?(2).*)$')


def select_lines(data, namespace):
    """Filter *data* line by line, keeping each line whose trailing
    ``[selector]`` expression evaluates to a true value in *namespace*.

    Lines without a selector are kept as-is; comment-only lines are
    dropped.  An unparseable selector aborts with an error message.
    Returns the surviving lines joined with a trailing newline.
    """
    lines = []
    for i, line in enumerate(data.splitlines()):
        line = line.rstrip()
        if line.lstrip().startswith('#'):
            # Don't bother with comment only lines
            continue
        m = sel_pat.match(line)
        if m:
            cond = m.group(3)
            try:
                if eval(cond, namespace, {}):
                    lines.append(m.group(1))
            except Exception:
                # Narrowed from a bare "except:" so KeyboardInterrupt and
                # SystemExit are not swallowed; any evaluation error means
                # the selector is invalid.  (A dead "sys.exit(1)" that
                # followed this call was removed - sys.exit raises.)
                sys.exit('''\
Error: Invalid selector in meta.yaml line %d:
%s
''' % (i + 1, line))
            continue
        lines.append(line)
    return '\n'.join(lines) + '\n'
@memoized
def yamlize(data):
    """Parse YAML text, mapping parser failures onto conda-build's own
    exception types (hinting at missing jinja2 when '{{' is present)."""
    try:
        return yaml.load(data, Loader=BaseLoader)
    except yaml.parser.ParserError as e:
        if '{{' not in data:
            raise exceptions.UnableToParse(original=e)
        # The document uses jinja syntax; a missing jinja2 install gets a
        # dedicated error, otherwise it is a plain parse failure.
        try:
            import jinja2
            jinja2  # Avoid pyflakes failure: 'jinja2' imported but unused
        except ImportError:
            raise exceptions.UnableToParseMissingJinja2(original=e)
        raise exceptions.UnableToParse(original=e)
# License families accepted in about/license_family
allowed_license_families = set("""
AGPL
Apache
BSD
GPL2
GPL3
LGPL
MIT
Other
PSF
Proprietary
Public-Domain
""".split())


def ensure_valid_license_family(meta):
    """Raise RuntimeError when about/license_family names an unknown family.

    A missing license_family is fine; only a value outside
    allowed_license_families is rejected.
    """
    try:
        family = meta['about']['license_family']
    except KeyError:
        # Nothing declared - nothing to validate
        return
    if family not in allowed_license_families:
        raise RuntimeError(exceptions.indent(
            "about/license_family '%s' not allowed. Allowed families are %s." %
            (family, comma_join(sorted(allowed_license_families)))))
def ensure_valid_fields(meta):
    """Validate fields restricted to a fixed set of values.

    Currently only build/pin_depends is checked; it must be absent, '',
    'record' or 'strict'.  Raises RuntimeError otherwise.
    """
    try:
        pin_depends = meta['build']['pin_depends']
    except KeyError:
        # Not specified at all - that is valid
        return
    if pin_depends not in ('', 'record', 'strict'):
        raise RuntimeError("build/pin_depends cannot be '%s'" % pin_depends)
def parse(data):
    """Parse meta.yaml text into a normalized dict.

    Applies selector filtering (select_lines) and YAML parsing (yamlize),
    then normalizes: known sections become dicts, known list/string fields
    are coerced to lists/strings, and known boolean fields are converted
    from yes/no style strings.  Finally validates and sanitizes the result.
    """
    data = select_lines(data, ns_cfg())
    res = yamlize(data)
    # ensure the result is a dict
    if res is None:
        res = {}
    for field in FIELDS:
        if field not in res:
            continue
        # An empty section (e.g. bare "build:") parses as None/falsy
        if not res[field]:
            res[field] = {}
        if not isinstance(res[field], dict):
            raise RuntimeError("The %s field should be a dict, not %s" %
                               (field, res[field].__class__.__name__))
    # ensure those are lists
    for field in ('source/patches',
                  'build/entry_points', 'build/script_env',
                  'build/features', 'build/track_features',
                  'requirements/build', 'requirements/run',
                  'requirements/conflicts', 'test/requires',
                  'test/files', 'test/commands', 'test/imports'):
        section, key = field.split('/')
        if res.get(section) is None:
            res[section] = {}
        if res[section].get(key, None) is None:
            res[section][key] = []
    # ensure those are strings
    for field in ('package/version', 'build/string', 'build/pin_depends',
                  'source/svn_rev', 'source/git_tag', 'source/git_branch',
                  'source/md5', 'source/git_rev', 'source/path'):
        section, key = field.split('/')
        if res.get(section) is None:
            res[section] = {}
        val = res[section].get(key, '')
        if val is None:
            val = ''
        res[section][key] = text_type(val)
    # ensure these fields are booleans
    trues = {'y', 'on', 'true', 'yes'}
    falses = {'n', 'no', 'false', 'off'}
    for field in ('build/osx_is_app', 'build/preserve_egg_dir',
                  'build/binary_relocation',
                  'build/detect_binary_files_with_prefix',
                  'build/skip', 'app/own_environment'):
        section, key = field.split('/')
        if res.get(section) is None:
            res[section] = {}
        try:
            val = res[section].get(key, '').lower()
        except AttributeError:
            # val wasn't a string (e.g. YAML already gave a bool) - keep it
            continue
        if val in trues:
            res[section][key] = True
        elif val in falses:
            res[section][key] = False
    ensure_valid_fields(res)
    ensure_valid_license_family(res)
    return sanitize(res)
def sanitize(meta):
    """
    Sanitize the meta-data to remove aliases/handle deprecation
    """
    # Shallow copy so the caller's dict is left untouched
    cleaned = dict(meta)
    for section, fixer in (('source', _git_clean),):
        if section in cleaned:
            cleaned[section] = fixer(cleaned[section])
    return cleaned
def _git_clean(source_meta):
"""
Reduce the redundancy in git specification by removing git_tag and
git_branch.
If one is specified, copy to git_rev.
If more than one field is used to specified, exit
and complain.
"""
git_rev_tags_old = ('git_branch', 'git_tag')
git_rev = 'git_rev'
git_rev_tags = (git_rev,) + git_rev_tags_old
has_rev_tags = tuple(bool(source_meta[tag]) for
tag in git_rev_tags)
if sum(has_rev_tags) > 1:
msg = "Error: mulitple git_revs:"
msg += ', '.join("{}".format(key) for key, has in
zip(git_rev_tags, has_rev_tags) if has)
sys.exit(msg)
# make a copy of the input so we have no side-effects
ret_meta = dict(source_meta)
# loop over the old versions
for key, has in zip(git_rev_tags[1:], has_rev_tags[1:]):
# update if needed
if has:
ret_meta[git_rev_tags[0]] = ret_meta[key]
# and remove
del ret_meta[key]
return ret_meta
# If you update this please update the example in
# conda-docs/docs/source/build.rst
# Known meta.yaml sections and the keys allowed in each one; used by
# parse() for normalization and by MetaData.check_fields() for validation
# (the 'extra' section is exempt from validation).
FIELDS = {
    'package': ['name', 'version'],
    'source': ['fn', 'url', 'md5', 'sha1', 'sha256', 'path',
               'git_url', 'git_tag', 'git_branch', 'git_rev', 'git_depth',
               'hg_url', 'hg_tag',
               'svn_url', 'svn_rev', 'svn_ignore_externals',
               'patches'],
    'build': ['number', 'string', 'entry_points', 'osx_is_app',
              'features', 'track_features', 'preserve_egg_dir',
              'no_link', 'binary_relocation', 'script', 'noarch_python',
              'has_prefix_files', 'binary_has_prefix_files', 'script_env',
              'detect_binary_files_with_prefix', 'rpaths',
              'always_include_files', 'skip', 'msvc_compiler',
              'pin_depends'  # pin_depends is experimental still
              ],
    'requirements': ['build', 'run', 'conflicts'],
    'app': ['entry', 'icon', 'summary', 'type', 'cli_opts',
            'own_environment'],
    'test': ['requires', 'commands', 'files', 'imports'],
    'about': ['home', 'license', 'license_family',
              'summary', 'readme', 'license_file'],
}
def check_bad_chrs(s, field):
    """Exit with an error message if *s* contains a character that is
    not allowed in the given meta.yaml *field*."""
    forbidden = '=!@#$%^&*:;"\'\\|<>?/ '
    if field in ('package/version', 'build/string'):
        # Dashes separate name/version/build in package filenames
        forbidden += '-'
    offenders = [ch for ch in forbidden if ch in s]
    if offenders:
        sys.exit("Error: bad character '%s' in %s: %s" % (offenders[0], field, s))
def handle_config_version(ms, ver):
    """
    'ms' is an instance of MatchSpec, and 'ver' is the version from the
    configuration, e.g. for ms.name == 'python', ver = 26 or None,
    return a (sometimes new) MatchSpec object
    """
    # NOTE(review): strictness appears to count spec parts (1 = name only,
    # 2 = name + version, 3 = name + version + build) - confirm against
    # conda.resolve.MatchSpec.
    if ms.strictness == 3:
        return ms
    if ms.strictness == 2:
        if ms.spec.split()[1] == 'x.x':
            # 'x.x' is a placeholder that must be supplied via the config
            if ver is None:
                raise RuntimeError("'%s' requires external setting" % ms.spec)
            # (no return here - proceeds below)
        else:   # regular version
            return ms
    if ver is None or (ms.strictness == 1 and ms.name == 'numpy'):
        return MatchSpec(ms.name)
    ver = text_type(ver)
    if '.' not in ver:
        # Convert condensed versions, e.g. 27 -> '2.7'; numpy keeps its
        # leading digit, e.g. 110 -> '1.10'
        if ms.name == 'numpy':
            ver = '%s.%s' % (ver[0], ver[1:])
        else:
            ver = '.'.join(ver)
    return MatchSpec('%s %s*' % (ms.name, ver))
class MetaData(object):
    """In-memory representation of a conda recipe: renders (optionally via
    jinja2) and parses meta.yaml/conda.yaml and exposes typed accessors."""
    def __init__(self, path):
        assert isdir(path)
        self.path = path
        self.meta_path = join(path, 'meta.yaml')
        self.requirements_path = join(path, 'requirements.txt')
        if not isfile(self.meta_path):
            # Fall back to the alternative conda.yaml file name
            self.meta_path = join(path, 'conda.yaml')
            if not isfile(self.meta_path):
                sys.exit("Error: meta.yaml or conda.yaml not found in %s" % path)
        # Start with bare-minimum contents so we can call environ.get_dict() with impunity
        # We'll immediately replace these contents in parse_again()
        self.meta = parse("package:\n"
                          " name: uninitialized")
        # This is the 'first pass' parse of meta.yaml, so not all variables are defined yet
        # (e.g. GIT_FULL_HASH, etc. are undefined)
        # Therefore, undefined jinja variables are permitted here
        # In the second pass, we'll be more strict. See build.build()
        self.parse_again(permit_undefined_jinja=True)
    def parse_again(self, permit_undefined_jinja=False):
        """Redo parsing for key-value pairs that are not initialized in the
        first pass.
        permit_undefined_jinja: If True, *any* use of undefined jinja variables will
        evaluate to an empty string, without emitting an error.
        """
        if not self.meta_path:
            # Instances built via fromdict() have no file to re-parse
            return
        self.meta = parse(self._get_contents(permit_undefined_jinja))
        # A plain requirements.txt may supply run requirements when
        # meta.yaml does not list any
        if (isfile(self.requirements_path) and
                not self.meta['requirements']['run']):
            self.meta.setdefault('requirements', {})
            run_requirements = specs_from_url(self.requirements_path)
            self.meta['requirements']['run'] = run_requirements
    @classmethod
    def fromdict(cls, metadata):
        """
        Create a MetaData object from metadata dict directly.
        """
        # __new__ bypasses __init__ on purpose: there is no recipe directory
        m = super(MetaData, cls).__new__(cls)
        m.path = ''
        m.meta_path = ''
        m.meta = sanitize(metadata)
        return m
    def get_section(self, section):
        # Returns {} for unknown sections so get_value() never raises
        return self.meta.get(section, {})
    def get_value(self, field, default=None):
        # field is a "section/key" path, e.g. 'build/number'
        section, key = field.split('/')
        value = self.get_section(section).get(key, default)
        return value
    def check_fields(self):
        """Exit with an error on unknown sections or keys (see FIELDS);
        the 'extra' section is exempt."""
        for section, submeta in iteritems(self.meta):
            if section == 'extra':
                continue
            if section not in FIELDS:
                sys.exit("Error: unknown section: %s" % section)
            for key in submeta:
                if key not in FIELDS[section]:
                    sys.exit("Error: in section %r: unknown key %r" %
                             (section, key))
    def name(self):
        """Return the (lowercase, validated) package name or exit."""
        res = self.get_value('package/name')
        if not res:
            sys.exit('Error: package/name missing in: %r' % self.meta_path)
        res = text_type(res)
        if res != res.lower():
            sys.exit('Error: package/name must be lowercase, got: %r' % res)
        check_bad_chrs(res, 'package/name')
        return res
    def version(self):
        """Return the validated package version or exit."""
        res = self.get_value('package/version')
        if res is None:
            sys.exit("Error: package/version missing in: %r" % self.meta_path)
        check_bad_chrs(res, 'package/version')
        return res
    def build_number(self):
        # Defaults to 0 when build/number is absent
        return int(self.get_value('build/number', 0))
    def ms_depends(self, typ='run'):
        """Return the requirements of the given type ('run' or 'build') as
        a list of MatchSpec objects, with python/numpy/perl/r versions
        filled in from the configuration."""
        res = []
        name_ver_list = [
            ('python', config.CONDA_PY),
            ('numpy', config.CONDA_NPY),
            ('perl', config.CONDA_PERL),
            ('r', config.CONDA_R),
        ]
        for spec in self.get_value('requirements/' + typ, []):
            try:
                ms = MatchSpec(spec)
            except AssertionError:
                raise RuntimeError("Invalid package specification: %r" % spec)
            if ms.name == self.name():
                raise RuntimeError("%s cannot depend on itself" % self.name())
            for name, ver in name_ver_list:
                if ms.name == name:
                    if self.get_value('build/noarch_python'):
                        # noarch_python: leave interpreter deps unpinned
                        continue
                    ms = handle_config_version(ms, ver)
            for c in '=!@#$%^&*:;"\'\\|<>?/':
                if c in ms.name:
                    sys.exit("Error: bad character '%s' in package name "
                             "dependency '%s'" % (c, ms.name))
                # A comparison operator as the second token means the user
                # forgot that conda specs attach the operator to the version
                parts = spec.split()
                if len(parts) >= 2:
                    if parts[1] in {'>', '>=', '=', '==', '!=', '<', '<='}:
                        msg = ("Error: bad character '%s' in package version "
                               "dependency '%s'" % (parts[1], ms.name))
                        if len(parts) >= 3:
                            msg += "\nPerhaps you meant '%s %s%s'" % (ms.name,
                                parts[1], parts[2])
                        sys.exit(msg)
            res.append(ms)
        return res
    def build_id(self):
        """Return the build string: explicit build/string if given,
        otherwise one synthesized from interpreter versions, features and
        the build number (e.g. 'np19py27_0')."""
        ret = self.get_value('build/string')
        if ret:
            check_bad_chrs(ret, 'build/string')
            return ret
        res = []
        version_pat = re.compile(r'(?:==)?(\d+)\.(\d+)')
        for name, s in (('numpy', 'np'), ('python', 'py'),
                        ('perl', 'pl'), ('r', 'r')):
            for ms in self.ms_depends():
                if ms.name == name:
                    try:
                        v = ms.spec.split()[1]
                    except IndexError:
                        # Unversioned: include the bare tag except for numpy
                        if name not in ['numpy']:
                            res.append(s)
                        break
                    if any(i in v for i in ',|>!<'):
                        # Complex constraints don't map to a simple tag
                        break
                    if name not in ['perl', 'r']:
                        match = version_pat.match(v)
                        if match:
                            res.append(s + match.group(1) + match.group(2))
                    else:
                        res.append(s + v.strip('*'))
                    break
        features = self.get_value('build/features', [])
        if res:
            res.append('_')
        if features:
            res.extend(('_'.join(features), '_'))
        res.append('%d' % self.build_number())
        return ''.join(res)
    def dist(self):
        # Canonical "<name>-<version>-<build_id>" distribution string
        return '%s-%s-%s' % (self.name(), self.version(), self.build_id())
    def pkg_fn(self):
        return "%s.tar.bz2" % self.dist()
    def is_app(self):
        return bool(self.get_value('app/entry'))
    def app_meta(self):
        """Return the app-related subset of the package index metadata."""
        d = {'type': 'app'}
        if self.get_value('app/icon'):
            # Icon files are stored under their content hash
            d['icon'] = '%s.png' % md5_file(join(
                self.path, self.get_value('app/icon')))
        for field, key in [('app/entry', 'app_entry'),
                           ('app/type', 'app_type'),
                           ('app/cli_opts', 'app_cli_opts'),
                           ('app/summary', 'summary'),
                           ('app/own_environment', 'app_own_environment')]:
            value = self.get_value(field)
            if value:
                d[key] = value
        return d
    def info_index(self):
        """Return the dict that goes into the package's info/index.json."""
        d = dict(
            name = self.name(),
            version = self.version(),
            build = self.build_id(),
            build_number = self.build_number(),
            platform = cc.platform,
            arch = cc.arch_name,
            subdir = cc.subdir,
            depends = sorted(' '.join(ms.spec.split())
                             for ms in self.ms_depends()),
        )
        for key in ('license', 'license_family'):
            value = self.get_value('about/' + key)
            if value:
                d[key] = value
        if self.get_value('build/features'):
            d['features'] = ' '.join(self.get_value('build/features'))
        if self.get_value('build/track_features'):
            d['track_features'] = ' '.join(self.get_value('build/track_features'))
        if self.get_value('build/noarch_python'):
            # noarch packages are platform independent
            d['platform'] = d['arch'] = None
            d['subdir'] = 'noarch'
        if self.is_app():
            d.update(self.app_meta())
        return d
    def has_prefix_files(self):
        ret = self.get_value('build/has_prefix_files', [])
        if not isinstance(ret, list):
            raise RuntimeError('build/has_prefix_files should be a list of paths')
        if sys.platform == 'win32':
            if any('\\' in i for i in ret):
                raise RuntimeError("build/has_prefix_files paths must use / as the path delimiter on Windows")
        return ret
    def always_include_files(self):
        return self.get_value('build/always_include_files', [])
    def binary_has_prefix_files(self):
        ret = self.get_value('build/binary_has_prefix_files', [])
        if not isinstance(ret, list):
            raise RuntimeError('build/binary_has_prefix_files should be a list of paths')
        if sys.platform == 'win32':
            if any('\\' in i for i in ret):
                raise RuntimeError("build/binary_has_prefix_files paths must use / as the path delimiter on Windows")
        return ret
    def skip(self):
        return self.get_value('build/skip', False)
    def _get_contents(self, permit_undefined_jinja):
        '''
        Get the contents of our [meta.yaml|conda.yaml] file.
        If jinja is installed, then the template.render function is called
        before standard conda macro processors.
        permit_undefined_jinja: If True, *any* use of undefined jinja variables will
                                evaluate to an empty string, without emitting an error.
        '''
        try:
            import jinja2
        except ImportError:
            # Without jinja2 the raw file contents are returned unrendered
            print("There was an error importing jinja2.", file=sys.stderr)
            print("Please run `conda install jinja2` to enable jinja template support", file=sys.stderr)
            with open(self.meta_path) as fd:
                return fd.read()
        from conda_build.jinja_context import context_processor
        path, filename = os.path.split(self.meta_path)
        loaders = [# search relative to '<conda_root>/Lib/site-packages/conda_build/templates'
                   jinja2.PackageLoader('conda_build'),
                   # search relative to RECIPE_DIR
                   jinja2.FileSystemLoader(path)
                   ]
        # search relative to current conda environment directory
        conda_env_path = os.environ.get('CONDA_DEFAULT_ENV')  # path to current conda environment
        if conda_env_path and os.path.isdir(conda_env_path):
            conda_env_path = os.path.abspath(conda_env_path)
            conda_env_path = conda_env_path.replace('\\', '/')  # need unix-style path
            env_loader = jinja2.FileSystemLoader(conda_env_path)
            loaders.append(jinja2.PrefixLoader({'$CONDA_DEFAULT_ENV': env_loader}))
        undefined_type = jinja2.StrictUndefined
        if permit_undefined_jinja:
            class UndefinedNeverFail(jinja2.Undefined):
                """
                A class for Undefined jinja variables.
                This is even less strict than the default jinja2.Undefined class,
                because it permits things like {{ MY_UNDEFINED_VAR[:2] }} and {{ MY_UNDEFINED_VAR|int }}.
                This can mask lots of errors in jinja templates, so it should only be used for a first-pass
                parse, when you plan on running a 'strict' second pass later.
                """
                # Every operator/conversion yields another silent Undefined
                __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
                __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
                __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
                __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \
                __complex__ = __pow__ = __rpow__ = \
                    lambda *args, **kwargs: UndefinedNeverFail()
                __str__ = __repr__ = \
                    lambda *args, **kwargs: u''
                __int__ = lambda _: 0
                __float__ = lambda _: 0.0
                def __getattr__(self, k):
                    try:
                        return object.__getattr__(self, k)
                    except AttributeError:
                        return UndefinedNeverFail()
                def __setattr__(self, k, v):
                    # Silently ignore attribute assignment as well
                    pass
            undefined_type = UndefinedNeverFail
        env = jinja2.Environment(loader=jinja2.ChoiceLoader(loaders), undefined=undefined_type)
        env.globals.update(ns_cfg())
        env.globals.update(context_processor(self, path))
        try:
            template = env.get_or_select_template(filename)
            return template.render(environment=env)
        except jinja2.TemplateError as ex:
            sys.exit("Error: Failed to render jinja template in {}:\n{}".format(self.meta_path, ex.message))
    def __unicode__(self):
        '''
        String representation of the MetaData.
        '''
        return text_type(self.__dict__)
    def __str__(self):
        # On Python 2, __str__ must return bytes; __unicode__ holds the text
        if PY3:
            return self.__unicode__()
        else:
            return self.__unicode__().encode('utf-8')
    def __repr__(self):
        '''
        String representation of the MetaData.
        '''
        return self.__str__()
if __name__ == '__main__':
    # Manual smoke test: render a local recipe and dump its index metadata
    from pprint import pprint
    from os.path import expanduser
    m = MetaData(expanduser('~/conda-recipes/pycosat'))
    pprint(m.info_index())
| |
"""SCons.Scanner
The Scanner package for the SCons software construction utility.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/__init__.py 4043 2009/02/23 09:06:45 scons"
import re
import string
import SCons.Node.FS
import SCons.Util
class _Null:
    pass
# This is used instead of None as a default argument value so None can be
# used as an actual argument value.
# NOTE(review): _null is bound to the _Null class object itself, not an
# instance of it.
_null = _Null
def Scanner(function, *args, **kw):
    """
    Public interface factory function for creating different types
    of Scanners based on the different types of "functions" that may
    be supplied.

    Returns a Selector when 'function' is a dictionary, otherwise a Base
    scanner wrapping the callable; extra arguments are forwarded to the
    constructor in either case.

    TODO: Deprecate this some day. We've moved the functionality
    inside the Base class and really don't need this factory function
    any more. It was, however, used by some of our Tool modules, so
    the call probably ended up in various people's custom modules
    patterned on SCons code.
    """
    # Direct argument expansion replaces the long-deprecated apply()
    # builtin: apply(f, args, kw) is exactly f(*args, **kw).
    if SCons.Util.is_Dict(function):
        return Selector(function, *args, **kw)
    else:
        return Base(function, *args, **kw)
class FindPathDirs:
    """A class to bind a specific *PATH variable name to a function that
    will return all of the *path directories."""
    def __init__(self, variable):
        # Name of the construction variable (e.g. 'CPPPATH') to look up.
        self.variable = variable
    def __call__(self, env, dir=None, target=None, source=None, argument=None):
        # 'argument' is accepted for path_function interface compatibility
        # but is not used by this implementation.
        import SCons.PathList
        try:
            path = env[self.variable]
        except KeyError:
            # Variable not set in this environment: no search path.
            return ()
        # Default to the filesystem's current directory when none is given.
        dir = dir or env.fs._cwd
        # Expand construction-variable references against target/source,
        # then resolve each entry to directory nodes (including repository
        # copies, via Rfindalldirs).
        path = SCons.PathList.PathList(path).subst_path(env, target, source)
        return tuple(dir.Rfindalldirs(path))
class Base:
    """
    The base class for dependency scanners.  This implements
    straightforward, single-pass scanning of a single file.

    NOTE: this is legacy Python 2 code (apply(), cmp(), dict.keys()
    returning a list); it is documented here as-is, not modernized.
    """
    def __init__(self,
                 function,
                 name = "NONE",
                 argument = _null,
                 skeys = _null,
                 path_function = None,
                 node_class = SCons.Node.FS.Entry,
                 node_factory = None,
                 scan_check = None,
                 recursive = None):
        """
        Construct a new scanner object given a scanner function.
        'function' - a scanner function taking two or three
        arguments and returning a list of strings.
        'name' - a name for identifying this scanner object.
        'argument' - an optional argument that, if specified, will be
        passed to both the scanner function and the path_function.
        'skeys' - an optional list argument that can be used to determine
        which scanner should be used for a given Node. In the case of File
        nodes, for example, the 'skeys' would be file suffixes.
        'path_function' - a function that takes four or five arguments
        (a construction environment, Node for the directory containing
        the SConscript file that defined the primary target, list of
        target nodes, list of source nodes, and optional argument for
        this instance) and returns a tuple of the directories that can
        be searched for implicit dependency files. May also return a
        callable() which is called with no args and returns the tuple
        (supporting Bindable class).
        'node_class' - the class of Nodes which this scan will return.
        If node_class is None, then this scanner will not enforce any
        Node conversion and will return the raw results from the
        underlying scanner function.
        'node_factory' - the factory function to be called to translate
        the raw results returned by the scanner function into the
        expected node_class objects.
        'scan_check' - a function to be called to first check whether
        this node really needs to be scanned.
        'recursive' - specifies that this scanner should be invoked
        recursively on all of the implicit dependencies it returns
        (the canonical example being #include lines in C source files).
        May be a callable, which will be called to filter the list
        of nodes found to select a subset for recursive scanning
        (the canonical example being only recursively scanning
        subdirectories within a directory).
        The scanner function's first argument will be a Node that should
        be scanned for dependencies, the second argument will be an
        Environment object, the third argument will be the tuple of paths
        returned by the path_function, and the fourth argument will be
        the value passed into 'argument', and the returned list should
        contain the Nodes for all the direct dependencies of the file.
        Examples:
        s = Scanner(my_scanner_function)
        s = Scanner(function = my_scanner_function)
        s = Scanner(function = my_scanner_function, argument = 'foo')
        """
        # Note: this class could easily work with scanner functions that take
        # something other than a filename as an argument (e.g. a database
        # node) and a dependencies list that aren't file names. All that
        # would need to be changed is the documentation.
        self.function = function
        self.path_function = path_function
        self.name = name
        self.argument = argument
        # Derive skeys when not supplied: a dict-valued 'function' maps
        # skeys to scanners, so its keys are the skeys; otherwise start
        # with an empty list that add_skey() can extend.
        if skeys is _null:
            if SCons.Util.is_Dict(function):
                skeys = function.keys()
            else:
                skeys = []
        self.skeys = skeys
        self.node_class = node_class
        self.node_factory = node_factory
        self.scan_check = scan_check
        # 'recursive' may be a filter callable, a plain true value
        # (recurse into everything found), or false/None (no recursion).
        if callable(recursive):
            self.recurse_nodes = recursive
        elif recursive:
            self.recurse_nodes = self._recurse_all_nodes
        else:
            self.recurse_nodes = self._recurse_no_nodes
    def path(self, env, dir=None, target=None, source=None):
        """Return the tuple of search directories from path_function,
        or () when no path_function was configured."""
        if not self.path_function:
            return ()
        # Forward 'argument' only when one was actually supplied.
        if not self.argument is _null:
            return self.path_function(env, dir, target, source, self.argument)
        else:
            return self.path_function(env, dir, target, source)
    def __call__(self, node, env, path = ()):
        """
        This method scans a single object. 'node' is the node
        that will be passed to the scanner function, and 'env' is the
        environment that will be passed to the scanner function. A list of
        direct dependency nodes for the specified node will be returned.
        """
        if self.scan_check and not self.scan_check(node, env):
            return []
        # When 'function' is a dict, select() rebinds self to the
        # sub-scanner registered for this node's scanner key.
        self = self.select(node)
        if not self.argument is _null:
            list = self.function(node, env, path, self.argument)
        else:
            # NOTE: 'list' shadows the builtin; kept as-is (legacy code).
            list = self.function(node, env, path)
        kw = {}
        if hasattr(node, 'dir'):
            # Resolve relative dependency names against the node's directory.
            kw['directory'] = node.dir
        node_factory = env.get_factory(self.node_factory)
        nodes = []
        for l in list:
            # Convert raw scanner results (typically strings) into Nodes,
            # unless the result is already of the expected class.
            # apply() is the Python 2 builtin (legacy).
            if self.node_class and not isinstance(l, self.node_class):
                l = apply(node_factory, (l,), kw)
            nodes.append(l)
        return nodes
    def __cmp__(self, other):
        # Python 2-only three-way comparison; compares by attribute dict.
        try:
            return cmp(self.__dict__, other.__dict__)
        except AttributeError:
            # other probably doesn't have a __dict__
            return cmp(self.__dict__, other)
    def __hash__(self):
        # Hash by identity (instances are mutable).
        return id(self)
    def __str__(self):
        return self.name
    def add_skey(self, skey):
        """Add a skey to the list of skeys"""
        self.skeys.append(skey)
    def get_skeys(self, env=None):
        # A string-valued skeys is treated as a construction-variable
        # expression and expanded into a list via the environment.
        if env and SCons.Util.is_String(self.skeys):
            return env.subst_list(self.skeys)[0]
        return self.skeys
    def select(self, node):
        """Return the scanner to use for 'node': the sub-scanner keyed by
        the node's scanner_key() when 'function' is a dict, else self."""
        if SCons.Util.is_Dict(self.function):
            key = node.scanner_key()
            try:
                return self.function[key]
            except KeyError:
                return None
        else:
            return self
    def _recurse_all_nodes(self, nodes):
        # Recursive scanning enabled: recurse into every found node.
        return nodes
    def _recurse_no_nodes(self, nodes):
        # Recursive scanning disabled: recurse into nothing.
        return []
    # Class-level default; __init__ rebinds per instance.
    recurse_nodes = _recurse_no_nodes
    def add_scanner(self, skey, scanner):
        # Only meaningful when 'function' is a dict of sub-scanners.
        self.function[skey] = scanner
        self.add_skey(skey)
class Selector(Base):
    """
    A class for selecting a more specific scanner based on the
    scanner_key() (suffix) for a specific Node.
    TODO: This functionality has been moved into the inner workings of
    the Base class, and this class will be deprecated at some point.
    (It was never exposed directly as part of the public interface,
    although it is used by the Scanner() factory function that was
    used by various Tool modules and therefore was likely a template
    for custom modules that may be out there.)
    """
    def __init__(self, dict, *args, **kw):
        # 'dict' (shadows the builtin; name kept for interface
        # compatibility) maps scanner keys to scanner objects.
        # Fixed: deprecated builtin apply() replaced by extended-call syntax.
        Base.__init__(self, None, *args, **kw)
        self.dict = dict
        # Fixed: copy the keys into a real list so add_skey() can append
        # to it on Python 3 as well (identical behavior on Python 2).
        self.skeys = list(dict.keys())
    def __call__(self, node, env, path = ()):
        # Dispatch to the scanner registered for this node's suffix.
        return self.select(node)(node, env, path)
    def select(self, node):
        """Return the scanner for node's scanner_key(), or None."""
        try:
            return self.dict[node.scanner_key()]
        except KeyError:
            return None
    def add_scanner(self, skey, scanner):
        self.dict[skey] = scanner
        self.add_skey(skey)
class Current(Base):
    """
    A class for scanning files that are source files (have no builder)
    or are derived files and are current (which implies that they exist,
    either locally or in a repository).
    """
    def __init__(self, *args, **kw):
        def current_check(node, env):
            # Scan only source files or derived files that are up to date.
            return not node.has_builder() or node.is_up_to_date()
        kw['scan_check'] = current_check
        # Fixed: the deprecated builtin apply() was replaced by the
        # equivalent extended-call syntax (apply was removed in Python 3).
        Base.__init__(self, *args, **kw)
class Classic(Current):
    """
    A Scanner subclass to contain the common logic for classic CPP-style
    include scanning, but which can be customized to use different
    regular expressions to find the includes.
    Note that in order for this to work "out of the box" (without
    overriding the find_include() and sort_key() methods), the regular
    expression passed to the constructor must return the name of the
    include file in group 0.
    """
    def __init__(self, name, suffixes, path_variable, regex, *args, **kw):
        # Compile the include-matching regex once, in multiline mode.
        self.cre = re.compile(regex, re.M)
        def _scan(node, env, path=(), self=self):
            # Scan the repository copy of the node if the local one is
            # missing; a node that does not exist has no dependencies.
            node = node.rfile()
            if not node.exists():
                return []
            return self.scan(node, path)
        kw['function'] = _scan
        kw['path_function'] = FindPathDirs(path_variable)
        kw['recursive'] = 1
        kw['skeys'] = suffixes
        kw['name'] = name
        # Fixed: deprecated builtin apply() replaced by extended-call syntax.
        Current.__init__(self, *args, **kw)
    def find_include(self, include, source_dir, path):
        """Return (node_or_None, include_name), searching the source
        directory first and then the scanner path."""
        n = SCons.Node.FS.find_file(include, (source_dir,) + tuple(path))
        return n, include
    def sort_key(self, include):
        # Normalize case so sort order is stable across filesystems.
        return SCons.Node.FS._my_normcase(include)
    def find_include_names(self, node):
        # All regex matches (group 0 per match) in the file's contents.
        return self.cre.findall(node.get_text_contents())
    def scan(self, node, path=()):
        """Scan one node and return its direct dependency nodes."""
        # cache the includes list in node so we only scan it once:
        # Fixed: identity test "is not None" instead of "!= None".
        if node.includes is not None:
            includes = node.includes
        else:
            # Fixed: removed stray space before the call's parentheses.
            includes = self.find_include_names(node)
            # Intern the names of the include files. Saves some memory
            # if the same header is included many times.  (Python 2:
            # intern is a builtin and map() returns a list.)
            try:
                node.includes = map(intern, includes)
            except TypeError:
                node.includes = includes
        # This is a hand-coded DSU (decorate-sort-undecorate, or
        # Schwartzian transform) pattern.  The sort key is the raw name
        # of the file as specified on the #include line (including the
        # " or <, since that may affect what file is found), which lets
        # us keep the sort order constant regardless of whether the file
        # is actually found in a Repository or locally.
        nodes = []
        source_dir = node.get_dir()
        # path_function may return a callable producing the actual tuple.
        if callable(path):
            path = path()
        for include in includes:
            n, i = self.find_include(include, source_dir, path)
            if n is None:
                # NOTE(review): SCons.Warnings is not imported in this
                # module's visible import block; presumably made available
                # via the SCons.Node.FS import chain -- confirm.
                SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                    "No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
            else:
                sortkey = self.sort_key(include)
                nodes.append((sortkey, n))
        nodes.sort()
        nodes = map(lambda pair: pair[1], nodes)
        return nodes
class ClassicCPP(Classic):
    """
    A Classic Scanner subclass which takes into account the type of
    bracketing used to include the file, and uses classic CPP rules
    for searching for the files based on the bracketing.
    Note that in order for this to work, the regular expression passed
    to the constructor must return the leading bracket in group 0, and
    the contained filename in group 1.
    """
    def find_include(self, include, source_dir, path):
        # 'include' is a (bracket, filename) pair from the regex groups.
        # Quoted includes ("file.h") search the including file's directory
        # first; angle-bracket includes (<file.h>) search it last.
        if include[0] == '"':
            paths = (source_dir,) + tuple(path)
        else:
            paths = tuple(path) + (source_dir,)
        n = SCons.Node.FS.find_file(include[1], paths)
        # intern() is the Python 2 builtin (sys.intern in Python 3); it
        # deduplicates the many repeated header-name strings.
        return n, intern(include[1])
    def sort_key(self, include):
        # string.join is the legacy Python 2 module function: joins the
        # (bracket, filename) pair with a space into one stable sort key.
        return SCons.Node.FS._my_normcase(string.join(include))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
''' Provide functions and classes to implement a custom JSON encoder for
serializing objects for BokehJS.
The primary interface is provided by the |serialize_json| function, which
uses the custom |BokehJSONEncoder| to produce JSON output.
In general, functions in this module convert values in the following way:
* Datetime values (Python, Pandas, NumPy) are converted to floating point
milliseconds since epoch.
* TimeDelta values are converted to absolute floating point milliseconds.
* RelativeDelta values are converted to dictionaries.
* Decimal values are converted to floating point.
* Sequences (Pandas Series, NumPy arrays, python sequences) that are passed
through this interface are converted to lists. Note, however, that arrays in
data sources inside Bokeh Documents are converted elsewhere, and by default
use a binary encoded format.
* Bokeh ``Model`` instances are usually serialized elsewhere in the context
of an entire Bokeh Document. Models passed through this interface are
converted to references.
* ``HasProps`` (that are not Bokeh models) are converted to key/value dicts of
all their properties and values.
* ``Color`` instances are converted to CSS color values.
.. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json`
.. |BokehJSONEncoder| replace:: :class:`~bokeh.core.json_encoder.BokehJSONEncoder`
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import collections
import decimal
import json
import numpy as np
from ..settings import settings
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_type, is_datetime_type, transform_series, transform_array
pd = import_optional('pandas')
rd = import_optional("dateutil.relativedelta")
class BokehJSONEncoder(json.JSONEncoder):
    ''' A custom ``json.JSONEncoder`` subclass for encoding objects in
    accordance with the BokehJS protocol.
    '''
    def transform_python_types(self, obj):
        ''' Handle special scalars such as (Python, NumPy, or Pandas)
        datetimes, or Decimal values.
        Args:
            obj (obj) :
                The object to encode. Anything not specifically handled in
                this method is passed on to the default system JSON encoder.
        '''
        # The order of these checks matters: more specific conversions are
        # tried before falling through to the base encoder (which raises
        # TypeError for unsupported types).
        # date/time values that get serialized as milliseconds
        if is_datetime_type(obj):
            return convert_datetime_type(obj)
        # slice objects
        elif isinstance(obj, slice):
            return dict(start=obj.start, stop=obj.stop, step=obj.step)
        # NumPy scalars
        elif np.issubdtype(type(obj), np.floating):
            return float(obj)
        elif np.issubdtype(type(obj), np.integer):
            return int(obj)
        elif np.issubdtype(type(obj), np.bool_):
            return bool(obj)
        # Decimal values
        elif isinstance(obj, decimal.Decimal):
            return float(obj)
        # RelativeDelta gets serialized as a dict
        # (only reachable when the optional dateutil import succeeded)
        elif rd and isinstance(obj, rd.relativedelta):
            return dict(years=obj.years,
                        months=obj.months,
                        days=obj.days,
                        hours=obj.hours,
                        minutes=obj.minutes,
                        seconds=obj.seconds,
                        microseconds=obj.microseconds)
        else:
            return super(BokehJSONEncoder, self).default(obj)
    def default(self, obj):
        ''' The required ``default`` method for JSONEncoder subclasses.
        Args:
            obj (obj) :
                The object to encode. Anything not specifically handled in
                this method is passed on to the default system JSON encoder.
        '''
        # Imported here rather than at module level, presumably to avoid
        # circular imports at load time -- confirm before moving them.
        from ..model import Model
        from ..colors import Color
        from .has_props import HasProps
        # array types -- use force_list here, only binary
        # encoding CDS columns for now
        if pd and isinstance(obj, (pd.Series, pd.Index)):
            return transform_series(obj, force_list=True)
        elif isinstance(obj, np.ndarray):
            return transform_array(obj, force_list=True)
        elif isinstance(obj, collections.deque):
            # Encode each deque element recursively.
            return list(map(self.default, obj))
        elif isinstance(obj, Model):
            # Models serialize as references; full state is handled at the
            # Document level.
            return obj.ref
        elif isinstance(obj, HasProps):
            return obj.properties_with_values(include_defaults=False)
        elif isinstance(obj, Color):
            return obj.to_css()
        else:
            # Fall through to scalar handling.
            return self.transform_python_types(obj)
def serialize_json(obj, pretty=None, indent=None, **kwargs):
    ''' Return a serialized JSON representation of objects, suitable to
    send to BokehJS.
    This function is typically used to serialize single python objects in
    the manner expected by BokehJS. In particular, many datetime values are
    automatically normalized to an expected format. Some Bokeh objects can
    also be passed, but note that Bokeh models are typically properly
    serialized in the context of an entire Bokeh document.
    The resulting JSON always has sorted keys. By default, the output is
    as compact as possible unless pretty output or indentation is requested.
    Args:
        obj (obj) : the object to serialize to JSON format
        pretty (bool, optional) :
            Whether to generate prettified output. If ``True``, spaces are
            added after separators, and indentation and newlines
            are applied. (default: False)
            Pretty output can also be enabled with the environment variable
            ``BOKEH_PRETTY``, which overrides this argument, if set.
        indent (int or None, optional) :
            Amount of indentation to use in generated JSON output. If ``None``
            then no indentation is used, unless pretty output is enabled,
            in which case two spaces are used. (default: None)
    Any additional keyword arguments are passed to ``json.dumps``, except for
    some that are computed internally, and cannot be overridden:
    * allow_nan
    * indent
    * separators
    * sort_keys
    Examples:
        .. code-block:: python
            >>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3))
            >>> print(serialize_json(data))
            {"a":[0,1,2],"b":1483228800000.0}
            >>> print(serialize_json(data, pretty=True))
            {
              "a": [
                0,
                1,
                2
              ],
              "b": 1483228800000.0
            }
    '''
    # these args to json.dumps are computed internally and should not be passed along
    for name in ['allow_nan', 'separators', 'sort_keys']:
        if name in kwargs:
            # Fixed: corrected the typo "permissable" in the error message.
            raise ValueError("The value of %r is computed internally, overriding is not permissible." % name)
    # Fall back to the BOKEH_PRETTY environment setting when unspecified.
    if pretty is None:
        pretty = settings.pretty(False)
    # Pretty output adds a space after the key separator ...
    if pretty:
        separators=(",", ": ")
    else:
        separators=(",", ":")
    # ... and defaults to a two-space indent.
    if pretty and indent is None:
        indent = 2
    return json.dumps(obj, cls=BokehJSONEncoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import random
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
    """Overall outcome of a test run; mapped to process exit codes in main()."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3
# Process exit codes corresponding to the TestStatus values above
# (see the exit-code mapping at the end of BitcoinTestFramework.main()).
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
# Prefix used for the temporary data directory created for each test run.
TMPDIR_PREFIX = "bitcoin_func_test_"
class SkipTest(Exception):
    """This exception is raised to skip a test"""
    def __init__(self, message):
        # Human-readable reason; read by main() when logging the skip.
        self.message = message
class BitcoinTestMetaClass(type):
    """Metaclass for BitcoinTestFramework.
    Ensures that any attempt to register a subclass of `BitcoinTestFramework`
    adheres to a standard whereby the subclass overrides `set_test_params` and
    `run_test` but DOES NOT override either `__init__` or `main`. If any of
    those standards are violated, a ``TypeError`` is raised."""
    def __new__(cls, clsname, bases, dct):
        # The base framework class itself is exempt from the checks.
        if clsname != 'BitcoinTestFramework':
            required = ('run_test', 'set_test_params')
            if any(member not in dct for member in required):
                raise TypeError("BitcoinTestFramework subclasses must override "
                                "'run_test' and 'set_test_params'")
            reserved = ('__init__', 'main')
            if any(member in dct for member in reserved):
                raise TypeError("BitcoinTestFramework subclasses may not override "
                                "'__init__' or 'main'")
        return super().__new__(cls, clsname, bases, dct)
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self, chain='regtest2'):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.chain = chain
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = False
self.bind_to_localhost_only = True
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""
        # --- Command-line option parsing ---
        parser = argparse.ArgumentParser(usage="%(prog)s [options]")
        parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
                            help="Leave bitcoinds and test.* datadir on exit or error")
        parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
                            help="Don't stop bitcoinds after the test execution")
        parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                            help="Directory for caching pregenerated datadirs (default: %(default)s)")
        parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
                            help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                            help="Print out all RPC calls as they are made")
        parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
                            help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_argument("--coveragedir", dest="coveragedir",
                            help="Write tested RPC commands into this directory")
        parser.add_argument("--configfile", dest="configfile",
                            default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
                            help="Location of the test framework config file (default: %(default)s)")
        parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                            help="Attach a python debugger if test fails")
        parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
                            help="use bitcoin-cli instead of RPC for all commands")
        parser.add_argument("--perf", dest="perf", default=False, action="store_true",
                            help="profile running nodes with perf for the duration of the test")
        parser.add_argument("--randomseed", type=int,
                            help="set a random seed for deterministically reproducing a previous test run")
        # Let the subclass register its own options before parsing.
        self.add_options(parser)
        self.options = parser.parse_args()
        PortSeed.n = self.options.port_seed
        check_json_precision()
        self.options.cachedir = os.path.abspath(self.options.cachedir)
        # --- Locate the bitcoind/bitcoin-cli binaries from config.ini ---
        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))
        self.config = config
        self.options.bitcoind = os.getenv("BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/bitcoind' + config["environment"]["EXEEXT"])
        self.options.bitcoincli = os.getenv("BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/bitcoin-cli' + config["environment"]["EXEEXT"])
        os.environ['PATH'] = os.pathsep.join([
            os.path.join(config['environment']['BUILDDIR'], 'src'),
            os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
            os.environ['PATH']
        ])
        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            # exist_ok=False: refuse to reuse a stale datadir from a prior run.
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
        self._start_logging()
        # Seed the PRNG. Note that test runs are reproducible if and only if
        # a single thread accesses the PRNG. For more information, see
        # https://docs.python.org/3/library/random.html#notes-on-reproducibility.
        # The network thread shouldn't access random. If we need to change the
        # network thread to access randomness, it should instantiate its own
        # random.Random object.
        seed = self.options.randomseed
        if seed is None:
            seed = random.randrange(sys.maxsize)
        else:
            self.log.debug("User supplied random seed {}".format(seed))
        random.seed(seed)
        self.log.debug("PRNG seed is: {}".format(seed))
        self.log.debug('Setting up network thread')
        self.network_thread = NetworkThread()
        self.network_thread.start()
        # --- Run the test, translating exceptions into a TestStatus ---
        success = TestStatus.FAILED
        try:
            if self.options.usecli:
                if not self.supports_cli:
                    raise SkipTest("--usecli specified but test does not support using CLI")
                self.skip_if_no_cli()
            self.skip_test_if_missing_module()
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError:
            self.log.exception("Assertion failed")
        except KeyError:
            self.log.exception("Key error")
        except Exception:
            self.log.exception("Unexpected exception caught during testing")
        # KeyboardInterrupt derives from BaseException, so the broad
        # "except Exception" above does not swallow it.
        except KeyboardInterrupt:
            self.log.warning("Exiting after keyboard interrupt")
        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()
        # --- Teardown: stop nodes, decide on datadir cleanup, exit ---
        self.log.debug('Closing down network thread')
        self.network_thread.close()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: bitcoinds were not stopped and may still be running")
        # Keep the datadirs around on failure, on request, or when perf
        # profiling data was collected.
        should_clean_up = (
            not self.options.nocleanup and
            not self.options.noshutdown and
            success != TestStatus.FAILED and
            not self.options.perf
        )
        if should_clean_up:
            self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
            cleanup_tree_on_exit = True
        elif self.options.perf:
            self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        else:
            self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
            cleanup_tree_on_exit = False
        # Map the final TestStatus onto a process exit code.
        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        if cleanup_tree_on_exit:
            shutil.rmtree(self.options.tmpdir)
        sys.exit(exit_code)
# Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test"""
        # Default: no extra options; called by main() before parse_args().
        pass
    def skip_test_if_missing_module(self):
        """Override this method to skip a test if a module is not compiled"""
        # Default: no module requirements; overrides raise SkipTest.
        pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# connect_nodes(self.nodes[1], 2)
for i in range(self.num_nodes - 1):
connect_nodes(self.nodes[i + 1], i)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
    def import_deterministic_coinbase_privkeys(self):
        # Import into each node's wallet the deterministic key that the
        # cached chain's coinbase outputs pay to (see _initialize_chain),
        # so the mined balance is spendable by the test.
        for n in self.nodes:
            try:
                n.getwalletinfo()
            except JSONRPCException as e:
                # Node has no wallet support; nothing to import.
                assert str(e).startswith('Method not found')
                continue
            n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
    def run_test(self):
        """Tests must override this method to define test logic"""
        raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
    def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
        """Instantiate TestNode objects.
        Should only be called once after the nodes have been specified in
        set_test_params()."""
        # Bind RPC/P2P to localhost only unless the test opted out.
        if self.bind_to_localhost_only:
            extra_confs = [["bind=127.0.0.1"]] * num_nodes
        else:
            extra_confs = [[]] * num_nodes
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if binary is None:
            binary = [self.options.bitcoind] * num_nodes
        # Each per-node list must supply exactly one entry per node.
        assert_equal(len(extra_confs), num_nodes)
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(binary), num_nodes)
        for i in range(num_nodes):
            self.nodes.append(TestNode(
                i,
                get_datadir_path(self.options.tmpdir, i),
                chain=self.chain,
                rpchost=rpchost,
                timewait=self.rpc_timeout,
                bitcoind=binary[i],
                bitcoin_cli=self.options.bitcoincli,
                coverage_dir=self.options.coveragedir,
                cwd=self.options.tmpdir,
                extra_conf=extra_confs[i],
                extra_args=extra_args[i],
                use_cli=self.options.usecli,
                start_perf=self.options.perf,
            ))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start multiple bitcoinds"""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            # Launch all processes first, then wait for all of them, so the
            # startups overlap instead of running serially.
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            # (bare except is deliberate: the exception is always re-raised,
            # and any failure must first stop the already-started nodes)
            self.stop_nodes()
            raise
        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def stop_node(self, i, expected_stderr='', wait=0):
        """Stop a bitcoind test node"""
        self.nodes[i].stop_node(expected_stderr, wait=wait)
        # Block until the process has actually exited.
        self.nodes[i].wait_until_stopped()
    def stop_nodes(self, wait=0):
        """Stop multiple bitcoind test nodes"""
        # Two passes so the shutdowns proceed in parallel: first request
        # every stop, then wait for each process to exit.
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node(wait=wait)
        for node in self.nodes:
            # Wait for nodes to stop
            node.wait_until_stopped()
    def restart_node(self, i, extra_args=None):
        """Stop and start a test node"""
        self.stop_node(i)
        self.start_node(i, extra_args)
    def wait_for_node_exit(self, i, timeout):
        # Wait (up to 'timeout' seconds) for node i's process to terminate.
        self.nodes[i].process.wait(timeout)
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        # Disconnect in both directions so neither side re-establishes
        # the link between the two halves.
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        # Each half now syncs independently.
        self.sync_all(self.nodes[:2])
        self.sync_all(self.nodes[2:])
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        # A single reconnect between the halves is enough for propagation.
        connect_nodes(self.nodes[1], 2)
        self.sync_all()
    def sync_blocks(self, nodes=None, **kwargs):
        # Wait until the given nodes (default: all) agree on the best block.
        sync_blocks(nodes or self.nodes, **kwargs)
    def sync_mempools(self, nodes=None, **kwargs):
        # Wait until the given nodes (default: all) have identical mempools.
        sync_mempools(nodes or self.nodes, **kwargs)
    def sync_all(self, nodes=None, **kwargs):
        # Blocks first, then mempools, so mempool comparison happens at a
        # common chain tip.
        self.sync_blocks(nodes, **kwargs)
        self.sync_mempools(nodes, **kwargs)
# Private helper methods. These should not be accessed by the subclass test scripts.
    def _start_logging(self):
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
        # Use UTC timestamps so they line up with the nodes' debug.log files.
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)
        if self.options.trace_rpc:
            # Mirror every RPC request/response to stdout when --tracerpc is set.
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
    """Initialize a pre-mined blockchain for use by the test.

    Create a cache of a 199-block-long chain (if it doesn't already exist).
    Afterward, create num_nodes copies of the cached datadir."""
    CACHE_NODE_ID = 0  # Use node 0 to create the cache for all other nodes
    cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
    assert self.num_nodes <= MAX_NODES
    # Only build the cache when it is missing; otherwise reuse it as-is.
    if not os.path.isdir(cache_node_dir):
        self.log.debug("Creating cache directory {}".format(cache_node_dir))
        initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
        self.nodes.append(
            TestNode(
                CACHE_NODE_ID,
                cache_node_dir,
                chain=self.chain,
                extra_conf=["bind=127.0.0.1"],
                extra_args=['-disablewallet'],
                rpchost=None,
                timewait=self.rpc_timeout,
                bitcoind=self.options.bitcoind,
                bitcoin_cli=self.options.bitcoincli,
                coverage_dir=None,
                cwd=self.options.tmpdir,
            ))
        self.start_node(CACHE_NODE_ID)
        # Wait for RPC connections to be ready
        self.nodes[CACHE_NODE_ID].wait_for_rpc_connection()
        # Create a 199-block-long chain; each of the 4 first nodes
        # gets 25 mature blocks and 25 immature.
        # The 4th node gets only 24 immature blocks so that the very last
        # block in the cache does not age too much (have an old tip age).
        # This is needed so that we are out of IBD when the test starts,
        # see the tip age check in IsInitialBlockDownload().
        for i in range(8):
            self.nodes[CACHE_NODE_ID].generatetoaddress(
                nblocks=25 if i != 7 else 24,
                address=TestNode.PRIV_KEYS[i % 4].address,
            )
        assert_equal(self.nodes[CACHE_NODE_ID].getblockchaininfo()["blocks"], 199)
        # Shut it down, and clean up cache directories:
        self.stop_nodes()
        self.nodes = []

        def cache_path(*paths):
            # Helper rooted at the cache node's chain directory.
            return os.path.join(cache_node_dir, self.chain, *paths)

        os.rmdir(cache_path('wallets'))  # Remove empty wallets dir
        for entry in os.listdir(cache_path()):
            if entry not in ['chainstate', 'blocks']:  # Only keep chainstate and blocks folder
                os.remove(cache_path(entry))
    # Hand every test node its own copy of the cached chain.
    for i in range(self.num_nodes):
        self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
        to_dir = get_datadir_path(self.options.tmpdir, i)
        shutil.copytree(cache_node_dir, to_dir)
        initialize_datadir(self.options.tmpdir, i, self.chain)  # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
    """Create fresh, empty datadirs for every node.

    No pre-mined chain is copied in: each node starts from genesis.
    Useful when a test wants complete control over initialization.
    """
    for node_index in range(self.num_nodes):
        initialize_datadir(self.options.tmpdir, node_index, self.chain)
def skip_if_no_py3_zmq(self):
    """Attempt to import the zmq package and skip the test if the import fails."""
    try:
        # Imported purely for its availability; the module object is unused.
        __import__('zmq')
    except ImportError:
        raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
    """Skip the running test if bitcoind has not been compiled with zmq support."""
    # Reads the ENABLE_ZMQ flag from the parsed build configuration.
    if not self.is_zmq_compiled():
        raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
    """Skip the running test if wallet has not been compiled."""
    # Reads the ENABLE_WALLET flag from the parsed build configuration.
    if not self.is_wallet_compiled():
        raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
    """Skip the running test if bitcoin-cli has not been compiled."""
    # Reads the ENABLE_CLI flag from the parsed build configuration.
    if not self.is_cli_compiled():
        raise SkipTest("bitcoin-cli has not been compiled.")
def is_cli_compiled(self):
    """Checks whether bitcoin-cli was compiled."""
    # self.config is the build configuration parsed from config.ini;
    # getboolean handles the usual true/false spellings.
    return self.config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
    """Checks whether the wallet module was compiled."""
    # Build-configuration flag parsed from config.ini.
    return self.config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
    """Checks whether the zmq module was compiled."""
    # Build-configuration flag parsed from config.ini.
    return self.config["components"].getboolean("ENABLE_ZMQ")
| |
# (C) Copyright 2017, 2019-2020 by Rocky Bernstein
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
CPython 1.5 bytecode opcodes
This is used in bytecode disassembly. This is similar to the
opcodes in Python's dis.py library.
"""
# These are used from outside this module
from xdis.cross_dis import findlabels, findlinestarts
from xdis.opcodes.base import (
compare_op,
const_op,
def_op,
extended_format_ATTR,
extended_format_CALL_FUNCTION,
extended_format_MAKE_FUNCTION_older,
extended_format_RAISE_VARARGS_older,
extended_format_RETURN_VALUE,
format_RAISE_VARARGS_older,
jabs_op,
jrel_op,
finalize_opcodes,
format_extended_arg,
local_op,
name_op,
nargs_op,
store_op,
# Although these aren't used here, they are exported
varargs_op,
update_pj2,
)
version = 1.5
python_implementation = "CPython"

# Comparison-operator names, indexed by the COMPARE_OP instruction operand.
cmp_op = (
    "<",
    "<=",
    "==",
    "!=",
    ">",
    ">=",
    "in",
    "not in",
    "is",
    "is not",
    "exception match",
    "BAD",
)

# Opcodes greater than 90 take an instruction operand or "argument"
# as opcode.py likes to call it.
HAVE_ARGUMENT = 90

# The xdis.opcodes.base helpers mutate a module-dict-like mapping; hand
# them this module's namespace.
l = locals()
l["python_version"] = version
l["cmp_op"] = cmp_op
l["HAVE_ARGUMENT"] = HAVE_ARGUMENT
# These are just to silence the import above
# BUGFIX: the key was misspelled "findlindstarts", so the intended
# re-export of findlinestarts never happened under the right name.
l["findlinestarts"] = findlinestarts
l["findlabels"] = findlabels

# FIXME: can we DRY this even more?
hascompare = []
hascondition = []  # conditional operator; has jump offset
hasconst = []
hasfree = []
hasjabs = []
hasjrel = []
haslocal = []
hasname = []
hasnargs = []  # For function-like calls
hasstore = []  # Some sort of store operation
hasvargs = []  # Similar but for operators BUILD_xxx
nofollow = []  # Instruction doesn't fall to the next opcode

# oppush[op] => number of stack entries pushed
# -9 means handle special. Note this forces oppush[i] - oppop[i] negative
oppush = [0] * 256
# oppop[op] => number of stack entries popped
# -1 means handle special.
oppop = [0] * 256

opmap = {}
# Default every opcode name to a "<n>" placeholder; def_op and friends
# overwrite the entries that CPython 1.5 actually defines.
opname = [""] * 256
for op in range(256):
    opname[op] = "<%r>" % (op,)
del op
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
# Helper signature (from xdis.opcodes.base): op-definer(l, name, opcode,
# pop_count, push_count, ...); the specialized definers also record the
# opcode in the matching has* category list.

# --- opcodes without an operand (op < HAVE_ARGUMENT) ---
def_op(l, "STOP_CODE", 0, 0, 0, fallthrough=False)
def_op(l, "POP_TOP", 1)
def_op(l, "ROT_TWO", 2)
def_op(l, "ROT_THREE", 3)
def_op(l, "DUP_TOP", 4)
def_op(l, "UNARY_POSITIVE", 10, 1, 1)
def_op(l, "UNARY_NEGATIVE", 11, 1, 1)
def_op(l, "UNARY_NOT", 12, 1, 1)
def_op(l, "UNARY_CONVERT", 13, 1, 1)
def_op(l, "UNARY_INVERT", 15, 1, 1)
def_op(l, "BINARY_POWER", 19, 1, 1)
def_op(l, "BINARY_MULTIPLY", 20, 2, 1)
def_op(l, "BINARY_DIVIDE", 21, 2, 1)
def_op(l, "BINARY_MODULO", 22, 2, 1)
def_op(l, "BINARY_ADD", 23, 2, 1)
def_op(l, "BINARY_SUBTRACT", 24, 2, 1)
def_op(l, "BINARY_SUBSCR", 25, 2, 1)
# SLICE+n / STORE_SLICE+n / DELETE_SLICE+n: n encodes which of the two
# slice bounds are present on the stack.
def_op(l, "SLICE+0", 30, 1, 1)
def_op(l, "SLICE+1", 31, 2, 1)
def_op(l, "SLICE+2", 32, 2, 1)
def_op(l, "SLICE+3", 33, 3, 1)
store_op(l, "STORE_SLICE+0", 40, 2, 0)
store_op(l, "STORE_SLICE+1", 41, 3, 0)
store_op(l, "STORE_SLICE+2", 42, 3, 0)
store_op(l, "STORE_SLICE+3", 43, 4, 0)
def_op(l, "DELETE_SLICE+0", 50, 1, 0)
def_op(l, "DELETE_SLICE+1", 51, 2, 0)
def_op(l, "DELETE_SLICE+2", 52, 2, 0)
def_op(l, "DELETE_SLICE+3", 53, 3, 0)
store_op(l, "STORE_SUBSCR", 60, 3, 0)  # Implements TOS1[TOS] = TOS2.
def_op(l, "DELETE_SUBSCR", 61, 2, 0)  # Implements del TOS1[TOS].
def_op(l, "BINARY_LSHIFT", 62, 2, 1)
def_op(l, "BINARY_RSHIFT", 63, 2, 1)
def_op(l, "BINARY_AND", 64, 2, 1)
def_op(l, "BINARY_XOR", 65, 2, 1)
def_op(l, "BINARY_OR", 66, 2, 1)
def_op(l, "PRINT_EXPR", 70, 1, 0)
def_op(l, "PRINT_ITEM", 71, 1, 0)
def_op(l, "PRINT_NEWLINE", 72, 0, 0)
def_op(l, "BREAK_LOOP", 80, 0, 0, fallthrough=False)
def_op(l, "LOAD_LOCALS", 82, 0, 1)
def_op(l, "RETURN_VALUE", 83, 1, 0, fallthrough=False)
def_op(l, "EXEC_STMT", 85, 3, 0)
def_op(l, "POP_BLOCK", 87, 0, 0)
def_op(l, "END_FINALLY", 88, 1, 0)
def_op(l, "BUILD_CLASS", 89, 3, 0)

# --- opcodes with an operand ---
# HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
store_op(l, "STORE_NAME", 90, 1, 0, is_type="name")  # Operand is in name list
name_op(l, "DELETE_NAME", 91, 0, 0)  # ""
varargs_op(l, "UNPACK_TUPLE", 92)  # Number of tuple items
def_op(l, "UNPACK_LIST", 93)  # Number of list items
store_op(l, "STORE_ATTR", 95, 2, 0, is_type="name")  # Operand is in name list
name_op(l, "DELETE_ATTR", 96, 1, 0)  # ""
store_op(l, "STORE_GLOBAL", 97, 1, 0, is_type="name")  # ""
name_op(l, "DELETE_GLOBAL", 98, 0, 0)  # ""
const_op(l, "LOAD_CONST", 100, 0, 1)  # Operand is in const list
name_op(l, "LOAD_NAME", 101, 0, 1)  # Operand is in name list
varargs_op(l, "BUILD_TUPLE", 102, -1, 1)  # Number of tuple items
varargs_op(l, "BUILD_LIST", 103, -1, 1)  # Number of list items
varargs_op(l, "BUILD_MAP", 104, -1, 1)  # Always zero for now
name_op(l, "LOAD_ATTR", 105, 1, 1)  # Operand is in name list
compare_op(l, "COMPARE_OP", 106, 2, 1)  # Comparison operator
name_op(l, "IMPORT_NAME", 107, 2, 1)  # Operand is in name list
name_op(l, "IMPORT_FROM", 108, 0, 1)  # Operand is in name list

# Jumps: jrel_op operands are byte offsets relative to the next
# instruction; jabs_op operands are absolute byte offsets.
jrel_op(l, "JUMP_FORWARD", 110, 0, 0, fallthrough=False)  # Number of bytes to skip
jrel_op(l, "JUMP_IF_FALSE", 111, 1, 1, True)  # ""
jrel_op(l, "JUMP_IF_TRUE", 112, 1, 1, True)  # ""
jabs_op(l, "JUMP_ABSOLUTE", 113, 0, 0, fallthrough=False)  # Target byte offset from beginning of code
def_op(l, "FOR_LOOP", 114)  # Number of bytes to skip
name_op(l, "LOAD_GLOBAL", 116, 0, 1)  # Operand is in name list
jrel_op(l, "SETUP_LOOP", 120, 0, 0, conditional=True)  # Distance to target address
jrel_op(l, "SETUP_EXCEPT", 121, 0, 0)  # ""
jrel_op(l, "SETUP_FINALLY", 122, 0, 0)  # ""
local_op(l, "LOAD_FAST", 124, 0, 1)  # Local variable number
store_op(l, "STORE_FAST", 125, 1, 0, is_type="local")  # Local variable number
local_op(l, "DELETE_FAST", 126)  # Local variable number
def_op(l, "SET_LINENO", 127)  # Current line number
def_op(l, "RAISE_VARARGS", 130, -1, 0, fallthrough=False)
# Number of raise arguments (1, 2, or 3)
nargs_op(l, "CALL_FUNCTION", 131, -1, 1)  # #args + (#kwargs << 8)
def_op(l, "MAKE_FUNCTION", 132, -1, 1)  # Number of args with default values
varargs_op(l, "BUILD_SLICE", 133, -1, 1)  # Number of items
def_op(l, "EXTENDED_ARG", 143)
EXTENDED_ARG = 143
# Fields that derived opcode modules copy verbatim from this one.
fields2copy = """cmp_op hasjabs""".split()

# NOTE(review): update_pj2 presumably fills in the Python-2-family jump
# tables from the l mapping — confirm against xdis.opcodes.base.
update_pj2(globals(), l)

# Per-opcode operand formatting used when disassembling.
opcode_arg_fmt = {
    "EXTENDED_ARG": format_extended_arg,
    "RAISE_VARARGS": format_RAISE_VARARGS_older
}

finalize_opcodes(l)

# Richer, cross-instruction formatting for the extended disassembler.
opcode_extended_fmt = {
    "CALL_FUNCTION": extended_format_CALL_FUNCTION,
    "LOAD_ATTR": extended_format_ATTR,
    "MAKE_FUNCTION": extended_format_MAKE_FUNCTION_older,
    "RAISE_VARARGS": extended_format_RAISE_VARARGS_older,
    "RETURN_VALUE": extended_format_RETURN_VALUE,
    "STORE_ATTR": extended_format_ATTR,
}
| |
# Copyright (C) 2012, Christof Buchbender
# BSD Licencse
import math
import astrolyze.functions.constants as const
# Constant conversion factors.
#==============> Approved !!! <==========================
WattToErgs = 1e7 # 1W = 1e7 erg/s
ErgsToWatt = 1e-7 # 1W = 1e-7 erg/s
JanskyToWatt = 1e-26 # 1Jy = 1e-26 W/m2/Hz
WattToJansky = 1e26 # 1W = 1e26 Jy * m2 * Hz
ErgsToJansky_cm = 1e23 # 1 erg/s = 1e23 Jy * cm2 * Hz * s
JanskyToErgs_cm = 1e-23 # 1 Jy = 1e-23 erg/s/cm2/Hz
ErgsToJansky_m = 1e19 # 1 erg/s = 1e19 Jy * m2 * Hz * s
JanskyToErgs_m = 1e-19 # 1 Jy = 1e-19 erg/s/m2/Hz
#==============> Approved !!! <==========================
def WmToKkms(x, resolution=0, sterad=False, ToKKms=False, m2_or_cm2='m',
             nu_or_lambda='nu'):
    '''
    Conversion between W/m2 and K km/s.

    Parameters
    ----------
    x : float
        wavelength/frequency [GHz].
    resolution : float
        Beam FWHM in arcsec; used when ``sterad`` is False to go from
        per-beam to per-steradian.
    ToKKms : True or False
        Direction of the conversion.
    sterad : True or False
        If False convert from per beam to per sterad.
    m2_or_cm2 : string
        Choose if conversion is to/from W m-2 (``'m'``, the default) or
        W cm-2 (``'cm2'``).
    nu_or_lambda : string
        Interpret ``x`` as frequency (``'nu'``) or wavelength (``'lambda'``).

    Returns
    -------
    factor : float
        The conversion factor.
    '''
    # To W=Joule/s => Joule = 1e7 erg
    factor = 1
    if m2_or_cm2 == 'cm2':
        factor = factor * 100 * 100
    factor = factor * 1e7  # erg/m2/s
    factor = factor / 1e4  # erg/cm2/s
    if sterad == False:
        # Gaussian beam solid angle: 1.133 * FWHM^2 (arcsec -> rad).
        beamsr = 1.133 * (resolution * const.a2r) ** 2
        factor = factor / beamsr  # erg/cm2/s/sr
    if nu_or_lambda == 'lambda':
        # BUGFIX: bare ``c`` was an undefined name; use the constants module.
        x = const.c / x
    # Conversion between ergs/s/cm2/sr = 2 k(CGS) nu^3/c(cm)^3 K km/s.
    # To make the units fit we multiply by 1 km in cm (const.km_in_cm).
    # conversionFactor converts from K -> ergs.
    # BUGFIX: bare ``k_CGS`` was an undefined name (always raised
    # NameError); use const.k_CGS.
    conversionFactor = (2 * const.k_CGS * x ** 3 * const.km_in_cm /
                        (const.c_in_cm ** 3))
    factor = factor / conversionFactor
    if ToKKms == True:
        return 1 / factor
    if ToKKms == False:
        return factor
def ergToKkms(x, toErg=False, nu_or_lambda='nu'):
    r"""
    Conversion between ergs/cm2/s/sr and K km/s.

    Parameters
    ----------
    x : float
        wavelength/frequency [GHz],
    toErg : True or False
        True converts the other direction, i.e. from K km/s to
        ergs/cm2/s/sr.
    nu_or_lambda : string
        Choose type of x: frequency = ``'nu'`` or wavelength = ``'lambda'``.

    Returns
    -------
    factor : float
        The conversion factor.

    Notes
    -----
    The duplicated ``if toErg`` branch of the original implementation made
    the default direction (toErg=False) return None; both directions now
    return the proper factor.
    """
    factor = 1
    if nu_or_lambda == 'lambda':
        x = const.c / x
    # Conversion between erg/s/cm2/sr = 2k(CGS) nu^3/c(cm)^3 K km/s.
    # k(CGS) is Boltzmann's constant in CGS units, nu the frequency of the
    # measurement, c(cm) the speed of light in cm; the extra factor of
    # 1 km in cm (const.km_in_cm) makes the units fit.
    # conversionFactor converts from K -> ergs.
    conversionFactor = (2 * const.k_CGS * x ** 3 * const.km_in_cm /
                        (const.c_in_cm ** 3))
    factor = factor / conversionFactor
    # BUGFIX: the second branch re-tested ``toErg`` instead of ``not toErg``.
    if toErg:
        return 1 / factor
    return factor
def Int2Lum(distance_in_pc, cm_or_m='cm'):
    r"""
    Conversion factor from intensity to luminosity.

    Integrates over the full sphere, i.e. multiplies by 4 pi D^2 at the
    source distance.

    Parameters
    ----------
    distance_in_pc : float
        Distance to the source in parsecs.
    cm_or_m : string
        ``'cm'`` for an area in cm^2 (default) or ``'m'`` for m^2.
    """
    full_sphere = 4 * math.pi
    if cm_or_m == 'm':
        # NOTE(review): const.parsec_in_m_1 is presumably one parsec in
        # metres — confirm against astrolyze.functions.constants.
        return full_sphere * (distance_in_pc * const.parsec_in_m_1) ** 2
    if cm_or_m == 'cm':
        return full_sphere * (distance_in_pc * const.parsec_in_cm) ** 2
def Lum2Flux(distance_in_pc, cm_or_m='cm'):
    r""" Conversion factor to calculate the flux of an object with a certain
    luminosity in a certain distance by dividing over 4 pi Distance^2.

    Parameters
    ----------
    distance_in_pc : float
        Distance to the source in parsecs.
    cm_or_m : string
        Choose whether the output is per cm^2 = ``'cm'`` or per
        m^2 = ``'m'``.
    """
    # Both unit choices share the same formula (the unit handling lives in
    # Int2Lum), so the two duplicated branches collapse into one. An
    # unrecognized cm_or_m still falls through and returns None, exactly
    # as the original pair of if-statements did.
    if cm_or_m in ('m', 'cm'):
        return 1 / Int2Lum(distance_in_pc, cm_or_m)
def JyBToErgsB(input_flux, distance, wavelength, invert=False, map_use=False):
    r"""
    Conversion between Jy/beam and ergs/beam.

    Parameters
    ----------
    input_flux : float
        Flux to be converted in Jy/beam
    distance : float
        Distance to the source in parsec.
    wavelength : float
        Wavelength :math:`\lambda` in :math:`\mu m`.
    invert : True or False
        If True, convert from ergs/beam back to Jy/beam.
    map_use : True or False
        If True, return the bare conversion factor instead of the
        converted flux.

    Returns
    -------
    The conversion factor (map_use = True) or the already converted flux
    (map_use = False).
    """
    # change from Jansky to erg s-1 cm-2 Hz-1
    conversion = JanskyToErgs_cm
    # integrate over sky ergs s-1 Hz-1
    conversion = conversion * Int2Lum(distance, cm_or_m='cm')
    # multiply by frequency
    conversion = conversion * const.c / (wavelength * 1e-6)
    if invert == False:
        if map_use == False:
            return input_flux * conversion
        if map_use == True:
            return conversion
    if invert == True:
        if map_use == False:
            return input_flux / conversion
        if map_use == True:
            return 1/conversion
def JyBToWM2Kpc2(input_flux, distance, major, minor, wavelength,
                 invert=False, map_use=False):
    r"""
    Conversion between Jy/beam and W m^-2 kpc^-2

    Parameters
    ----------
    input_flux : float
        Flux to be converted.
    distance : float
        Distance to source in parsec.
    major : float
        Major Axis Beam (arcsec).
    minor : float
        Minor Axis Beam(arcsec).
    wavelength : float
        Wavelength :math:`\lambda` in :math:`\mu m`
    invert : True or False
        Changes the direction of conversion.
    map_use : True or False
        Currently unused: the converted flux is always returned.

    Returns
    -------
    float : the converted Flux.
    """
    # change to W/m2/Hz/beam
    conversion = JanskyToWatt
    # calculate the beamsize in kpc2 (Gaussian beam: 1.133 * maj * min)
    beamsize = 1.133 * (distance / 1e3) ** 2 * major * minor * const.a2r ** 2
    beamsInKpc2 = 1 / beamsize
    conversion = conversion * beamsInKpc2  # change to W/m2/Hz/kpc2
    conversion = conversion * const.c / (wavelength * 1e-6)  # to W/m2/kpc2
    if invert == False:
        return input_flux * conversion
    if invert == True:
        return input_flux / conversion
def JyBToWKpc2(input_flux, distance, major, minor,
               wavelength, invert=False, map_use=False):
    r"""
    Conversion from JyB to W kpc^-2.

    Parameters
    ----------
    input_flux : float
        Flux to be converted.
    distance : float
        Distance to source in parsec.
    major : float
        Major Axis Beam (arcsec).
    minor : float
        Minor Axis Beam(arcsec).
    wavelength : float
        Wavelength :math:`\lambda` in :math:`\mu m`.
    invert : True or False
        Changes the direction of conversion.
    map_use : True or False
        Currently unused: the converted flux is always returned.

    Returns
    -------
    float : the converted Flux.
    """
    # Unit chain: Jy/beam -> W/m2/Hz/beam -> W/m2/Hz/kpc2 -> W/m2/kpc2
    # -> W/kpc2 (after integrating over the sky with Int2Lum).
    beam_area_kpc2 = (1.133 *
                      (distance / 1e3) ** 2 *
                      major *
                      minor *
                      const.a2r ** 2)
    beams_per_kpc2 = 1 / beam_area_kpc2
    conversion = JanskyToWatt * beams_per_kpc2
    conversion = conversion * const.c / (wavelength * 1e-6)
    conversion = conversion * Int2Lum(distance, cm_or_m='m')
    if invert:
        return input_flux / conversion
    return input_flux * conversion
# The following functions are redundant maps/main.py contains the
# function flux_conversion, with the same functionality but with more
# flexibility. This is however not usable outside of the maps environment.
# The following functions can be used generally and are correct!!
def kelvin_to_jansky(x, major, minor, nu_or_lambda='nu'):
    """
    Conversion from K.km/s (Tmb) to Jy/beam.

    Parameters
    ----------
    x : float
        wavelength/frequency [GHz],
    major : float
        Major Axis Beam (arcsec),
    minor : float
        Minor Axis Beam(arcsec),
    nu_or_lambda : string
        Choose type of x: frequency = ``'nu'`` or wavelength = ``'lambda'``.

    Notes
    -----
    This function has been compared with the Time estimator from the
    [GILDAS] package ASTRO and yields the same conversion factors.

    References
    ----------
    .. [GILDAS] www.iram.fr/IRAMFR/GILDAS
    """
    # The factor is simply the reciprocal of the Jy/beam -> K factor.
    if nu_or_lambda == 'lambda':
        def _factor(wavelength_, maj, mnr):
            return 1 / (1.359918e7 * wavelength_ ** 2 / maj / mnr)
    if nu_or_lambda == 'nu':
        def _factor(frequency_, maj, mnr):
            return 1 / (1.222233e6 * frequency_ ** (-2) / maj / mnr)
    return _factor(x, major, minor)
def jansky_to_kelvin(x, major, minor, nu_or_lambda='nu'):
    """
    Conversion from Jy/beam to K.km/s (Tmb).

    Parameters
    ----------
    x : float
        wavelength/frequency [GHz],
    major : float
        Major Axis Beam (arcsec).
    minor : float
        Minor Axis Beam(arcsec).
    nu_or_lambda : string
        Choose type of x: frequency = ``'nu'`` or wavelength = ``'lambda'``.

    Notes
    -----
    Same as :func:`kelvin_to_jansky`
    """
    if nu_or_lambda == 'lambda':
        def fcon(wavelengths, major, minor):
            return 1.359918e7 * wavelengths ** 2 / major / minor
    if nu_or_lambda == 'nu':
        # BUGFIX: the parameters were named Maj/Min while the body read the
        # enclosing major/minor, silently ignoring its own arguments; the
        # call below happened to pass the same values, but the helper is
        # now consistent with its 'lambda' sibling.
        def fcon(frequency, major, minor):
            return 1.222233e6 * frequency ** (-2) / major / minor
    return fcon(x, major, minor)
def beam_to_pc2(beam, distance):
    r'''
    Conversion factor from a per-beam unit to a per-pc^2 unit.

    Parameters
    ----------
    beam : float
        Beam size in arcsec.
    distance : float
        The distance to the source in parsec.

    Returns
    -------
    conv : float
        The conversion factor
    '''
    # Gaussian beam solid area projected at the source distance:
    # 1.133 * (FWHM in rad * D)^2, in pc^2.
    beam_area_pc2 = 1.133 * (const.a2r * beam * distance) ** 2
    return 1. / beam_area_pc2
| |
from __future__ import unicode_literals
import csv
import pytz
from furl import furl
from datetime import datetime, timedelta
from django.db.models import Q
from django.views.defaults import page_not_found
from django.views.generic import FormView, DeleteView, ListView, TemplateView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from osf.models.user import OSFUser
from osf.models.node import Node, NodeLog
from osf.models.spam import SpamStatus
from framework.auth import get_user
from framework.auth.utils import impute_names
from framework.auth.core import generate_verification_key
from website.mailchimp_utils import subscribe_on_confirm
from website import search
from admin.base.views import GuidView
from osf.models.admin_log_entry import (
update_admin_log,
USER_2_FACTOR,
USER_EMAILED,
USER_REMOVED,
USER_RESTORED,
CONFIRM_SPAM,
REINDEX_ELASTIC,
)
from admin.users.serializers import serialize_user
from admin.users.forms import EmailResetForm, WorkshopForm, UserSearchForm
from admin.users.templatetags.user_extras import reverse_user
from website.settings import DOMAIN, SUPPORT_EMAIL
class UserDeleteView(PermissionRequiredMixin, DeleteView):
    """ Allow authorised admin user to remove/restore user

    Interface with OSF database. No admin models.
    """
    template_name = 'users/remove_user.html'
    context_object_name = 'user'
    object = None
    permission_required = 'osf.change_osfuser'
    raise_exception = True

    def delete(self, request, *args, **kwargs):
        # Toggle: an enabled user (or one explicitly flagged via is_spam)
        # gets disabled; an already-disabled user gets restored.
        try:
            user = self.get_object()
            if user.date_disabled is None or kwargs.get('is_spam'):
                # Disable path: deactivate the account and swap spam/ham tags.
                user.disable_account()
                user.is_registered = False
                if 'spam_flagged' in user.system_tags:
                    user.tags.through.objects.filter(tag__name='spam_flagged').delete()
                if 'ham_confirmed' in user.system_tags:
                    user.tags.through.objects.filter(tag__name='ham_confirmed').delete()
                if kwargs.get('is_spam') and 'spam_confirmed' not in user.system_tags:
                    user.add_system_tag('spam_confirmed')
                flag = USER_REMOVED
                message = 'User account {} disabled'.format(user.pk)
            else:
                # Restore path: re-enable, resubscribe, and mark as ham.
                user.date_disabled = None
                subscribe_on_confirm(user)
                user.is_registered = True
                user.tags.through.objects.filter(tag__name__in=['spam_flagged', 'spam_confirmed'], tag__system=True).delete()
                if 'ham_confirmed' not in user.system_tags:
                    user.add_system_tag('ham_confirmed')
                flag = USER_RESTORED
                message = 'User account {} reenabled'.format(user.pk)
            user.save()
        except AttributeError:
            # get_object() returned None for an unknown guid.
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user.pk,
            object_repr='User',
            message=message,
            action_flag=flag
        )
        return redirect(reverse_user(self.kwargs.get('guid')))

    def get_context_data(self, **kwargs):
        context = {}
        context.setdefault('guid', kwargs.get('object')._id)
        return super(UserDeleteView, self).get_context_data(**context)

    def get_object(self, queryset=None):
        # Look up by the guid captured in the URL.
        return OSFUser.load(self.kwargs.get('guid'))
class SpamUserDeleteView(UserDeleteView):
    """
    Allow authorized admin user to delete a spam user and mark all their nodes as private
    """

    template_name = 'users/remove_spam_user.html'

    def delete(self, request, *args, **kwargs):
        # Confirm every non-registration contributor node as spam first,
        # then delegate to the parent view; is_spam=True forces the
        # disable branch there even for already-disabled accounts.
        try:
            user = self.get_object()
        except AttributeError:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        if user:
            for node in user.contributor_to:
                if not node.is_registration and not node.is_spam:
                    node.confirm_spam(save=True)
                    update_admin_log(
                        user_id=request.user.id,
                        object_id=node._id,
                        object_repr='Node',
                        message='Confirmed SPAM: {} when user {} marked as spam'.format(node._id, user._id),
                        action_flag=CONFIRM_SPAM
                    )
        kwargs.update({'is_spam': True})
        return super(SpamUserDeleteView, self).delete(request, *args, **kwargs)
class HamUserRestoreView(UserDeleteView):
    """
    Allow authorized admin user to undelete a ham user
    """

    template_name = 'users/restore_ham_user.html'

    def delete(self, request, *args, **kwargs):
        # Confirm the user's spam-marked nodes as ham, then delegate to the
        # parent view; with is_spam=False a disabled account takes the
        # restore branch there.
        try:
            user = self.get_object()
        except AttributeError:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        if user:
            for node in user.contributor_to:
                if node.is_spam:
                    node.confirm_ham(save=True)
                    # NOTE(review): logged with CONFIRM_SPAM since no
                    # CONFIRM_HAM flag is imported — confirm intent.
                    update_admin_log(
                        user_id=request.user.id,
                        object_id=node._id,
                        object_repr='Node',
                        message='Confirmed HAM: {} when user {} marked as ham'.format(node._id, user._id),
                        action_flag=CONFIRM_SPAM
                    )
        kwargs.update({'is_spam': False})
        return super(HamUserRestoreView, self).delete(request, *args, **kwargs)
class UserSpamList(PermissionRequiredMixin, ListView):
    """Paginated list of users carrying a given spam-related system tag.

    Subclasses override SPAM_TAG and template_name.
    """
    SPAM_TAG = 'spam_flagged'

    paginate_by = 25
    paginate_orphans = 1
    ordering = ('date_disabled')
    # BUGFIX: was '-osfuser' — the stray '-' is an order_by prefix that has
    # no meaning in a context-object name (unused here anyway, since
    # get_context_data builds the context by hand).
    context_object_name = 'osfuser'
    permission_required = ('osf.view_spam', 'osf.view_osfuser')
    raise_exception = True

    def get_queryset(self):
        # All users carrying the subclass's tag, ordered by disable date.
        return OSFUser.objects.filter(tags__name=self.SPAM_TAG).order_by(self.ordering)

    def get_context_data(self, **kwargs):
        query_set = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(query_set)
        paginator, page, query_set, is_paginated = self.paginate_queryset(
            query_set, page_size)
        return {
            'users': map(serialize_user, query_set),
            'page': page,
        }
class UserFlaggedSpamList(UserSpamList, DeleteView):
    """List of users flagged as spam; POST confirms them as spam in bulk."""
    SPAM_TAG = 'spam_flagged'
    template_name = 'users/flagged_spam_list.html'

    def delete(self, request, *args, **kwargs):
        # BUGFIX: Django users have no get_perms() method; has_perm() is the
        # documented permission check and returns a boolean.
        if not request.user.has_perm('osf.mark_spam'):
            raise PermissionDenied("You don't have permission to update this user's spam status.")
        # Every POSTed key except the CSRF token is treated as a user guid.
        user_ids = [
            uid for uid in request.POST.keys()
            if uid != 'csrfmiddlewaretoken'
        ]
        for uid in user_ids:
            user = OSFUser.load(uid)
            # Promote the flag to a confirmation.
            if 'spam_flagged' in user.system_tags:
                user.system_tags.remove('spam_flagged')
            user.add_system_tag('spam_confirmed')
            user.save()
            update_admin_log(
                user_id=self.request.user.id,
                object_id=uid,
                object_repr='User',
                message='Confirmed SPAM: {}'.format(uid),
                action_flag=CONFIRM_SPAM
            )
        return redirect('users:flagged-spam')
class UserKnownSpamList(UserSpamList):
    """List of users whose spam status has been confirmed."""
    SPAM_TAG = 'spam_confirmed'
    template_name = 'users/known_spam_list.html'
class UserKnownHamList(UserSpamList):
    """List of users confirmed as ham (shares the known-spam template)."""
    SPAM_TAG = 'ham_confirmed'
    template_name = 'users/known_spam_list.html'
class User2FactorDeleteView(UserDeleteView):
    """ Allow authorised admin user to remove 2 factor authentication.

    Interface with OSF database. No admin models.
    """
    template_name = 'users/remove_2_factor.html'

    def delete(self, request, *args, **kwargs):
        user = self.get_object()
        try:
            # Removing the addon disables two-factor auth for the account.
            user.delete_addon('twofactor')
        except AttributeError:
            # get_object() returned None for an unknown guid.
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user.pk,
            object_repr='User',
            message='Removed 2 factor auth for user {}'.format(user.pk),
            action_flag=USER_2_FACTOR
        )
        return redirect(reverse_user(self.kwargs.get('guid')))
class UserFormView(PermissionRequiredMixin, FormView):
    """Admin user-search form.

    Redirects to the user detail page when a guid or email is supplied,
    or to the name-search result list when only a name is given.
    """
    template_name = 'users/search.html'
    object_type = 'osfuser'
    permission_required = 'osf.view_osfuser'
    raise_exception = True
    form_class = UserSearchForm

    def __init__(self, *args, **kwargs):
        # Filled in by form_valid; read back via success_url.
        self.redirect_url = None
        super(UserFormView, self).__init__(*args, **kwargs)

    def form_valid(self, form):
        guid = form.cleaned_data['guid']
        name = form.cleaned_data['name']
        email = form.cleaned_data['email']
        if guid or email:
            if email:
                # The email may be the username or any secondary address.
                try:
                    user = OSFUser.objects.filter(Q(username=email) | Q(emails__address=email)).get()
                    guid = user.guids.first()._id
                except OSFUser.DoesNotExist:
                    return page_not_found(self.request, AttributeError('User with email address {} not found.'.format(email)))
            self.redirect_url = reverse('users:user', kwargs={'guid': guid})
        elif name:
            self.redirect_url = reverse('users:search_list', kwargs={'name': name})
        return super(UserFormView, self).form_valid(form)

    @property
    def success_url(self):
        # FormView redirects here after a valid submission.
        return self.redirect_url
class UserSearchList(PermissionRequiredMixin, ListView):
    """Paginated case-insensitive full-name search results."""
    template_name = 'users/list.html'
    permission_required = 'osf.view_osfuser'
    raise_exception = True
    form_class = UserSearchForm
    paginate_by = 25

    def get_queryset(self):
        # Substring match on the full name; only the columns the template
        # renders are fetched.
        query = OSFUser.objects.filter(fullname__icontains=self.kwargs['name']).only(
            'guids', 'fullname', 'username', 'date_confirmed', 'date_disabled'
        )
        return query

    def get_context_data(self, **kwargs):
        users = self.get_queryset()
        page_size = self.get_paginate_by(users)
        paginator, page, query_set, is_paginated = self.paginate_queryset(users, page_size)
        kwargs['page'] = page
        # Flatten each user into the dict shape the template expects.
        kwargs['users'] = [{
            'name': user.fullname,
            'username': user.username,
            'id': user.guids.first()._id,
            'confirmed': user.date_confirmed,
            'disabled': user.date_disabled if user.is_disabled else None
        } for user in query_set]
        return super(UserSearchList, self).get_context_data(**kwargs)
class UserView(PermissionRequiredMixin, GuidView):
    """Detail page for a single user, serialized for the admin template."""
    template_name = 'users/user.html'
    context_object_name = 'user'
    permission_required = 'osf.view_osfuser'
    raise_exception = True

    def get_context_data(self, **kwargs):
        kwargs = super(UserView, self).get_context_data(**kwargs)
        kwargs.update({'SPAM_STATUS': SpamStatus})  # Pass spam status in to check against
        return kwargs

    def get_object(self, queryset=None):
        return serialize_user(OSFUser.load(self.kwargs.get('guid')))
class UserWorkshopFormView(PermissionRequiredMixin, FormView):
    """Upload a workshop-attendee CSV and download it back annotated with
    each attendee's OSF activity since the workshop date.
    """
    form_class = WorkshopForm
    object_type = 'user'
    template_name = 'users/workshop.html'
    permission_required = 'osf.view_osfuser'
    raise_exception = True

    def form_valid(self, form):
        csv_file = form.cleaned_data['document']
        final = self.parse(csv_file)
        file_name = csv_file.name
        # BUGFIX: previously used str.strip('.csv'), which strips any of
        # the characters '.', 'c', 's', 'v' from BOTH ends of the name
        # (e.g. 'stats.csv' -> 'tats'); remove the extension as a suffix.
        base_name = file_name.replace(' ', '_')
        if base_name.endswith('.csv'):
            base_name = base_name[:-len('.csv')]
        results_file_name = '{}_user_stats.csv'.format(base_name)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(results_file_name)
        writer = csv.writer(response)
        for row in final:
            writer.writerow(row)
        return response

    @staticmethod
    def find_user_by_email(email):
        # First user owning this address, or None.
        user_list = OSFUser.objects.filter(emails__address=email)
        return user_list[0] if user_list.exists() else None

    @staticmethod
    def find_user_by_full_name(full_name):
        # Unique full-name match only; ambiguous names return None.
        user_list = OSFUser.objects.filter(fullname=full_name)
        return user_list[0] if user_list.count() == 1 else None

    @staticmethod
    def find_user_by_family_name(family_name):
        # Unique family-name match only; ambiguous names return None.
        user_list = OSFUser.objects.filter(family_name=family_name)
        return user_list[0] if user_list.count() == 1 else None

    @staticmethod
    def get_user_logs_since_workshop(user, workshop_date):
        # Activity is counted from the day after the workshop.
        query_date = workshop_date + timedelta(days=1)
        return NodeLog.objects.filter(user=user, date__gt=query_date)

    @staticmethod
    def get_user_nodes_since_workshop(user, workshop_date):
        query_date = workshop_date + timedelta(days=1)
        return Node.objects.filter(creator=user, date_created__gt=query_date)

    def parse(self, csv_file):
        """ Parse and add to csv file.

        :param csv_file: Comma separated
        :return: A list of rows with the activity columns appended
        """
        result = []
        csv_reader = csv.reader(csv_file)
        for index, row in enumerate(csv_reader):
            if index == 0:
                # Header row: append titles for the new columns.
                row.extend([
                    'OSF ID', 'Logs Since Workshop', 'Nodes Created Since Workshop', 'Last Log Date'
                ])
                result.append(row)
                continue
            # Column layout (by index): 1 = workshop date, 4 = full name,
            # 5 = email.
            email = row[5]
            user_by_email = self.find_user_by_email(email)
            if not user_by_email:
                # Fall back to name matching when the email is unknown.
                full_name = row[4]
                try:
                    family_name = impute_names(full_name)['family']
                except UnicodeDecodeError:
                    row.extend(['Unable to parse name'])
                    result.append(row)
                    continue
                user_by_name = self.find_user_by_full_name(full_name) or self.find_user_by_family_name(family_name)
                if not user_by_name:
                    row.extend(['', 0, 0, ''])
                    result.append(row)
                    continue
                else:
                    user = user_by_name
            else:
                user = user_by_email
            workshop_date = pytz.utc.localize(datetime.strptime(row[1], '%m/%d/%y'))
            nodes = self.get_user_nodes_since_workshop(user, workshop_date)
            user_logs = self.get_user_logs_since_workshop(user, workshop_date)
            last_log_date = user_logs.latest().date.strftime('%m/%d/%y') if user_logs else ''
            row.extend([
                user.pk, len(user_logs), len(nodes), last_log_date
            ])
            result.append(row)
        return result

    def form_invalid(self, form):
        # BUGFIX: the parent's response was computed but never returned,
        # so invalid submissions answered with None.
        return super(UserWorkshopFormView, self).form_invalid(form)
class GetUserLink(PermissionRequiredMixin, TemplateView):
    """Base view for admin modals that display a generated link for a user.

    Subclasses implement get_link/get_link_type and may add claim links.
    """
    permission_required = 'osf.change_osfuser'
    template_name = 'users/get_link.html'
    raise_exception = True

    def get_link(self, user):
        # Subclasses return the URL to display.
        raise NotImplementedError()

    def get_link_type(self):
        # Used in the title of the link modal
        raise NotImplementedError()

    def get_claim_links(self, user):
        # Optional list of per-node claim URLs; None hides the section.
        return None

    def get_context_data(self, **kwargs):
        user = OSFUser.load(self.kwargs.get('guid'))
        kwargs['user_link'] = self.get_link(user)
        kwargs['username'] = user.username
        kwargs['title'] = self.get_link_type()
        kwargs['node_claim_links'] = self.get_claim_links(user)
        return super(GetUserLink, self).get_context_data(**kwargs)
class GetUserConfirmationLink(GetUserLink):
    """Admin view exposing a user's forced email-confirmation URL."""

    def get_link(self, user):
        """Return a confirmation URL for the user's primary username."""
        return user.get_confirmation_url(user.username, force=True)

    def get_link_type(self):
        """Title shown in the link modal."""
        return 'User Confirmation'
class GetPasswordResetLink(GetUserLink):
    """Admin view that mints a fresh, 48-hour password-reset link for a user."""

    def get_link(self, user):
        """Rotate the user's verification key and return the reset URL."""
        user.verification_key_v2 = generate_verification_key(verification_type='password')
        user.verification_key_v2['expires'] = datetime.utcnow().replace(tzinfo=pytz.utc) + timedelta(hours=48)
        user.save()

        reset_url = furl(DOMAIN)
        reset_url.path.add('resetpassword/{}/{}'.format(user._id, user.verification_key_v2['token']))
        return reset_url

    def get_link_type(self):
        """Title shown in the link modal."""
        return 'Password Reset'
class GetUserClaimLinks(GetUserLink):
    """Admin view listing claim URLs for every node with an unclaimed record for the user."""

    def get_claim_links(self, user):
        """Return one claim URL per unclaimed record, or a placeholder message.

        Bug fix: ``dict.iteritems()`` exists only on Python 2; ``items()``
        behaves the same here and works on both Python 2 and 3.
        """
        links = []

        for guid, value in user.unclaimed_records.items():
            # NOTE(review): assumes the recorded guid still resolves to a node;
            # Node.load returning None would raise below -- confirm invariant.
            node = Node.load(guid)
            url = '{base_url}user/{uid}/{project_id}/claim/?token={token}'.format(
                base_url=DOMAIN,
                uid=user._id,
                project_id=guid,
                token=value['token']
            )
            links.append('Claim URL for node {}: {}'.format(node._id, url))

        return links or ['User currently has no active unclaimed records for any nodes.']

    def get_link(self, user):
        """No single link applies to this view."""
        return None

    def get_link_type(self):
        """Title shown in the link modal."""
        return 'Claim User'
class ResetPasswordView(PermissionRequiredMixin, FormView):
    """Admin view that emails a password-reset link to one of a user's addresses."""

    form_class = EmailResetForm
    template_name = 'users/reset.html'
    context_object_name = 'user'
    permission_required = 'osf.change_osfuser'
    raise_exception = True

    def dispatch(self, request, *args, **kwargs):
        """Resolve the target user once; 404 early if the guid is unknown."""
        self.user = OSFUser.load(self.kwargs.get('guid'))
        if self.user is None:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        return super(ResetPasswordView, self).dispatch(request, *args, **kwargs)

    def get_initial(self):
        """Seed the form with the user's guid and a choices-style list of emails."""
        self.initial = {
            'guid': self.user._id,
            'emails': [(r, r) for r in self.user.emails.values_list('address', flat=True)],
        }
        return super(ResetPasswordView, self).get_initial()

    def get_context_data(self, **kwargs):
        """Expose guid and emails to the template without clobbering caller-supplied values."""
        kwargs.setdefault('guid', self.user._id)
        kwargs.setdefault('emails', self.user.emails)
        return super(ResetPasswordView, self).get_context_data(**kwargs)

    def form_valid(self, form):
        """Rotate the user's verification key, email the reset link, and log the action."""
        email = form.cleaned_data.get('emails')
        user = get_user(email)
        # Guard against the selected email resolving to a different account
        # than the one this view was opened for.
        if user is None or user._id != self.kwargs.get('guid'):
            return HttpResponse(
                '{} with id "{}" and email "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid'),
                    email
                ),
                status=409
            )
        reset_abs_url = furl(DOMAIN)
        # NOTE(review): unlike GetPasswordResetLink, no 'expires' value is set
        # on the new key here -- confirm whether that is intentional.
        user.verification_key_v2 = generate_verification_key(verification_type='password')
        user.save()
        reset_abs_url.path.add(('resetpassword/{}/{}'.format(user._id, user.verification_key_v2['token'])))
        send_mail(
            subject='Reset OSF Password',
            message='Follow this link to reset your password: {}'.format(
                reset_abs_url.url
            ),
            from_email=SUPPORT_EMAIL,
            recipient_list=[email]
        )
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user.pk,
            object_repr='User',
            message='Emailed user {} a reset link.'.format(user.pk),
            action_flag=USER_EMAILED
        )
        return super(ResetPasswordView, self).form_valid(form)

    @property
    def success_url(self):
        # Back to the admin detail page for this user.
        return reverse_user(self.kwargs.get('guid'))
class UserReindexElastic(UserDeleteView):
    """Admin action that re-indexes a user in Elasticsearch.

    Reuses the delete-view confirm flow: despite the method name, nothing is
    deleted -- the confirm POST triggers the reindex.
    """

    template_name = 'users/reindex_user_elastic.html'

    def delete(self, request, *args, **kwargs):
        user = self.get_object()
        # NOTE(review): `async` became a reserved keyword in Python 3.7, so
        # this keyword argument is a SyntaxError there. If this codebase moves
        # to py3.7+, the callee's parameter must be renamed (e.g.
        # async_update) -- confirm the target runtime.
        search.search.update_user(user, async=False)
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user._id,
            object_repr='User',
            message='User Reindexed (Elastic): {}'.format(user._id),
            action_flag=REINDEX_ELASTIC
        )
        return redirect(reverse_user(self.kwargs.get('guid')))
| |
"""Tests for dense recursive polynomials' arithmetics. """
from sympy.polys.densebasic import (
dup_normal, dmp_normal,
)
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_sub_term, dmp_sub_term,
dup_mul_term, dmp_mul_term,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
dup_lshift, dup_rshift,
dup_abs, dmp_abs,
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr, dmp_sqr,
dup_pow, dmp_pow,
dup_add_mul, dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_pdiv, dup_prem, dup_pquo, dup_pexquo,
dmp_pdiv, dmp_prem, dmp_pquo, dmp_pexquo,
dup_rr_div, dmp_rr_div,
dup_ff_div, dmp_ff_div,
dup_div, dup_rem, dup_quo, dup_exquo,
dmp_div, dmp_rem, dmp_quo, dmp_exquo,
dup_max_norm, dmp_max_norm,
dup_l1_norm, dmp_l1_norm,
dup_expand, dmp_expand,
)
from sympy.polys.polyerrors import (
ExactQuotientFailed,
)
from sympy.polys.specialpolys import f_0
from sympy.polys.algebratools import ZZ, QQ
from sympy import raises
# Shared fixture: f_0 lifted to QQ and scaled by 1/7, used by the ground-ops tests.
F_0 = dmp_mul_ground(dmp_normal(f_0, 2, QQ), QQ(1,7), 2, QQ)
def test_dup_add_term():
    """Test dup_add_term(): add c*x**i to a dense univariate polynomial."""
    f = dup_normal([], ZZ)

    assert dup_add_term(f, ZZ(0), 0, ZZ) == dup_normal([], ZZ)

    assert dup_add_term(f, ZZ(1), 0, ZZ) == dup_normal([1], ZZ)
    assert dup_add_term(f, ZZ(1), 1, ZZ) == dup_normal([1, 0], ZZ)
    assert dup_add_term(f, ZZ(1), 2, ZZ) == dup_normal([1, 0, 0], ZZ)

    f = dup_normal([1,1,1], ZZ)

    assert dup_add_term(f, ZZ(1), 0, ZZ) == dup_normal([1, 1, 2], ZZ)
    assert dup_add_term(f, ZZ(1), 1, ZZ) == dup_normal([1, 2, 1], ZZ)
    assert dup_add_term(f, ZZ(1), 2, ZZ) == dup_normal([2, 1, 1], ZZ)

    # Adding above the current degree pads with zero coefficients.
    assert dup_add_term(f, ZZ(1), 3, ZZ) == dup_normal([1, 1, 1, 1], ZZ)
    assert dup_add_term(f, ZZ(1), 4, ZZ) == dup_normal([1, 0, 1, 1, 1], ZZ)
    assert dup_add_term(f, ZZ(1), 5, ZZ) == dup_normal([1, 0, 0, 1, 1, 1], ZZ)
    assert dup_add_term(f, ZZ(1), 6, ZZ) == dup_normal([1, 0, 0, 0, 1, 1, 1], ZZ)

    # Cancelling the leading term strips it from the result.
    assert dup_add_term(f,ZZ(-1), 2, ZZ) == dup_normal([1, 1], ZZ)

def test_dmp_add_term():
    """Test dmp_add_term(): multivariate counterpart of dup_add_term."""
    assert dmp_add_term([ZZ(1),ZZ(1),ZZ(1)], ZZ(1), 2, 0, ZZ) == \
        dup_add_term([ZZ(1),ZZ(1),ZZ(1)], ZZ(1), 2, ZZ)

    # Adding a zero term is a no-op.
    assert dmp_add_term(f_0, [[]], 3, 2, ZZ) == f_0
    assert dmp_add_term(F_0, [[]], 3, 2, QQ) == F_0

def test_dup_sub_term():
    """Test dup_sub_term(): subtract c*x**i from a dense univariate polynomial."""
    f = dup_normal([], ZZ)

    assert dup_sub_term(f, ZZ(0), 0, ZZ) == dup_normal([], ZZ)

    assert dup_sub_term(f, ZZ(1), 0, ZZ) == dup_normal([-1], ZZ)
    assert dup_sub_term(f, ZZ(1), 1, ZZ) == dup_normal([-1, 0], ZZ)
    assert dup_sub_term(f, ZZ(1), 2, ZZ) == dup_normal([-1, 0, 0], ZZ)

    f = dup_normal([1,1,1], ZZ)

    assert dup_sub_term(f, ZZ(2), 0, ZZ) == dup_normal([ 1, 1,-1], ZZ)
    assert dup_sub_term(f, ZZ(2), 1, ZZ) == dup_normal([ 1,-1, 1], ZZ)
    assert dup_sub_term(f, ZZ(2), 2, ZZ) == dup_normal([-1, 1, 1], ZZ)

    assert dup_sub_term(f, ZZ(1), 3, ZZ) == dup_normal([-1, 1, 1, 1], ZZ)
    assert dup_sub_term(f, ZZ(1), 4, ZZ) == dup_normal([-1, 0, 1, 1, 1], ZZ)
    assert dup_sub_term(f, ZZ(1), 5, ZZ) == dup_normal([-1, 0, 0, 1, 1, 1], ZZ)
    assert dup_sub_term(f, ZZ(1), 6, ZZ) == dup_normal([-1, 0, 0, 0, 1, 1, 1], ZZ)

    # Subtracting the leading coefficient exactly strips the term.
    assert dup_sub_term(f, ZZ(1), 2, ZZ) == dup_normal([1, 1], ZZ)

def test_dmp_sub_term():
    """Test dmp_sub_term(): multivariate counterpart of dup_sub_term."""
    assert dmp_sub_term([ZZ(1),ZZ(1),ZZ(1)], ZZ(1), 2, 0, ZZ) == \
        dup_sub_term([ZZ(1),ZZ(1),ZZ(1)], ZZ(1), 2, ZZ)

    # Subtracting a zero term is a no-op.
    assert dmp_sub_term(f_0, [[]], 3, 2, ZZ) == f_0
    assert dmp_sub_term(F_0, [[]], 3, 2, QQ) == F_0
def test_dup_mul_term():
    """Test dup_mul_term(): multiply a dense polynomial by c*x**i."""
    f = dup_normal([], ZZ)

    assert dup_mul_term(f, ZZ(2), 3, ZZ) == dup_normal([], ZZ)

    f = dup_normal([1,1], ZZ)

    # Multiplying by a zero coefficient annihilates the polynomial.
    assert dup_mul_term(f, ZZ(0), 3, ZZ) == dup_normal([], ZZ)

    f = dup_normal([1,2,3], ZZ)

    assert dup_mul_term(f, ZZ(2), 0, ZZ) == dup_normal([2,4,6], ZZ)
    assert dup_mul_term(f, ZZ(2), 1, ZZ) == dup_normal([2,4,6,0], ZZ)
    assert dup_mul_term(f, ZZ(2), 2, ZZ) == dup_normal([2,4,6,0,0], ZZ)
    assert dup_mul_term(f, ZZ(2), 3, ZZ) == dup_normal([2,4,6,0,0,0], ZZ)

def test_dmp_mul_term():
    """Test dmp_mul_term(): multivariate counterpart of dup_mul_term."""
    assert dmp_mul_term([ZZ(1),ZZ(2),ZZ(3)], ZZ(2), 1, 0, ZZ) == \
        dup_mul_term([ZZ(1),ZZ(2),ZZ(3)], ZZ(2), 1, ZZ)

    assert dmp_mul_term([[]], [ZZ(2)], 3, 1, ZZ) == [[]]
    assert dmp_mul_term([[ZZ(1)]], [], 3, 1, ZZ) == [[]]

    assert dmp_mul_term([[ZZ(1),ZZ(2)], [ZZ(3)]], [ZZ(2)], 2, 1, ZZ) == \
        [[ZZ(2),ZZ(4)], [ZZ(6)], [], []]

    assert dmp_mul_term([[]], [QQ(2,3)], 3, 1, QQ) == [[]]
    assert dmp_mul_term([[QQ(1,2)]], [], 3, 1, QQ) == [[]]

    assert dmp_mul_term([[QQ(1,5),QQ(2,5)], [QQ(3,5)]], [QQ(2,3)], 2, 1, QQ) == \
        [[QQ(2,15),QQ(4,15)], [QQ(6,15)], [], []]

def test_dup_mul_ground():
    """Test dup_mul_ground(): multiply a dense polynomial by a ground element."""
    f = dup_normal([], ZZ)

    assert dup_mul_ground(f, ZZ(2), ZZ) == dup_normal([], ZZ)

    f = dup_normal([1,2,3], ZZ)

    assert dup_mul_ground(f, ZZ(0), ZZ) == dup_normal([], ZZ)
    assert dup_mul_ground(f, ZZ(2), ZZ) == dup_normal([2,4,6], ZZ)

def test_dmp_mul_ground():
    """Test dmp_mul_ground(): multivariate counterpart of dup_mul_ground."""
    assert dmp_mul_ground(f_0, ZZ(2), 2, ZZ) == [
        [[ZZ(2),ZZ(4),ZZ(6)], [ZZ(4)]],
        [[ZZ(6)]],
        [[ZZ(8),ZZ(10),ZZ(12)], [ZZ(2),ZZ(4),ZZ(2)], [ZZ(2)]]
    ]

    assert dmp_mul_ground(F_0, QQ(1,2), 2, QQ) == [
        [[QQ(1,14),QQ(2,14),QQ(3,14)], [QQ(2,14)]],
        [[QQ(3,14)]],
        [[QQ(4,14),QQ(5,14),QQ(6,14)], [QQ(1,14),QQ(2,14),QQ(1,14)], [QQ(1,14)]]
    ]
def test_dup_quo_ground():
    """Test dup_quo_ground(): exact quotient by a ground element (raises if inexact)."""
    raises(ZeroDivisionError, 'dup_quo_ground(dup_normal([1,2,3], ZZ), ZZ(0), ZZ)')
    # Division must be exact over ZZ; [1,2,3] / 3 is not.
    raises(ExactQuotientFailed, 'dup_quo_ground(dup_normal([1,2,3], ZZ), ZZ(3), ZZ)')

    f = dup_normal([], ZZ)

    assert dup_quo_ground(f, ZZ(3), ZZ) == dup_normal([], ZZ)

    f = dup_normal([6,2,8], ZZ)

    assert dup_quo_ground(f, ZZ(1), ZZ) == f
    assert dup_quo_ground(f, ZZ(2), ZZ) == dup_normal([3,1,4], ZZ)

    f = dup_normal([6,2,8], QQ)

    assert dup_quo_ground(f, QQ(1), QQ) == f
    assert dup_quo_ground(f, QQ(2), QQ) == [QQ(3),QQ(1),QQ(4)]
    # Over a field every nonzero division is exact.
    assert dup_quo_ground(f, QQ(7), QQ) == [QQ(6,7),QQ(2,7),QQ(8,7)]
def test_dup_exquo_ground():
    """Test dup_exquo_ground(): quotient by a ground element, discarding remainders."""
    raises(ZeroDivisionError, 'dup_exquo_ground(dup_normal([1,2,3], ZZ), ZZ(0), ZZ)')

    f = dup_normal([], ZZ)

    # Bug fix: this assertion called dup_quo_ground by copy-paste; the
    # function under test here is dup_exquo_ground.
    assert dup_exquo_ground(f, ZZ(3), ZZ) == dup_normal([], ZZ)

    f = dup_normal([6,2,8], ZZ)

    assert dup_exquo_ground(f, ZZ(1), ZZ) == f
    assert dup_exquo_ground(f, ZZ(2), ZZ) == dup_normal([3,1,4], ZZ)
    # Inexact division truncates coefficient-wise instead of raising.
    assert dup_exquo_ground(f, ZZ(3), ZZ) == dup_normal([2,0,2], ZZ)

    f = dup_normal([6,2,8], QQ)

    assert dup_exquo_ground(f, QQ(1), QQ) == f
    assert dup_exquo_ground(f, QQ(2), QQ) == [QQ(3),QQ(1),QQ(4)]
    assert dup_exquo_ground(f, QQ(7), QQ) == [QQ(6,7),QQ(2,7),QQ(8,7)]
def test_dmp_quo_ground():
    """Test dmp_quo_ground(): multivariate exact quotient by a ground element."""
    f = dmp_normal([[6],[2],[8]], 1, ZZ)

    assert dmp_quo_ground(f, ZZ(1), 1, ZZ) == f
    assert dmp_quo_ground(f, ZZ(2), 1, ZZ) == dmp_normal([[3],[1],[4]], 1, ZZ)

def test_dmp_exquo_ground():
    """Test dmp_exquo_ground(): multivariate quotient discarding remainders."""
    f = dmp_normal([[6],[2],[8]], 1, ZZ)

    assert dmp_exquo_ground(f, ZZ(1), 1, ZZ) == f
    assert dmp_exquo_ground(f, ZZ(2), 1, ZZ) == dmp_normal([[3],[1],[4]], 1, ZZ)
    assert dmp_normal(dmp_exquo_ground(f, ZZ(3), 1, ZZ), 1, ZZ) == dmp_normal([[2],[],[2]], 1, ZZ)

def test_dup_lshift():
    """Test dup_lshift(): multiply by x**n (append n zero coefficients)."""
    assert dup_lshift([], 3, ZZ) == []
    assert dup_lshift([1], 3, ZZ) == [1,0,0,0]

def test_dup_rshift():
    """Test dup_rshift(): exact division by x**n (drop n trailing zeros)."""
    assert dup_rshift([], 3, ZZ) == []
    assert dup_rshift([1,0,0,0], 3, ZZ) == [1]
def test_dup_abs():
    """Test dup_abs(): coefficient-wise absolute value."""
    assert dup_abs([], ZZ) == []
    assert dup_abs([ZZ( 1)], ZZ) == [ZZ(1)]
    assert dup_abs([ZZ(-7)], ZZ) == [ZZ(7)]
    assert dup_abs([ZZ(-1),ZZ(2),ZZ(3)], ZZ) == [ZZ(1),ZZ(2),ZZ(3)]

    assert dup_abs([], QQ) == []
    assert dup_abs([QQ( 1,2)], QQ) == [QQ(1,2)]
    assert dup_abs([QQ(-7,3)], QQ) == [QQ(7,3)]
    assert dup_abs([QQ(-1,7),QQ(2,7),QQ(3,7)], QQ) == [QQ(1,7),QQ(2,7),QQ(3,7)]

def test_dmp_abs():
    """Test dmp_abs(): multivariate coefficient-wise absolute value."""
    assert dmp_abs([ZZ(-1)], 0, ZZ) == [ZZ(1)]
    assert dmp_abs([QQ(-1,2)], 0, QQ) == [QQ(1,2)]

    assert dmp_abs([[[]]], 2, ZZ) == [[[]]]
    assert dmp_abs([[[ZZ(1)]]], 2, ZZ) == [[[ZZ(1)]]]
    assert dmp_abs([[[ZZ(-7)]]], 2, ZZ) == [[[ZZ(7)]]]

    assert dmp_abs([[[]]], 2, QQ) == [[[]]]
    assert dmp_abs([[[QQ(1,2)]]], 2, QQ) == [[[QQ(1,2)]]]
    assert dmp_abs([[[QQ(-7,9)]]], 2, QQ) == [[[QQ(7,9)]]]

def test_dup_neg():
    """Test dup_neg(): coefficient-wise negation."""
    assert dup_neg([], ZZ) == []
    assert dup_neg([ZZ(1)], ZZ) == [ZZ(-1)]
    assert dup_neg([ZZ(-7)], ZZ) == [ZZ(7)]
    assert dup_neg([ZZ(-1),ZZ(2),ZZ(3)], ZZ) == [ZZ(1),ZZ(-2),ZZ(-3)]

    assert dup_neg([], QQ) == []
    assert dup_neg([QQ(1,2)], QQ) == [QQ(-1,2)]
    assert dup_neg([QQ(-7,9)], QQ) == [QQ(7,9)]
    assert dup_neg([QQ(-1,7),QQ(2,7),QQ(3,7)], QQ) == [QQ(1,7),QQ(-2,7),QQ(-3,7)]

def test_dmp_neg():
    """Test dmp_neg(): multivariate coefficient-wise negation."""
    assert dmp_neg([ZZ(-1)], 0, ZZ) == [ZZ(1)]
    assert dmp_neg([QQ(-1,2)], 0, QQ) == [QQ(1,2)]

    assert dmp_neg([[[]]], 2, ZZ) == [[[]]]
    assert dmp_neg([[[ZZ(1)]]], 2, ZZ) == [[[ZZ(-1)]]]
    assert dmp_neg([[[ZZ(-7)]]], 2, ZZ) == [[[ZZ(7)]]]

    assert dmp_neg([[[]]], 2, QQ) == [[[]]]
    assert dmp_neg([[[QQ(1,9)]]], 2, QQ) == [[[QQ(-1,9)]]]
    assert dmp_neg([[[QQ(-7,9)]]], 2, QQ) == [[[QQ(7,9)]]]
def test_dup_add():
    """Test dup_add(): dense univariate polynomial addition."""
    assert dup_add([], [], ZZ) == []
    assert dup_add([ZZ(1)], [], ZZ) == [ZZ(1)]
    assert dup_add([], [ZZ(1)], ZZ) == [ZZ(1)]
    assert dup_add([ZZ(1)], [ZZ(1)], ZZ) == [ZZ(2)]
    assert dup_add([ZZ(1)], [ZZ(2)], ZZ) == [ZZ(3)]

    assert dup_add([ZZ(1),ZZ(2)], [ZZ(1)], ZZ) == [ZZ(1),ZZ(3)]
    assert dup_add([ZZ(1)], [ZZ(1),ZZ(2)], ZZ) == [ZZ(1),ZZ(3)]

    assert dup_add([ZZ(1),ZZ(2),ZZ(3)], [ZZ(8),ZZ(9),ZZ(10)], ZZ) == [ZZ(9),ZZ(11),ZZ(13)]

    assert dup_add([], [], QQ) == []
    assert dup_add([QQ(1,2)], [], QQ) == [QQ(1,2)]
    assert dup_add([], [QQ(1,2)], QQ) == [QQ(1,2)]
    assert dup_add([QQ(1,4)], [QQ(1,4)], QQ) == [QQ(1,2)]
    assert dup_add([QQ(1,4)], [QQ(1,2)], QQ) == [QQ(3,4)]

    assert dup_add([QQ(1,2),QQ(2,3)], [QQ(1)], QQ) == [QQ(1,2),QQ(5,3)]
    assert dup_add([QQ(1)], [QQ(1,2),QQ(2,3)], QQ) == [QQ(1,2),QQ(5,3)]

    assert dup_add([QQ(1,7),QQ(2,7),QQ(3,7)], [QQ(8,7),QQ(9,7),QQ(10,7)], QQ) == [QQ(9,7),QQ(11,7),QQ(13,7)]

def test_dmp_add():
    """Test dmp_add(): multivariate polynomial addition."""
    assert dmp_add([ZZ(1),ZZ(2)], [ZZ(1)], 0, ZZ) == \
        dup_add([ZZ(1),ZZ(2)], [ZZ(1)], ZZ)
    assert dmp_add([QQ(1,2),QQ(2,3)], [QQ(1)], 0, QQ) == \
        dup_add([QQ(1,2),QQ(2,3)], [QQ(1)], QQ)

    assert dmp_add([[[]]], [[[]]], 2, ZZ) == [[[]]]
    assert dmp_add([[[ZZ(1)]]], [[[]]], 2, ZZ) == [[[ZZ(1)]]]
    assert dmp_add([[[]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(1)]]]
    assert dmp_add([[[ZZ(2)]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(3)]]]
    assert dmp_add([[[ZZ(1)]]], [[[ZZ(2)]]], 2, ZZ) == [[[ZZ(3)]]]

    assert dmp_add([[[]]], [[[]]], 2, QQ) == [[[]]]
    assert dmp_add([[[QQ(1,2)]]], [[[]]], 2, QQ) == [[[QQ(1,2)]]]
    assert dmp_add([[[]]], [[[QQ(1,2)]]], 2, QQ) == [[[QQ(1,2)]]]
    assert dmp_add([[[QQ(2,7)]]], [[[QQ(1,7)]]], 2, QQ) == [[[QQ(3,7)]]]
    assert dmp_add([[[QQ(1,7)]]], [[[QQ(2,7)]]], 2, QQ) == [[[QQ(3,7)]]]

def test_dup_sub():
    """Test dup_sub(): dense univariate polynomial subtraction."""
    assert dup_sub([], [], ZZ) == []
    assert dup_sub([ZZ(1)], [], ZZ) == [ZZ(1)]
    assert dup_sub([], [ZZ(1)], ZZ) == [ZZ(-1)]
    assert dup_sub([ZZ(1)], [ZZ(1)], ZZ) == []
    assert dup_sub([ZZ(1)], [ZZ(2)], ZZ) == [ZZ(-1)]

    assert dup_sub([ZZ(1),ZZ(2)], [ZZ(1)], ZZ) == [ZZ(1),ZZ(1)]
    assert dup_sub([ZZ(1)], [ZZ(1),ZZ(2)], ZZ) == [ZZ(-1),ZZ(-1)]

    assert dup_sub([ZZ(3),ZZ(2),ZZ(1)], [ZZ(8),ZZ(9),ZZ(10)], ZZ) == [ZZ(-5),ZZ(-7),ZZ(-9)]

    assert dup_sub([], [], QQ) == []
    assert dup_sub([QQ(1,2)], [], QQ) == [QQ(1,2)]
    assert dup_sub([], [QQ(1,2)], QQ) == [QQ(-1,2)]
    assert dup_sub([QQ(1,3)], [QQ(1,3)], QQ) == []
    assert dup_sub([QQ(1,3)], [QQ(2,3)], QQ) == [QQ(-1,3)]

    assert dup_sub([QQ(1,7),QQ(2,7)], [QQ(1)], QQ) == [QQ(1,7),QQ(-5,7)]
    assert dup_sub([QQ(1)], [QQ(1,7),QQ(2,7)], QQ) == [QQ(-1,7),QQ(5,7)]

    assert dup_sub([QQ(3,7),QQ(2,7),QQ(1,7)], [QQ(8,7),QQ(9,7),QQ(10,7)], QQ) == [QQ(-5,7),QQ(-7,7),QQ(-9,7)]

def test_dmp_sub():
    """Test dmp_sub(): multivariate polynomial subtraction."""
    assert dmp_sub([ZZ(1),ZZ(2)], [ZZ(1)], 0, ZZ) == \
        dup_sub([ZZ(1),ZZ(2)], [ZZ(1)], ZZ)
    assert dmp_sub([QQ(1,2),QQ(2,3)], [QQ(1)], 0, QQ) == \
        dup_sub([QQ(1,2),QQ(2,3)], [QQ(1)], QQ)

    assert dmp_sub([[[]]], [[[]]], 2, ZZ) == [[[]]]
    assert dmp_sub([[[ZZ(1)]]], [[[]]], 2, ZZ) == [[[ZZ(1)]]]
    assert dmp_sub([[[]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(-1)]]]
    assert dmp_sub([[[ZZ(2)]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(1)]]]
    assert dmp_sub([[[ZZ(1)]]], [[[ZZ(2)]]], 2, ZZ) == [[[ZZ(-1)]]]

    assert dmp_sub([[[]]], [[[]]], 2, QQ) == [[[]]]
    assert dmp_sub([[[QQ(1,2)]]], [[[]]], 2, QQ) == [[[QQ(1,2)]]]
    assert dmp_sub([[[]]], [[[QQ(1,2)]]], 2, QQ) == [[[QQ(-1,2)]]]
    assert dmp_sub([[[QQ(2,7)]]], [[[QQ(1,7)]]], 2, QQ) == [[[QQ(1,7)]]]
    assert dmp_sub([[[QQ(1,7)]]], [[[QQ(2,7)]]], 2, QQ) == [[[QQ(-1,7)]]]
def test_dup_add_mul():
    """Test dup_add_mul(): f + g*h in one pass."""
    assert dup_add_mul([ZZ(1),ZZ(2),ZZ(3)], [ZZ(3),ZZ(2),ZZ(1)],
        [ZZ(1),ZZ(2)], ZZ) == [ZZ(3), ZZ(9), ZZ(7), ZZ(5)]
def test_dmp_add_mul():
    """Test dmp_add_mul(): f + g*h for multivariate polynomials.

    Bug fix: this test was named ``test_dup_add_mul``, duplicating the
    previous definition and silently shadowing it so the univariate test
    never ran; the body tests dmp_add_mul, so it is renamed accordingly.
    """
    assert dmp_add_mul([[ZZ(1),ZZ(2)],[ZZ(3)]], [[ZZ(3)],[ZZ(2),ZZ(1)]],
        [[ZZ(1)],[ZZ(2)]], 1, ZZ) == [[ZZ(3)], [ZZ(3), ZZ(9)], [ZZ(4), ZZ(5)]]
def test_dup_sub_mul():
    """Test dup_sub_mul(): f - g*h in one pass."""
    assert dup_sub_mul([ZZ(1),ZZ(2),ZZ(3)], [ZZ(3),ZZ(2),ZZ(1)],
        [ZZ(1),ZZ(2)], ZZ) == [ZZ(-3),ZZ(-7),ZZ(-3), ZZ(1)]
def test_dmp_sub_mul():
    """Test dmp_sub_mul(): f - g*h for multivariate polynomials.

    Bug fix: this test was named ``test_dup_sub_mul``, duplicating the
    previous definition and silently shadowing it so the univariate test
    never ran; the body tests dmp_sub_mul, so it is renamed accordingly.
    """
    assert dmp_sub_mul([[ZZ(1),ZZ(2)],[ZZ(3)]], [[ZZ(3)],[ZZ(2),ZZ(1)]],
        [[ZZ(1)],[ZZ(2)]], 1, ZZ) == [[ZZ(-3)], [ZZ(-1), ZZ(-5)], [ZZ(-4), ZZ(1)]]
def test_dup_mul():
    """Test dup_mul(): dense univariate polynomial multiplication."""
    assert dup_mul([], [], ZZ) == []
    assert dup_mul([], [ZZ(1)], ZZ) == []
    assert dup_mul([ZZ(1)], [], ZZ) == []
    assert dup_mul([ZZ(1)], [ZZ(1)], ZZ) == [ZZ(1)]
    assert dup_mul([ZZ(5)], [ZZ(7)], ZZ) == [ZZ(35)]

    assert dup_mul([], [], QQ) == []
    assert dup_mul([], [QQ(1,2)], QQ) == []
    assert dup_mul([QQ(1,2)], [], QQ) == []
    assert dup_mul([QQ(1,2)], [QQ(4,7)], QQ) == [QQ(2,7)]
    assert dup_mul([QQ(5,7)], [QQ(3,7)], QQ) == [QQ(15,49)]

    f = dup_normal([3,0,0,6,1,2], ZZ)
    g = dup_normal([4,0,1,0], ZZ)
    h = dup_normal([12,0,3,24,4,14,1,2,0], ZZ)

    # Multiplication is commutative.
    assert dup_mul(f, g, ZZ) == h
    assert dup_mul(g, f, ZZ) == h

    f = dup_normal([2,0,0,1,7], ZZ)
    h = dup_normal([4,0,0,4,28,0,1,14,49], ZZ)

    assert dup_mul(f, f, ZZ) == h

def test_dmp_mul():
    """Test dmp_mul(): multivariate polynomial multiplication."""
    assert dmp_mul([ZZ(5)], [ZZ(7)], 0, ZZ) == \
        dup_mul([ZZ(5)], [ZZ(7)], ZZ)
    assert dmp_mul([QQ(5,7)], [QQ(3,7)], 0, QQ) == \
        dup_mul([QQ(5,7)], [QQ(3,7)], QQ)

    assert dmp_mul([[[]]], [[[]]], 2, ZZ) == [[[]]]
    assert dmp_mul([[[ZZ(1)]]], [[[]]], 2, ZZ) == [[[]]]
    assert dmp_mul([[[]]], [[[ZZ(1)]]], 2, ZZ) == [[[]]]
    assert dmp_mul([[[ZZ(2)]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(2)]]]
    assert dmp_mul([[[ZZ(1)]]], [[[ZZ(2)]]], 2, ZZ) == [[[ZZ(2)]]]

    assert dmp_mul([[[]]], [[[]]], 2, QQ) == [[[]]]
    assert dmp_mul([[[QQ(1,2)]]], [[[]]], 2, QQ) == [[[]]]
    assert dmp_mul([[[]]], [[[QQ(1,2)]]], 2, QQ) == [[[]]]
    assert dmp_mul([[[QQ(2,7)]]], [[[QQ(1,3)]]], 2, QQ) == [[[QQ(2,21)]]]
    assert dmp_mul([[[QQ(1,7)]]], [[[QQ(2,3)]]], 2, QQ) == [[[QQ(2,21)]]]
def test_dup_sqr():
    """Test dup_sqr(): squaring a dense univariate polynomial."""
    assert dup_sqr([], ZZ) == []
    assert dup_sqr([ZZ(2)], ZZ) == [ZZ(4)]
    assert dup_sqr([ZZ(1),ZZ(2)], ZZ) == [ZZ(1),ZZ(4),ZZ(4)]

    assert dup_sqr([], QQ) == []
    assert dup_sqr([QQ(2,3)], QQ) == [QQ(4,9)]
    assert dup_sqr([QQ(1,3),QQ(2,3)], QQ) == [QQ(1,9),QQ(4,9),QQ(4,9)]

    f = dup_normal([2,0,0,1,7], ZZ)

    # Squaring must agree with self-multiplication.
    assert dup_sqr(f, ZZ) == dup_normal([4,0,0,4,28,0,1,14,49], ZZ)

def test_dmp_sqr():
    """Test dmp_sqr(): squaring a multivariate polynomial."""
    assert dmp_sqr([ZZ(1),ZZ(2)], 0, ZZ) == \
        dup_sqr([ZZ(1),ZZ(2)], ZZ)

    assert dmp_sqr([[[]]], 2, ZZ) == [[[]]]
    assert dmp_sqr([[[ZZ(2)]]], 2, ZZ) == [[[ZZ(4)]]]

    assert dmp_sqr([[[]]], 2, QQ) == [[[]]]
    assert dmp_sqr([[[QQ(2,3)]]], 2, QQ) == [[[QQ(4,9)]]]
def test_dup_pow():
    """Test dup_pow(): raising a dense univariate polynomial to an integer power."""
    # 0**0 is taken as 1 by convention.
    assert dup_pow([], 0, ZZ) == [ZZ(1)]
    assert dup_pow([], 0, QQ) == [QQ(1)]

    assert dup_pow([], 1, ZZ) == []
    assert dup_pow([], 7, ZZ) == []

    assert dup_pow([ZZ(1)], 0, ZZ) == [ZZ(1)]
    assert dup_pow([ZZ(1)], 1, ZZ) == [ZZ(1)]
    assert dup_pow([ZZ(1)], 7, ZZ) == [ZZ(1)]

    assert dup_pow([ZZ(3)], 0, ZZ) == [ZZ(1)]
    assert dup_pow([ZZ(3)], 1, ZZ) == [ZZ(3)]
    assert dup_pow([ZZ(3)], 7, ZZ) == [ZZ(2187)]

    assert dup_pow([QQ(1,1)], 0, QQ) == [QQ(1,1)]
    assert dup_pow([QQ(1,1)], 1, QQ) == [QQ(1,1)]
    assert dup_pow([QQ(1,1)], 7, QQ) == [QQ(1,1)]

    assert dup_pow([QQ(3,7)], 0, QQ) == [QQ(1,1)]
    assert dup_pow([QQ(3,7)], 1, QQ) == [QQ(3,7)]
    assert dup_pow([QQ(3,7)], 7, QQ) == [QQ(2187,823543)]

    f = dup_normal([2,0,0,1,7], ZZ)

    assert dup_pow(f, 0, ZZ) == dup_normal([1], ZZ)
    assert dup_pow(f, 1, ZZ) == dup_normal([2,0,0,1,7], ZZ)
    assert dup_pow(f, 2, ZZ) == dup_normal([4,0,0,4,28,0,1,14,49], ZZ)
    assert dup_pow(f, 3, ZZ) == dup_normal([8,0,0,12,84,0,6,84,294,1,21,147,343], ZZ)

def test_dmp_pow():
    """Test dmp_pow(): multivariate integer powers."""
    assert dmp_pow([[]], 0, 1, ZZ) == [[ZZ(1)]]
    assert dmp_pow([[]], 0, 1, QQ) == [[QQ(1)]]

    assert dmp_pow([[]], 1, 1, ZZ) == [[]]
    assert dmp_pow([[]], 7, 1, ZZ) == [[]]

    assert dmp_pow([[ZZ(1)]], 0, 1, ZZ) == [[ZZ(1)]]
    assert dmp_pow([[ZZ(1)]], 1, 1, ZZ) == [[ZZ(1)]]
    assert dmp_pow([[ZZ(1)]], 7, 1, ZZ) == [[ZZ(1)]]

    assert dmp_pow([[QQ(3,7)]], 0, 1, QQ) == [[QQ(1,1)]]
    assert dmp_pow([[QQ(3,7)]], 1, 1, QQ) == [[QQ(3,7)]]
    assert dmp_pow([[QQ(3,7)]], 7, 1, QQ) == [[QQ(2187,823543)]]

    f = dup_normal([2,0,0,1,7], ZZ)

    # With zero levels dmp_pow must match dup_pow.
    assert dmp_pow(f, 2, 0, ZZ) == dup_pow(f, 2, ZZ)
def test_dup_pdiv():
    """Test pseudo-division helpers: dup_pdiv, dup_prem, dup_pexquo, dup_pquo."""
    f = dup_normal([3,1,1,5], ZZ)
    g = dup_normal([5,-3,1], ZZ)

    q = dup_normal([15, 14], ZZ)
    r = dup_normal([52, 111], ZZ)

    assert dup_pdiv(f, g, ZZ) == (q, r)
    assert dup_pexquo(f, g, ZZ) == q
    assert dup_prem(f, g, ZZ) == r

    # The pseudo-quotient is not exact here, so pquo must raise.
    raises(ExactQuotientFailed, 'dup_pquo(f, g, ZZ)')

    f = dup_normal([3,1,1,5], QQ)
    g = dup_normal([5,-3,1], QQ)

    q = dup_normal([15, 14], QQ)
    r = dup_normal([52, 111], QQ)

    assert dup_pdiv(f, g, QQ) == (q, r)
    assert dup_pexquo(f, g, QQ) == q
    assert dup_prem(f, g, QQ) == r

    raises(ExactQuotientFailed, 'dup_pquo(f, g, QQ)')

def test_dmp_pdiv():
    """Test multivariate pseudo-division helpers: dmp_pdiv, dmp_prem, dmp_pexquo, dmp_pquo."""
    f = dmp_normal([[1], [], [1,0,0]], 1, ZZ)
    g = dmp_normal([[1], [-1,0]], 1, ZZ)

    q = dmp_normal([[1], [1, 0]], 1, ZZ)
    r = dmp_normal([[2, 0, 0]], 1, ZZ)

    assert dmp_pdiv(f, g, 1, ZZ) == (q, r)
    assert dmp_pexquo(f, g, 1, ZZ) == q
    assert dmp_prem(f, g, 1, ZZ) == r

    raises(ExactQuotientFailed, 'dmp_pquo(f, g, 1, ZZ)')

    f = dmp_normal([[1], [], [1,0,0]], 1, ZZ)
    g = dmp_normal([[2], [-2,0]], 1, ZZ)

    q = dmp_normal([[2], [2, 0]], 1, ZZ)
    r = dmp_normal([[8, 0, 0]], 1, ZZ)

    assert dmp_pdiv(f, g, 1, ZZ) == (q, r)
    assert dmp_pexquo(f, g, 1, ZZ) == q
    assert dmp_prem(f, g, 1, ZZ) == r

    raises(ExactQuotientFailed, 'dmp_pquo(f, g, 1, ZZ)')
def test_dup_rr_div():
    """Test dup_rr_div(): division over a ring; non-divisible leading terms give q == 0."""
    raises(ZeroDivisionError, "dup_rr_div([1,2,3], [], ZZ)")

    f = dup_normal([3,1,1,5], ZZ)
    g = dup_normal([5,-3,1], ZZ)

    # 5 does not divide 3 in ZZ, so the quotient is empty and r == f.
    q, r = [], f

    assert dup_rr_div(f, g, ZZ) == (q, r)

def test_dmp_rr_div():
    """Test dmp_rr_div(): multivariate division over a ring."""
    raises(ZeroDivisionError, "dmp_rr_div([[1,2],[3]], [[]], 1, ZZ)")

    f = dmp_normal([[1], [], [1,0,0]], 1, ZZ)
    g = dmp_normal([[1], [-1,0]], 1, ZZ)

    q = dmp_normal([[1], [1, 0]], 1, ZZ)
    r = dmp_normal([[2, 0, 0]], 1, ZZ)

    assert dmp_rr_div(f, g, 1, ZZ) == (q, r)

    f = dmp_normal([[1], [], [1,0,0]], 1, ZZ)
    g = dmp_normal([[-1], [1,0]], 1, ZZ)

    q = dmp_normal([[-1], [-1, 0]], 1, ZZ)
    r = dmp_normal([[2, 0, 0]], 1, ZZ)

    assert dmp_rr_div(f, g, 1, ZZ) == (q, r)

    f = dmp_normal([[1], [], [1,0,0]], 1, ZZ)
    g = dmp_normal([[2], [-2,0]], 1, ZZ)

    # Leading coefficient 2 does not divide 1 in ZZ: zero quotient, r == f.
    q, r = [[]], f

    assert dmp_rr_div(f, g, 1, ZZ) == (q, r)
def test_dup_ff_div():
    """Test dup_ff_div(): division over a field (always succeeds for g != 0)."""
    raises(ZeroDivisionError, "dup_ff_div([1,2,3], [], QQ)")

    f = dup_normal([3,1,1,5], QQ)
    g = dup_normal([5,-3,1], QQ)

    q = [QQ(3,5), QQ(14,25)]
    r = [QQ(52,25), QQ(111,25)]

    assert dup_ff_div(f, g, QQ) == (q, r)

def test_dmp_ff_div():
    """Test dmp_ff_div(): multivariate division over a field."""
    raises(ZeroDivisionError, "dmp_ff_div([[1,2],[3]], [[]], 1, QQ)")

    f = dmp_normal([[1], [], [1,0,0]], 1, QQ)
    g = dmp_normal([[1], [-1,0]], 1, QQ)

    q = [[QQ(1, 1)], [QQ(1, 1), QQ(0, 1)]]
    r = [[QQ(2, 1), QQ(0, 1), QQ(0, 1)]]

    assert dmp_ff_div(f, g, 1, QQ) == (q, r)

    f = dmp_normal([[1], [], [1,0,0]], 1, QQ)
    g = dmp_normal([[-1], [1,0]], 1, QQ)

    q = [[QQ(-1, 1)], [QQ(-1, 1), QQ(0, 1)]]
    r = [[QQ(2, 1), QQ(0, 1), QQ(0, 1)]]

    assert dmp_ff_div(f, g, 1, QQ) == (q, r)

    f = dmp_normal([[1], [], [1,0,0]], 1, QQ)
    g = dmp_normal([[2], [-2,0]], 1, QQ)

    q = [[QQ(1, 2)], [QQ(1, 2), QQ(0, 1)]]
    r = [[QQ(2, 1), QQ(0, 1), QQ(0, 1)]]

    assert dmp_ff_div(f, g, 1, QQ) == (q, r)
def test_dup_div():
    """Test the dup_div/dup_rem/dup_exquo/dup_quo family."""
    f, g, q, r = [5,4,3,2,1], [1,2,3], [5,-6,0], [20,1]

    assert dup_div(f, g, ZZ) == (q, r)
    assert dup_exquo(f, g, ZZ) == q
    assert dup_rem(f, g, ZZ) == r

    # Non-zero remainder means quo must raise.
    raises(ExactQuotientFailed, 'dup_quo(f, g, ZZ)')

    f, g, q, r = [5,4,3,2,1,0], [1,2,0,0,9], [5,-6], [15,2,-44,54]

    assert dup_div(f, g, ZZ) == (q, r)
    assert dup_exquo(f, g, ZZ) == q
    assert dup_rem(f, g, ZZ) == r

    raises(ExactQuotientFailed, 'dup_quo(f, g, ZZ)')

def test_dmp_div():
    """Test the dmp_div/dmp_rem/dmp_exquo/dmp_quo family."""
    f, g, q, r = [5,4,3,2,1], [1,2,3], [5,-6,0], [20,1]

    assert dmp_div(f, g, 0, ZZ) == (q, r)
    assert dmp_exquo(f, g, 0, ZZ) == q
    assert dmp_rem(f, g, 0, ZZ) == r

    raises(ExactQuotientFailed, 'dmp_quo(f, g, 0, ZZ)')

    f, g, q, r = [[[1]]], [[[2]],[1]], [[[]]], [[[1]]]

    assert dmp_div(f, g, 2, ZZ) == (q, r)
    assert dmp_exquo(f, g, 2, ZZ) == q
    assert dmp_rem(f, g, 2, ZZ) == r

    raises(ExactQuotientFailed, 'dmp_quo(f, g, 2, ZZ)')
def test_dup_max_norm():
    """Test dup_max_norm(): maximum absolute coefficient."""
    assert dup_max_norm([], ZZ) == 0
    assert dup_max_norm([1], ZZ) == 1

    assert dup_max_norm([1,4,2,3], ZZ) == 4

def test_dmp_max_norm():
    """Test dmp_max_norm(): multivariate maximum absolute coefficient."""
    assert dmp_max_norm([[[]]], 2, ZZ) == 0
    assert dmp_max_norm([[[1]]], 2, ZZ) == 1

    assert dmp_max_norm(f_0, 2, ZZ) == 6

def test_dup_l1_norm():
    """Test dup_l1_norm(): sum of absolute coefficients."""
    assert dup_l1_norm([], ZZ) == 0
    assert dup_l1_norm([1], ZZ) == 1
    assert dup_l1_norm([1,4,2,3], ZZ) == 10

def test_dmp_l1_norm():
    """Test dmp_l1_norm(): multivariate sum of absolute coefficients."""
    assert dmp_l1_norm([[[]]], 2, ZZ) == 0
    assert dmp_l1_norm([[[1]]], 2, ZZ) == 1

    assert dmp_l1_norm(f_0, 2, ZZ) == 31
def test_dup_expand():
    """Test dup_expand(): product of a sequence of polynomials (empty product is 1)."""
    assert dup_expand((), ZZ) == [1]
    assert dup_expand(([1,2,3], [1,2], [7,5,4,3]), ZZ) == \
        dup_mul([1,2,3], dup_mul([1,2], [7,5,4,3], ZZ), ZZ)

def test_dmp_expand():
    """Test dmp_expand(): multivariate product of a sequence of polynomials."""
    assert dmp_expand((), 1, ZZ) == [[1]]
    assert dmp_expand(([[1],[2],[3]], [[1],[2]], [[7],[5],[4],[3]]), 1, ZZ) == \
        dmp_mul([[1],[2],[3]], dmp_mul([[1],[2]], [[7],[5],[4],[3]], 1, ZZ), 1, ZZ)
| |
"""Event parser and human readable log generator."""
from datetime import timedelta
from itertools import groupby
import json
import logging
import sqlalchemy
from sqlalchemy.orm import aliased
import voluptuous as vol
from homeassistant.components import sun
from homeassistant.components.history import sqlalchemy_filter_from_include_exclude_conf
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.recorder.models import (
Events,
States,
process_timestamp,
process_timestamp_to_utc_isoformat,
)
from homeassistant.components.recorder.util import session_scope
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_DOMAIN,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_NAME,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
EVENT_LOGBOOK_ENTRY,
EVENT_STATE_CHANGED,
HTTP_BAD_REQUEST,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, callback, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import (
INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA,
convert_include_exclude_filter,
generate_filter,
)
from homeassistant.helpers.integration_platform import (
async_process_integration_platforms,
)
from homeassistant.loader import bind_hass
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)

ATTR_MESSAGE = "message"

CONF_DOMAINS = "domains"
CONF_ENTITIES = "entities"
# Domains whose frequent state updates are collapsed to the latest one per batch.
CONTINUOUS_DOMAINS = ["proximity", "sensor"]

DOMAIN = "logbook"

# Size of the time buckets used when grouping events in humanify().
GROUP_BY_MINUTES = 15

EMPTY_JSON_OBJECT = "{}"
# Raw JSON fragment used to pre-filter sensor states with a unit in SQL.
UNIT_OF_MEASUREMENT_JSON = '"unit_of_measurement":'

CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA}, extra=vol.ALLOW_EXTRA
)

HOMEASSISTANT_EVENTS = [
    EVENT_HOMEASSISTANT_START,
    EVENT_HOMEASSISTANT_STOP,
]

ALL_EVENT_TYPES = [EVENT_STATE_CHANGED, EVENT_LOGBOOK_ENTRY, *HOMEASSISTANT_EVENTS]

# Schema for the logbook.log service call.
LOG_MESSAGE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_NAME): cv.string,
        vol.Required(ATTR_MESSAGE): cv.template,
        vol.Optional(ATTR_DOMAIN): cv.slug,
        vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
    }
)
@bind_hass
def log_entry(hass, name, message, domain=None, entity_id=None):
    """Add an entry to the logbook.

    Synchronous wrapper: schedules async_log_entry on the event loop.
    """
    hass.add_job(async_log_entry, hass, name, message, domain, entity_id)
@bind_hass
def async_log_entry(hass, name, message, domain=None, entity_id=None):
    """Add an entry to the logbook by firing a logbook_entry event."""
    data = {ATTR_NAME: name, ATTR_MESSAGE: message}

    # Only include optional attributes that were actually supplied.
    for key, value in ((ATTR_DOMAIN, domain), (ATTR_ENTITY_ID, entity_id)):
        if value is not None:
            data[key] = value

    hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)
async def async_setup(hass, config):
    """Logbook setup.

    Registers the logbook.log service, the frontend panel, the HTTP view and
    per-integration event describers.
    """
    # Maps event_type -> (domain, describe_callback) for external platforms.
    hass.data[DOMAIN] = {}

    @callback
    def log_message(service):
        """Handle sending notification message service calls."""
        message = service.data[ATTR_MESSAGE]
        name = service.data[ATTR_NAME]
        domain = service.data.get(ATTR_DOMAIN)
        entity_id = service.data.get(ATTR_ENTITY_ID)

        if entity_id is None and domain is None:
            # If there is no entity_id or
            # domain, the event will get filtered
            # away so we use the "logbook" domain
            domain = DOMAIN

        # Render the message template before firing the event.
        message.hass = hass
        message = message.async_render()
        async_log_entry(hass, name, message, domain, entity_id)

    hass.components.frontend.async_register_built_in_panel(
        "logbook", "logbook", "hass:format-list-bulleted-type"
    )

    conf = config.get(DOMAIN, {})

    if conf:
        filters = sqlalchemy_filter_from_include_exclude_conf(conf)
        entities_filter = convert_include_exclude_filter(conf)
    else:
        filters = None
        entities_filter = None

    hass.http.register_view(LogbookView(conf, filters, entities_filter))

    hass.services.async_register(DOMAIN, "log", log_message, schema=LOG_MESSAGE_SCHEMA)

    await async_process_integration_platforms(hass, DOMAIN, _process_logbook_platform)

    return True
async def _process_logbook_platform(hass, domain, platform):
    """Process a logbook platform.

    Lets an integration register describers for its custom event types.
    """

    @callback
    def _async_describe_event(domain, event_name, describe_callback):
        """Teach logbook how to describe a new event."""
        # Stored for humanify(), which looks events up by event_name.
        hass.data[DOMAIN][event_name] = (domain, describe_callback)

    platform.async_describe_events(hass, _async_describe_event)
class LogbookView(HomeAssistantView):
    """Handle logbook view requests."""

    url = "/api/logbook"
    name = "api:logbook"
    extra_urls = ["/api/logbook/{datetime}"]

    def __init__(self, config, filters, entities_filter):
        """Initialize the logbook view."""
        self.config = config
        self.filters = filters
        self.entities_filter = entities_filter

    async def get(self, request, datetime=None):
        """Retrieve logbook entries for the requested window.

        Query parameters: ``period`` (days, default 1), ``entity`` and
        ``end_time`` are all optional.
        """
        if datetime:
            datetime = dt_util.parse_datetime(datetime)

            if datetime is None:
                return self.json_message("Invalid datetime", HTTP_BAD_REQUEST)
        else:
            datetime = dt_util.start_of_local_day()

        period = request.query.get("period")
        if period is None:
            period = 1
        else:
            # Bug fix: a non-numeric "period" query parameter previously
            # raised an unhandled ValueError (HTTP 500); reject it with a
            # 400 like the other invalid parameters.
            try:
                period = int(period)
            except ValueError:
                return self.json_message("Invalid period", HTTP_BAD_REQUEST)

        entity_id = request.query.get("entity")

        end_time = request.query.get("end_time")
        if end_time is None:
            start_day = dt_util.as_utc(datetime) - timedelta(days=period - 1)
            end_day = start_day + timedelta(days=period)
        else:
            start_day = datetime
            end_day = dt_util.parse_datetime(end_time)
            if end_day is None:
                return self.json_message("Invalid end_time", HTTP_BAD_REQUEST)

        hass = request.app["hass"]

        def json_events():
            """Fetch events and generate JSON."""
            return self.json(
                _get_events(
                    hass,
                    self.config,
                    start_day,
                    end_day,
                    entity_id,
                    self.filters,
                    self.entities_filter,
                )
            )

        # Database access is blocking: run it in the executor.
        return await hass.async_add_executor_job(json_events)
def humanify(hass, events, entity_attr_cache):
    """Generate a converted list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if Home Assistant stop and start happen in same minute call it restarted
    """
    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
        events, lambda event: event.time_fired_minute // GROUP_BY_MINUTES
    ):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # First pass: collect grouping info for the batch.
        # Process events
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                if event.domain in CONTINUOUS_DOMAINS:
                    last_sensor_event[event.entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired_minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired_minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if event.time_fired_minute not in start_stop_events:
                    continue

                # A start following a stop in the same minute is a restart.
                start_stop_events[event.time_fired_minute] = 2

        # Second pass: yield the human-readable entries.
        # Yield entries
        external_events = hass.data.get(DOMAIN, {})
        for event in events_batch:
            if event.event_type in external_events:
                domain, describe_event = external_events[event.event_type]
                data = describe_event(event)
                data["when"] = event.time_fired_isoformat
                data["domain"] = domain
                data["context_user_id"] = event.context_user_id
                yield data

            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.entity_id
                domain = event.domain

                if (
                    domain in CONTINUOUS_DOMAINS
                    and event != last_sensor_event[entity_id]
                ):
                    # Skip all but the last sensor state
                    continue

                # Fall back to a prettified object id when no friendly name is set.
                name = entity_attr_cache.get(
                    entity_id, ATTR_FRIENDLY_NAME, event
                ) or split_entity_id(entity_id)[1].replace("_", " ")

                yield {
                    "when": event.time_fired_isoformat,
                    "name": name,
                    "message": _entry_message_from_event(
                        hass, entity_id, domain, event, entity_attr_cache
                    ),
                    "domain": domain,
                    "entity_id": entity_id,
                    "context_user_id": event.context_user_id,
                }

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                if start_stop_events.get(event.time_fired_minute) == 2:
                    # Folded into the matching stop event as "restarted".
                    continue

                yield {
                    "when": event.time_fired_isoformat,
                    "name": "Home Assistant",
                    "message": "started",
                    "domain": HA_DOMAIN,
                    "context_user_id": event.context_user_id,
                }
            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired_minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield {
                    "when": event.time_fired_isoformat,
                    "name": "Home Assistant",
                    "message": action,
                    "domain": HA_DOMAIN,
                    "context_user_id": event.context_user_id,
                }
            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                event_data = event.data
                domain = event_data.get(ATTR_DOMAIN)
                entity_id = event_data.get(ATTR_ENTITY_ID)

                # Derive the domain from the entity_id when not given explicitly.
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield {
                    "when": event.time_fired_isoformat,
                    "name": event_data.get(ATTR_NAME),
                    "message": event_data.get(ATTR_MESSAGE),
                    "domain": domain,
                    "entity_id": entity_id,
                }
def _get_events(
    hass, config, start_day, end_day, entity_id=None, filters=None, entities_filter=None
):
    """Get events for a period of time.

    Builds one SQL query joining Events against their States row (and the
    previous state via an alias), streams the rows, and returns the
    humanified logbook entries as a list.
    """
    entity_attr_cache = EntityAttributeCache(hass)

    def yield_events(query):
        """Yield Events that are not filtered away."""
        # yield_per streams the result set in chunks of 1000 rows instead
        # of materializing everything in memory at once.
        for row in query.yield_per(1000):
            event = LazyEventPartialState(row)
            if _keep_event(hass, event, entities_filter):
                yield event

    with session_scope(hass=hass) as session:
        if entity_id is not None:
            # Single-entity request: filter in Python with a generated
            # include-filter and skip the broader SQL entities filter.
            entity_ids = [entity_id.lower()]
            entities_filter = generate_filter([], entity_ids, [], [])
            apply_sql_entities_filter = False
        else:
            entity_ids = None
            apply_sql_entities_filter = True

        # Alias so the same States table can be joined a second time as
        # the "previous" state of each state-changed row.
        old_state = aliased(States, name="old_state")

        query = (
            session.query(
                Events.event_type,
                Events.event_data,
                Events.time_fired,
                Events.context_user_id,
                States.state,
                States.entity_id,
                States.domain,
                States.attributes,
            )
            .order_by(Events.time_fired)
            .outerjoin(States, (Events.event_id == States.event_id))
            .outerjoin(old_state, (States.old_state_id == old_state.state_id))
            # The filter below removes state-changed events that lack an
            # old state or a new state, or whose old and new state are
            # identical (no visible change to log).
            #
            .filter(
                (Events.event_type != EVENT_STATE_CHANGED)
                | (
                    (States.state_id.isnot(None))
                    & (old_state.state_id.isnot(None))
                    & (States.state.isnot(None))
                    & (States.state != old_state.state)
                )
            )
            #
            # Prefilter out continuous domains that have
            # ATTR_UNIT_OF_MEASUREMENT as its much faster in sql.
            #
            .filter(
                (Events.event_type != EVENT_STATE_CHANGED)
                | sqlalchemy.not_(States.domain.in_(CONTINUOUS_DOMAINS))
                | sqlalchemy.not_(States.attributes.contains(UNIT_OF_MEASUREMENT_JSON))
            )
            # Restrict to the event types the logbook knows how to
            # describe (built-in plus externally registered describers).
            .filter(
                Events.event_type.in_(ALL_EVENT_TYPES + list(hass.data.get(DOMAIN, {})))
            )
            .filter((Events.time_fired > start_day) & (Events.time_fired < end_day))
        )

        if entity_ids:
            # Keep non-state rows (state_id NULL) and only the requested
            # entities' genuine state changes (last_updated == last_changed
            # excludes attribute-only updates).
            query = query.filter(
                (
                    (States.last_updated == States.last_changed)
                    & States.entity_id.in_(entity_ids)
                )
                | (States.state_id.is_(None))
            )
        else:
            query = query.filter(
                (States.last_updated == States.last_changed)
                | (States.state_id.is_(None))
            )

        if apply_sql_entities_filter and filters:
            entity_filter = filters.entity_filter()
            if entity_filter is not None:
                # Apply the configured include/exclude filter in SQL, but
                # never drop non-state events with it.
                query = query.filter(
                    entity_filter | (Events.event_type != EVENT_STATE_CHANGED)
                )

        # Materialize inside the session scope so rows stay valid.
        return list(humanify(hass, yield_events(query), entity_attr_cache))
def _keep_event(hass, event, entities_filter):
    """Return True when *event* passes the entities filter.

    Derives an entity_id (or a "domain." prefix stand-in) for the event
    and runs it through *entities_filter* when one is configured.
    """
    event_type = event.event_type

    if event_type == EVENT_STATE_CHANGED:
        entity_id = event.entity_id
    elif event_type in HOMEASSISTANT_EVENTS:
        # Start/stop events are attributed to the Home Assistant domain.
        entity_id = f"{HA_DOMAIN}."
    elif event_type in hass.data[DOMAIN] and ATTR_ENTITY_ID not in event.data:
        # If the entity_id isn't described, use the domain that describes
        # the event for filtering.
        describer_domain = hass.data[DOMAIN][event_type][0]
        if describer_domain is None:
            return False
        entity_id = f"{describer_domain}."
    else:
        entity_id = event.data.get(ATTR_ENTITY_ID)
        if entity_id is None:
            fallback_domain = event.data.get(ATTR_DOMAIN)
            if fallback_domain is None:
                return False
            entity_id = f"{fallback_domain}."

    if entities_filter is None:
        return True
    return entities_filter(entity_id)
def _entry_message_from_event(hass, entity_id, domain, event, entity_attr_cache):
    """Convert a state to a message for the logbook."""
    # We pass domain in so we don't have to split entity_id again
    state = event.state

    if domain in ("device_tracker", "person"):
        return "is away" if state == STATE_NOT_HOME else f"is at {state}"

    if domain == "sun":
        return "has risen" if state == sun.STATE_ABOVE_HORIZON else "has set"

    if domain == "binary_sensor":
        device_class = entity_attr_cache.get(entity_id, ATTR_DEVICE_CLASS, event)

        # (on-message, off-message) per device class.
        on_off_messages = {
            "battery": ("is low", "is normal"),
            "connectivity": ("is connected", "is disconnected"),
            "door": ("is opened", "is closed"),
            "garage_door": ("is opened", "is closed"),
            "opening": ("is opened", "is closed"),
            "window": ("is opened", "is closed"),
            "lock": ("is unlocked", "is locked"),
            "plug": ("is plugged in", "is unplugged"),
            "presence": ("is at home", "is away"),
            "safety": ("is unsafe", "is safe"),
        }
        # Classes whose message is phrased as detected / cleared.
        detection_classes = (
            "cold",
            "gas",
            "heat",
            "light",
            "moisture",
            "motion",
            "occupancy",
            "power",
            "problem",
            "smoke",
            "sound",
            "vibration",
        )

        if device_class in on_off_messages:
            on_message, off_message = on_off_messages[device_class]
            if state == STATE_ON:
                return on_message
            if state == STATE_OFF:
                return off_message
        elif device_class in detection_classes:
            if state == STATE_ON:
                return f"detected {device_class}"
            if state == STATE_OFF:
                return f"cleared (no {device_class} detected)"
        # Unknown device class or unexpected state: fall through to the
        # generic messages below.

    if state == STATE_ON:
        # Future: combine groups and its entity entries ?
        return "turned on"
    if state == STATE_OFF:
        return "turned off"
    return f"changed to {state}"
class LazyEventPartialState:
    """A lazy version of core Event with limited State joined in.

    Wraps a joined Events/States database row; cheap columns are copied
    eagerly in __init__ while expensive work (JSON decoding, timestamp
    conversion) is deferred until the matching property is first read
    and then cached.
    """

    __slots__ = [
        "_row",
        "_event_data",
        "_time_fired",
        "_time_fired_isoformat",
        "_attributes",
        "event_type",
        "entity_id",
        "state",
        "domain",
    ]

    def __init__(self, row):
        """Init the lazy event from a database row."""
        self._row = row
        # None is the "not computed yet" sentinel for the caches below.
        self._event_data = None
        self._time_fired = None
        self._time_fired_isoformat = None
        self._attributes = None
        self.event_type = self._row.event_type
        self.entity_id = self._row.entity_id
        self.state = self._row.state
        self.domain = self._row.domain

    @property
    def context_user_id(self):
        """Context user id of event."""
        return self._row.context_user_id

    @property
    def attributes(self):
        """State attributes."""
        # Compare against the None sentinel rather than truthiness so an
        # empty attributes dict is cached instead of being re-derived
        # (and possibly re-parsed) on every access.
        if self._attributes is None:
            if (
                self._row.attributes is None
                or self._row.attributes == EMPTY_JSON_OBJECT
            ):
                self._attributes = {}
            else:
                self._attributes = json.loads(self._row.attributes)
        return self._attributes

    @property
    def data(self):
        """Event data."""
        # Same sentinel pattern: an empty event-data dict stays cached.
        if self._event_data is None:
            if self._row.event_data == EMPTY_JSON_OBJECT:
                self._event_data = {}
            else:
                self._event_data = json.loads(self._row.event_data)
        return self._event_data

    @property
    def time_fired_minute(self):
        """Minute the event was fired not converted."""
        return self._row.time_fired.minute

    @property
    def time_fired(self):
        """Time event was fired in utc."""
        if self._time_fired is None:
            self._time_fired = (
                process_timestamp(self._row.time_fired) or dt_util.utcnow()
            )
        return self._time_fired

    @property
    def time_fired_isoformat(self):
        """Time event was fired in utc isoformat."""
        if self._time_fired_isoformat is None:
            if self._time_fired is not None:
                # Reuse the already-converted timestamp when available.
                self._time_fired_isoformat = self._time_fired.isoformat()
            else:
                self._time_fired_isoformat = process_timestamp_to_utc_isoformat(
                    self._row.time_fired or dt_util.utcnow()
                )
        return self._time_fired_isoformat
class EntityAttributeCache:
    """A cache to lookup static entity_id attribute.

    This class should not be used to lookup attributes
    that are expected to change state.
    """

    def __init__(self, hass):
        """Init the cache."""
        self._hass = hass
        self._cache = {}

    def get(self, entity_id, attribute, event):
        """Lookup an attribute for an entity or get it from the cache."""
        entity_cache = self._cache.setdefault(entity_id, {})
        if attribute in entity_cache:
            return entity_cache[attribute]

        current_state = self._hass.states.get(entity_id)
        if current_state:
            # Reading the live state is faster than decoding the
            # attributes stored on the event row.
            value = current_state.attributes.get(attribute)
        else:
            # Entity no longer exists; fall back to the event's
            # (decoded) attributes.
            value = event.attributes.get(attribute)

        entity_cache[attribute] = value
        return value
| |
# Pansharpened Image Process using Rasterio
# Landsat Util
# License: CC0 1.0 Universal
import warnings
import sys
from os.path import join, isdir
import tarfile
import glob
import subprocess
import numpy
import rasterio
from rasterio.warp import reproject, RESAMPLING, transform
from skimage import transform as sktransform
from skimage.util import img_as_ubyte
from skimage.exposure import rescale_intensity
import settings
from mixins import VerbosityMixin
from utils import get_file, timer, check_create_folder, exit
class FileDoesNotExist(Exception):
    """Raised when an expected file cannot be found on disk."""
class Process(VerbosityMixin):
    """
    Image processing class for Landsat scenes.

    To initiate the following parameters must be passed:

    :param path:
        Path of the image.
    :type path:
        String
    :param bands:
        The band sequence for the final image. Must be a python list. (optional)
    :type bands:
        List
    :param dst_path:
        Path to the folder where the image should be stored. (optional)
    :type dst_path:
        String
    :param verbose:
        Whether the output should be verbose. Default is False.
    :type verbose:
        boolean
    :param force_unzip:
        Whether to force unzip the tar file. Default is False
    :type force_unzip:
        boolean
    """

    def __init__(self, path, bands=None, dst_path=None, verbose=False, force_unzip=False):

        # Target projection for the output image (web mercator).
        self.projection = {'init': 'epsg:3857'}
        self.dst_crs = {'init': u'epsg:3857'}
        # Scene id is the archive/file name without its extension(s).
        self.scene = get_file(path).split('.')[0]
        # Default to natural color: RGB = Landsat bands 4, 3, 2.
        self.bands = bands if isinstance(bands, list) else [4, 3, 2]

        # Landsat source path
        self.src_path = path.replace(get_file(path), '')

        # Build destination folder if it doesn't exist
        self.dst_path = dst_path if dst_path else settings.PROCESSED_IMAGE
        self.dst_path = check_create_folder(join(self.dst_path, self.scene))
        self.verbose = verbose

        # Path to the unzipped folder
        self.scene_path = join(self.src_path, self.scene)

        if self._check_if_zipped(path):
            self._unzip(join(self.src_path, get_file(path)), join(self.src_path, self.scene), self.scene, force_unzip)

        # Absolute paths to each requested band file.
        self.bands_path = []
        for band in self.bands:
            self.bands_path.append(join(self.scene_path, self._get_full_filename(band)))

    def run(self, pansharpen=True):
        """ Executes the image processing.

        :param pansharpen:
            Whether the process should also run pansharpenning. Default is True
        :type pansharpen:
            boolean

        :returns:
            (String) the path to the processed image
        """

        self.output("* Image processing started for bands %s" % "-".join(map(str, self.bands)), normal=True)

        # Read cloud coverage from mtl file
        cloud_cover = 0

        try:
            with open(self.scene_path + '/' + self.scene + '_MTL.txt', 'rU') as mtl:
                lines = mtl.readlines()
                for line in lines:
                    if 'CLOUD_COVER' in line:
                        cloud_cover = float(line.replace('CLOUD_COVER = ', ''))
                        break
        except IOError:
            # Missing MTL file: fall back to cloud_cover = 0.
            pass

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            with rasterio.drivers():
                bands = []

                # Add band 8 for pansharpenning
                if pansharpen:
                    self.bands.append(8)

                bands_path = []

                for band in self.bands:
                    bands_path.append(join(self.scene_path, self._get_full_filename(band)))

                try:
                    for i, band in enumerate(self.bands):
                        bands.append(self._read_band(bands_path[i]))
                except IOError as e:
                    # NOTE(review): e.message is Python-2-only; this file
                    # also uses a print statement, so it targets Python 2.
                    exit(e.message, 1)

                src = rasterio.open(bands_path[-1])

                # Get pixel size from source
                self.pixel = src.affine[0]

                # Only collect src data that is needed and delete the rest
                src_data = {
                    'transform': src.transform,
                    'crs': src.crs,
                    'affine': src.affine,
                    'shape': src.shape
                }
                del src

                # Corner coordinates of the source, in the target projection.
                crn = self._get_boundaries(src_data)

                # Compute the destination transform from the projected corners.
                dst_shape = src_data['shape']
                dst_corner_ys = [crn[k]['y'][1][0] for k in crn.keys()]
                dst_corner_xs = [crn[k]['x'][1][0] for k in crn.keys()]
                y_pixel = abs(max(dst_corner_ys) - min(dst_corner_ys)) / dst_shape[0]
                x_pixel = abs(max(dst_corner_xs) - min(dst_corner_xs)) / dst_shape[1]

                dst_transform = (min(dst_corner_xs),
                                 x_pixel,
                                 0.0,
                                 max(dst_corner_ys),
                                 0.0,
                                 -y_pixel)

                # Delete crn since no longer needed
                del crn

                # Destination buffers: three color bands (+ pan band below).
                new_bands = []
                for i in range(0, 3):
                    new_bands.append(numpy.empty(dst_shape, dtype=numpy.uint16))

                if pansharpen:
                    # Upsample the color bands to the pan band's resolution.
                    bands[:3] = self._rescale(bands[:3])
                    new_bands.append(numpy.empty(dst_shape, dtype=numpy.uint16))

                self.output("Projecting", normal=True, arrow=True)
                for i, band in enumerate(bands):
                    self.output("band %s" % self.bands[i], normal=True, color='green', indent=1)
                    reproject(band, new_bands[i], src_transform=src_data['transform'], src_crs=src_data['crs'],
                              dst_transform=dst_transform, dst_crs=self.dst_crs, resampling=RESAMPLING.nearest)

                # Bands are no longer needed
                del bands

                if pansharpen:
                    new_bands = self._pansharpenning(new_bands)
                    # Drop band 8 again so the output filename below only
                    # reflects the color bands.
                    del self.bands[3]

                self.output("Final Steps", normal=True, arrow=True)

                output_file = '%s_bands_%s' % (self.scene, "".join(map(str, self.bands)))

                if pansharpen:
                    output_file += '_pan'

                output_file += '.TIF'
                output_file = join(self.dst_path, output_file)

                output = rasterio.open(output_file, 'w', driver='GTiff',
                                       width=dst_shape[1], height=dst_shape[0],
                                       count=3, dtype=numpy.uint8,
                                       nodata=0, transform=dst_transform, photometric='RGB',
                                       crs=self.dst_crs)

                for i, band in enumerate(new_bands):
                    # Color Correction
                    band = self._color_correction(band, self.bands[i], 0, cloud_cover)

                    output.write_band(i+1, img_as_ubyte(band))

                    # Free each band as soon as it has been written.
                    new_bands[i] = None
                self.output("Writing to file", normal=True, color='green', indent=1)
                return output_file

    def _pansharpenning(self, bands):
        """ Sharpen the three color bands using the pan band (last element). """

        self.output("Pansharpening", normal=True, arrow=True)
        # Pan sharpening
        m = sum(bands[:3])
        # + 0.1 guards the division below against zero-valued pixels.
        m = m + 0.1

        self.output("calculating pan ratio", normal=True, color='green', indent=1)
        pan = 1/m * bands[-1]

        del m
        del bands[3]

        self.output("computing bands", normal=True, color='green', indent=1)
        for i, band in enumerate(bands):
            bands[i] = band * pan

        del pan

        return bands

    def _color_correction(self, band, band_id, low, cloud_cover):
        """ Stretch a band's intensities, giving cloudy pixels their own range.

        The split point (cloud_cut_low / cloud_divide) moves down as the
        scene's cloud cover percentage increases.
        """
        band = band.astype(numpy.uint16)

        self.output("Color correcting band %s" % band_id, normal=True, color='green', indent=1)
        p_low, cloud_cut_low = self._percent_cut(band, low, 100 - (cloud_cover * 3 / 4))
        temp = numpy.zeros(numpy.shape(band), dtype=numpy.uint16)
        cloud_divide = 65000 - cloud_cover * 100
        mask = numpy.logical_and(band < cloud_cut_low, band > 0)
        temp[mask] = rescale_intensity(band[mask], in_range=(p_low, cloud_cut_low), out_range=(256, cloud_divide))
        temp[band >= cloud_cut_low] = rescale_intensity(band[band >= cloud_cut_low], out_range=(cloud_divide, 65535))
        return temp

    def _read_band(self, band_path):
        """ Reads a band with rasterio """
        return rasterio.open(band_path).read_band(1)

    def _rescale(self, bands):
        """ Rescale bands (2x upsample, back to uint16) """
        self.output("Rescaling", normal=True, arrow=True)

        for key, band in enumerate(bands):
            self.output("band %s" % self.bands[key], normal=True, color='green', indent=1)
            # rescale() returns floats in [0, 1]; scale back to uint16 range.
            bands[key] = sktransform.rescale(band, 2)
            bands[key] = (bands[key] * 65535).astype('uint16')

        return bands

    def _get_boundaries(self, src):
        """ Compute the four corner coordinates of *src* and project them.

        Index [0] holds the source-CRS coordinate, index [1] the same
        corner transformed into self.projection.
        """

        self.output("Getting boundaries", normal=True, arrow=True)
        output = {'ul': {'x': [0, 0], 'y': [0, 0]},  # ul: upper left
                  'ur': {'x': [0, 0], 'y': [0, 0]},  # ur: upper right
                  'll': {'x': [0, 0], 'y': [0, 0]},  # ll: lower left
                  'lr': {'x': [0, 0], 'y': [0, 0]}}  # lr: lower right

        output['ul']['x'][0] = src['affine'][2]
        output['ul']['y'][0] = src['affine'][5]
        output['ur']['x'][0] = output['ul']['x'][0] + self.pixel * src['shape'][1]
        output['ur']['y'][0] = output['ul']['y'][0]
        output['ll']['x'][0] = output['ul']['x'][0]
        output['ll']['y'][0] = output['ul']['y'][0] - self.pixel * src['shape'][0]
        output['lr']['x'][0] = output['ul']['x'][0] + self.pixel * src['shape'][1]
        output['lr']['y'][0] = output['ul']['y'][0] - self.pixel * src['shape'][0]

        output['ul']['x'][1], output['ul']['y'][1] = transform(src['crs'], self.projection,
                                                               [output['ul']['x'][0]],
                                                               [output['ul']['y'][0]])

        output['ur']['x'][1], output['ur']['y'][1] = transform(src['crs'], self.projection,
                                                               [output['ur']['x'][0]],
                                                               [output['ur']['y'][0]])

        output['ll']['x'][1], output['ll']['y'][1] = transform(src['crs'], self.projection,
                                                               [output['ll']['x'][0]],
                                                               [output['ll']['y'][0]])

        output['lr']['x'][1], output['lr']['y'][1] = transform(src['crs'], self.projection,
                                                               [output['lr']['x'][0]],
                                                               [output['lr']['y'][0]])

        return output

    def _percent_cut(self, color, low, high):
        """ Percentiles of the valid (0 < v < 65535) pixel values. """
        return numpy.percentile(color[numpy.logical_and(color > 0, color < 65535)], (low, high))

    def _unzip(self, src, dst, scene, force_unzip=False):
        """ Unzip tar files """
        self.output("Unzipping %s - It might take some time" % scene, normal=True, arrow=True)

        try:
            # check if file is already unzipped, skip
            if isdir(dst) and not force_unzip:
                self.output("%s is already unzipped." % scene, normal=True, arrow=True)
                return
            else:
                tar = tarfile.open(src, 'r')
                tar.extractall(path=dst)
                tar.close()
        except tarfile.ReadError:
            # Fall back to the system tar when the archive format isn't
            # readable by Python's tarfile.
            check_create_folder(dst)
            subprocess.check_call(['tar', '-xf', src, '-C', dst])

    def _get_full_filename(self, band):
        """ Resolve the on-disk filename for *band* within the scene folder. """

        base_file = '%s_B%s.*' % (self.scene, band)
        try:
            return glob.glob(join(self.scene_path, base_file))[0].split('/')[-1]
        except IndexError:
            raise FileDoesNotExist('%s does not exist' % '%s_B%s.*' % (self.scene, band))

    def _check_if_zipped(self, path):
        """ Checks if the filename shows a tar/zip file """
        filename = get_file(path).split('.')

        if filename[-1] in ['bz', 'bz2']:
            return True

        return False
if __name__ == '__main__':
    # CLI entry point: argv[1] is the image path, argv[2] enables
    # pansharpening when it equals 't'.
    # NOTE: Python 2 print statement — this file targets Python 2.
    with timer():
        p = Process(sys.argv[1])

        print p.run(sys.argv[2] == 't')
| |
#system
import json
#sbaas
from .stage01_resequencing_analysis_query import stage01_resequencing_analysis_query
from .stage01_resequencing_coverage_query import stage01_resequencing_coverage_query
from .stage01_resequencing_coverage_dependencies import stage01_resequencing_coverage_dependencies
#sbaas models
from .stage01_resequencing_coverage_postgresql_models import *
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from sequencing_analysis.gff_coverage import gff_coverage
from ddt_python.ddt_container import ddt_container
class stage01_resequencing_coverage_io(
    stage01_resequencing_coverage_query,
    stage01_resequencing_coverage_dependencies,
    stage01_resequencing_analysis_query,
    sbaas_template_io
):
    """I/O for stage01 resequencing coverage: import gff coverage data and
    export coverage/amplification visualizations as ddt js/json."""

    def import_resequencingCoverageData_add(self, filename,
                #analysis_id,
                experiment_id, sample_name, strand_start, strand_stop, scale_factor=True, downsample_factor=2000):
        '''table adds

        Parse a .gff coverage file and add the rows to the
        data_stage01_resequencing_coverage table.

        NOTE: multiple chromosomes not yet supported in sequencing_utilities
        '''
        #OPTION1
        gffcoverage = gff_coverage();
        coverage_data = [];
        if '.bam' in filename:
            #TODO convert .bam to .gff using makegff.py from sequencing_utilities
            print('conversion of .bam to .gff not yet supported');
            exit(2);
            #filename_bam = filename;
            #filename = filename.replace('.bam','.gff');
            #extract_strandsFromGff(filename_bam,filename,separate_strand=False);
        # convert strings to float and int
        # NOTE(review): bool() on a non-empty string (e.g. "False") is True —
        # confirm callers pass real booleans for scale_factor.
        strand_start, strand_stop, scale_factor, downsample_factor = int(strand_start), int(strand_stop), bool(scale_factor), float(downsample_factor);
        #OPTION1
        # parse the gff file
        gffcoverage.extract_coverage_fromGff(filename, strand_start, strand_stop, scale_factor=scale_factor, downsample_factor=downsample_factor,experiment_id_I = experiment_id,sample_name_I=sample_name);
        coverage_data = gffcoverage.coverage;
        ##OPTION2
        ## parse the gff file
        #coverage_data = [];
        #coverage_data = self.extract_coverage_fromGff(filename, strand_start, strand_stop, scale_factor=scale_factor, downsample_factor=downsample_factor,experiment_id_I = experiment_id,sample_name_I=sample_name);
        # add data to the database:
        self.add_dataStage01ResequencingCoverage(coverage_data);

    def export_dataStage01ResequencingAmplifications_js(self, analysis_id_I, data_dir_I="tmp"):
        """export amplifications and statistics to js file"""
        ddtutilities = ddt_container()
        # get the analysis info
        #analysis_info = {};
        #analysis_info = self.get_analysis_analysisID_dataStage01ResequencingAnalysis(analysis_id_I);
        experiment_ids = []
        lineage_names = []
        sample_names = []
        time_points = []
        experiment_ids,lineage_names,sample_names,time_points = self.get_experimentIDAndLineageNameAndSampleNameAndTimePoint_analysisID_dataStage01ResequencingAnalysis(analysis_id_I);
        # convert time_point to intermediates
        # (sort all parallel lists by the integer time point)
        time_points_int = [int(x) for x in time_points];
        intermediates,time_points,experiment_ids,sample_names,lineage_names = (list(t) for t in zip(*sorted(zip(time_points_int,time_points,experiment_ids,sample_names,lineage_names))))
        # replace the sorted time points with their ordinal position
        intermediates = [i for i,x in enumerate(intermediates)];
        #get the data for the analysis
        data1_O = [];
        data2_O = [];
        data3_O = [];
        for sn_cnt,sn in enumerate(sample_names):
            # data1: per-index amplification reads for the scatter plot
            data1_tmp = [];
            data1_tmp = self.get_rows_experimentIDAndSampleName_dataStage01ResequencingAmplifications_visualization(experiment_ids[sn_cnt],sn);
            data1_O.extend(ddtutilities.make_listDict_JSONAndJSCompatible(data1_tmp));
            # data2: amplification statistics table rows
            data2_tmp = [];
            data2_tmp = self.get_rows_experimentIDAndSampleName_dataStage01ResequencingAmplificationStats(experiment_ids[sn_cnt],sn);
            data2_O.extend(ddtutilities.make_listDict_JSONAndJSCompatible(data2_tmp));
            # data3: amplification annotations table rows
            data3_tmp = [];
            data3_tmp = self.get_rows_experimentIDAndSampleName_dataStage01ResequencingAmplificationAnnotations(experiment_ids[sn_cnt],sn);
            data3_O.extend(ddtutilities.make_listDict_JSONAndJSCompatible(data3_tmp));
        # dump chart parameters to a js files
        data1_keys = ['experiment_id',
                    'sample_name',
                    'genome_chromosome',
                    'genome_strand',
                    'amplification_start',
                    'amplification_stop',
                    'sample_name_strand',
                    ]
        data1_nestkeys = [
            #'sample_name',
            'genome_strand'
            ];
        data1_keymap = {'xdata':'genome_index',
                        'ydata':'reads',
                        'serieslabel':'sample_name_strand',#custom for vis
                        #'serieslabel':'genome_strand',
                        'featureslabel':'reads'};
        data2_keys = ['experiment_id',
                    'sample_name',
                    'genome_chromosome',
                    'genome_strand',
                    #'reads_min',
                    #'reads_max',
                    #'reads_lb',
                    #'reads_ub',
                    #'reads_iq1',
                    #'reads_iq3',
                    #'reads_median',
                    #'reads_mean',
                    #'reads_var',
                    #'reads_n',
                    'amplification_start',
                    'amplification_stop',
                    ]
        data2_nestkeys = ['sample_name'];
        data2_keymap = {'xdata':'genome_index',
                        'ydata':'reads',
                        'serieslabel':'genome_strand',
                        'featureslabel':'reads'};
        data3_keys = ['experiment_id',
                    'sample_name',
                    'genome_chromosome',
                    'genome_strand',
                    'feature_annotations',
                    'feature_genes',
                    'feature_locations',
                    'feature_links',
                    'feature_start',
                    'feature_stop',
                    'feature_types',
                    'amplification_start',
                    'amplification_stop',
                    ]
        data3_nestkeys = ['sample_name'];
        data3_keymap = {'xdata':'genome_index',
                        'ydata':'reads',
                        'serieslabel':'genome_strand',
                        'featureslabel':'reads'};
        # make the data object
        dataobject_O = [{"data":data1_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},
                        {"data":data2_O,"datakeys":data2_keys,"datanestkeys":data2_nestkeys},
                        {"data":data3_O,"datakeys":data3_keys,"datanestkeys":data3_nestkeys}
                        ];
        # make the tile parameter objects
        # linked set #1: filter menu + scatter plot of the amplifications
        formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
        formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
        formtileparameters_O.update(formparameters_O);
        svgparameters_O = {"svgtype":'scatterplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
                            'svgid':'svg1',
                            "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
                            "svgwidth":500,"svgheight":350,
                            "svgx1axislabel":"index","svgy1axislabel":"reads",
                            'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1',
                            "svgx1axistickformat":".2e",
                            "svgx1axisticktextattr":{"transform":"matrix(0,1,-1,0,16,6)",
                                #"transform":'rotate(90)',"transform":'translate(0,10)'
                                },
                            "svgx1axisticktextstyle":{"text-anchor":"start"}
                            };
        svgtileparameters_O = {'tileheader':'Amplifications','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
        svgtileparameters_O.update(svgparameters_O);
        # linked set #2: filter menu + statistics table
        formtileparameters2_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu2",'rowid':"row2",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
        formparameters2_O = {'htmlid':'filtermenuform2',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit2','text':'submit'},"formresetbuttonidtext":{'id':'reset2','text':'reset'},"formupdatebuttonidtext":{'id':'update2','text':'update'}};
        formtileparameters2_O.update(formparameters2_O);
        tableparameters_O = {"tabletype":'responsivetable_01',
                    'tableid':'table1',
                    "tablefilters":None,
                    "tableclass":"table table-condensed table-hover",
                    'tableformtileid':'filtermenu2','tableresetbuttonid':'reset2','tablesubmitbuttonid':'submit2'};
        tabletileparameters_O = {'tileheader':'Amplification statistics','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col2",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
        tabletileparameters_O.update(tableparameters_O);
        # linked set #3: filter menu + annotations table
        formtileparameters3_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu3",'rowid':"row3",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
        formparameters3_O = {'htmlid':'filtermenuform3',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit3','text':'submit'},"formresetbuttonidtext":{'id':'reset3','text':'reset'},"formupdatebuttonidtext":{'id':'update3','text':'update'}};
        formtileparameters3_O.update(formparameters3_O);
        tableparameters2_O = {"tabletype":'responsivetable_01',
                    'tableid':'table2',
                    "tablefilters":None,
                    "tableclass":"table table-condensed table-hover",
                    'tableformtileid':'filtermenu3','tableresetbuttonid':'reset3','tablesubmitbuttonid':'submit3'};
        tabletileparameters2_O = {'tileheader':'Amplification annotations','tiletype':'table','tileid':"tile4",'rowid':"row3",'colid':"col2",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
        tabletileparameters2_O.update(tableparameters2_O);
        parametersobject_O = [formtileparameters_O,svgtileparameters_O,formtileparameters2_O,tabletileparameters_O,formtileparameters3_O,tabletileparameters2_O];
        tile2datamap_O = {"filtermenu1":[0],"tile2":[0,0],"tile3":[1],"tile4":[2],"filtermenu2":[1],"filtermenu3":[2]};
        filtermenuobject_O = [{"filtermenuid":"filtermenu1","filtermenuhtmlid":"filtermenuform1",
                "filtermenusubmitbuttonid":"submit1","filtermenuresetbuttonid":"reset1",
                "filtermenuupdatebuttonid":"update1"},{"filtermenuid":"filtermenu2","filtermenuhtmlid":"filtermenuform2",
                "filtermenusubmitbuttonid":"submit2","filtermenuresetbuttonid":"reset2",
                "filtermenuupdatebuttonid":"update2"},{"filtermenuid":"filtermenu3","filtermenuhtmlid":"filtermenuform3",
                "filtermenusubmitbuttonid":"submit3","filtermenuresetbuttonid":"reset3",
                "filtermenuupdatebuttonid":"update3"}];
        # dump the data to a json file
        ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = filtermenuobject_O);
        if data_dir_I=='tmp':
            filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
        elif data_dir_I=='data_json':
            data_json_O = ddtutilities.get_allObjects_js();
            return data_json_O;
        # NOTE(review): filename_str is only bound when data_dir_I == 'tmp';
        # any other value raises NameError here — confirm intended inputs.
        with open(filename_str,'w') as file:
            file.write(ddtutilities.get_allObjects());

    def export_dataStage01ResequencingCoverage_js(self, analysis_id_I, data_dir_I="tmp"):
        """export heatmap to js file"""
        # get the analysis info
        #analysis_info = {};
        #analysis_info = self.get_analysis_analysisID_dataStage01ResequencingAnalysis(analysis_id_I);
        experiment_ids = []
        lineage_names = []
        sample_names = []
        time_points = []
        experiment_ids,lineage_names,sample_names,time_points = self.get_experimentIDAndLineageNameAndSampleNameAndTimePoint_analysisID_dataStage01ResequencingAnalysis(analysis_id_I);
        # convert time_point to intermediates
        # (sort all parallel lists by the integer time point)
        time_points_int = [int(x) for x in time_points];
        intermediates,time_points,experiment_ids,sample_names,lineage_names = (list(t) for t in zip(*sorted(zip(time_points_int,time_points,experiment_ids,sample_names,lineage_names))))
        # replace the sorted time points with their ordinal position
        intermediates = [i for i,x in enumerate(intermediates)];
        #get the data for the analysis
        data1_O = [];
        data2_O = [];
        for sn_cnt,sn in enumerate(sample_names):
            # data1: per-index coverage reads for the scatter plot
            data1_tmp = [];
            data1_tmp = self.get_rows_experimentIDAndSampleName_dataStage01ResequencingCoverage_visualization(experiment_ids[sn_cnt],sn);
            data1_O.extend(data1_tmp);
            # data2: coverage statistics table rows
            data2_tmp = [];
            data2_tmp = self.get_rows_experimentIDAndSampleName_dataStage01ResequencingCoverageStats(experiment_ids[sn_cnt],sn);
            data2_O.extend(data2_tmp);
        # dump chart parameters to a js files
        data1_keys = ['experiment_id',
                    'sample_name',
                    'genome_chromosome',
                    'genome_strand',
                    'sample_name_strand'
                    ]
        data1_nestkeys = [
            #'sample_name',
            'genome_strand'
            ];
        data1_keymap = {'xdata':'genome_index',
                        'ydata':'reads',
                        'serieslabel':'sample_name_strand',#custom for vis
                        'featureslabel':'reads'};
        data2_keys = ['experiment_id',
                    'sample_name',
                    'genome_chromosome',
                    'genome_strand',
                    #'strand_start',
                    #'strand_stop',
                    #'reads_min',
                    #'reads_max',
                    #'reads_lb',
                    #'reads_ub',
                    #'reads_iq1',
                    #'reads_iq3',
                    #'reads_median',
                    #'reads_mean',
                    #'reads_var',
                    #'reads_n',
                    'amplification_start',
                    'amplification_stop',
                    'used_',
                    'comment_'
                    ]
        data2_nestkeys = ['sample_name'];
        data2_keymap = {'xdata':'genome_index',
                        'ydata':'reads',
                        'serieslabel':'genome_strand',
                        'featureslabel':'reads'};
        # make the data object
        dataobject_O = [{"data":data1_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys},{"data":data2_O,"datakeys":data2_keys,"datanestkeys":data2_nestkeys}];
        # make the tile parameter objects
        formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
        formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
        formtileparameters_O.update(formparameters_O);
        svgparameters_O = {"svgtype":'scatterplot2d_01',"svgkeymap":[data1_keymap,data1_keymap],
                            'svgid':'svg1',
                            "svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
                            "svgwidth":500,"svgheight":350,
                            "svgx1axislabel":"index","svgy1axislabel":"reads",
                            'svgformtileid':'filtermenu1','svgresetbuttonid':'reset1','svgsubmitbuttonid':'submit1',
                            "svgx1axistickformat":".2e",
                            "svgx1axisticktextattr":{"transform":"matrix(0,1,-1,0,16,6)",
                                #"transform":'rotate(90)',"transform":'translate(0,10)'
                                },
                            "svgx1axisticktextstyle":{"text-anchor":"start"}
                            };
        svgtileparameters_O = {'tileheader':'Resequencing coverage','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
        svgtileparameters_O.update(svgparameters_O);
        tableparameters_O = {"tabletype":'responsivetable_01',
                    'tableid':'table1',
                    "tablefilters":None,
                    "tableclass":"table table-condensed table-hover",
                    'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
        tabletileparameters_O = {'tileheader':'Resequencing coverage statistics','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
            'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
        tabletileparameters_O.update(tableparameters_O);
        parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
        tile2datamap_O = {"filtermenu1":[0],"tile2":[0,0],"tile3":[1]};
        # dump the data to a json file
        ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = None);
        if data_dir_I=='tmp':
            filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
        elif data_dir_I=='data_json':
            data_json_O = ddtutilities.get_allObjects_js();
            return data_json_O;
        # NOTE(review): filename_str is only bound when data_dir_I == 'tmp';
        # any other value raises NameError here — confirm intended inputs.
        with open(filename_str,'w') as file:
            file.write(ddtutilities.get_allObjects());
| |
# No cancel button.
from pywin.mfc import dialog
from pywin.mfc.thread import WinThread
import threading
import win32ui
import win32con
import win32api
import time
def MakeProgressDlgTemplate(caption, staticText = ""):
    """Build a pywin dialog template for a modal progress dialog.

    :param caption: window title for the dialog.
    :param staticText: initial message shown in the static control (id 1000).
    :returns: a dialog template list with the dialog header and one static
        text control.  The progress bar itself is created at runtime in
        OnInitDialog; there is no Cancel button.
    """
    style = (win32con.DS_MODALFRAME |
             win32con.WS_POPUP |
             win32con.WS_VISIBLE |
             win32con.WS_CAPTION |
             win32con.WS_SYSMENU |
             win32con.DS_SETFONT)
    cs = (win32con.WS_CHILD |
          win32con.WS_VISIBLE)

    # Dialog size in dialog units.  40 leaves room for the progress bar;
    # (the previously dead `h = 36  # With button` assignment and the
    # unused `s` style for the commented-out Cancel button were removed).
    w = 215
    h = 40

    dlg = [[caption,
            (0, 0, w, h),
            style,
            None,
            (8, "MS Sans Serif")],
           ]
    dlg.append([130, staticText, 1000, (7, 7, w - 7, h - 32), cs | win32con.SS_LEFT])
    return dlg
class CStatusProgressDialog(dialog.Dialog):
    """Progress dialog: a static text line above a progress-bar control.

    The progress control only exists after OnInitDialog has run, so every
    method that touches it bails out early while it is still None.
    """
    def __init__(self, title, msg = "", maxticks = 100, tickincr = 1):
        self.initMsg = msg
        dialog.Dialog.__init__(self, MakeProgressDlgTemplate(title, msg))
        self.maxticks = maxticks
        self.tickincr = tickincr
        self.pbar = None

    def OnInitDialog(self):
        result = dialog.Dialog.OnInitDialog(self)
        # Cache the static label and create the progress control at runtime.
        self.static = self.GetDlgItem(1000)
        self.pbar = win32ui.CreateProgressCtrl()
        self.pbar.CreateWindow(win32con.WS_CHILD | win32con.WS_VISIBLE,
                               (10, 30, 310, 44),
                               self, 1001)
        self.pbar.SetRange(0, self.maxticks)
        self.pbar.SetStep(self.tickincr)
        self.progress = 0
        self.pincr = 5
        return result

    def Close(self):
        self.EndDialog(0)

    def SetMaxTicks(self, maxticks):
        if self.pbar is None:
            return
        self.pbar.SetRange(0, maxticks)

    def Tick(self):
        if self.pbar is None:
            return
        self.pbar.StepIt()

    def SetTitle(self, text):
        self.SetWindowText(text)

    def SetText(self, text):
        self.SetDlgItemText(1000, text)

    def Set(self, pos, max = None):
        if self.pbar is None:
            return
        self.pbar.SetPos(pos)
        if max is not None:
            self.pbar.SetRange(0, max)
# a progress dialog created in a new thread - especially suitable for
# console apps with no message loop.

# Custom window messages used to marshal dialog updates posted from other
# threads onto the dialog's own thread (hooked up in
# CThreadedStatusProcessDialog.OnInitDialog).
MYWM_SETTITLE = win32con.WM_USER+10
MYWM_SETMSG = win32con.WM_USER+11
MYWM_TICK = win32con.WM_USER+12
MYWM_SETMAXTICKS = win32con.WM_USER+13
MYWM_SET = win32con.WM_USER+14
class CThreadedStatusProcessDialog(CStatusProgressDialog):
    """Progress dialog that may be updated from threads other than its own.

    The public mutators (SetTitle, SetText, Tick, ...) stash the new state
    on the instance and post a custom window message; the corresponding
    handler runs in the dialog's own thread and applies the update via the
    base-class method.
    """
    def __init__(self, title, msg = "", maxticks = 100, tickincr = 1):
        self.title = title
        self.msg = msg
        # Id of the thread constructing the dialog; Close() posts WM_QUIT
        # there to end that thread's message loop.
        self.threadid = win32api.GetCurrentThreadId()
        CStatusProgressDialog.__init__(self, title, msg, maxticks, tickincr)
    def OnInitDialog(self):
        rc = CStatusProgressDialog.OnInitDialog(self)
        # Route the custom cross-thread messages to their handlers.
        self.HookMessage(self.OnTitle, MYWM_SETTITLE)
        self.HookMessage(self.OnMsg, MYWM_SETMSG)
        self.HookMessage(self.OnTick, MYWM_TICK)
        self.HookMessage(self.OnMaxTicks, MYWM_SETMAXTICKS)
        self.HookMessage(self.OnSet, MYWM_SET)
        return rc
    def _Send(self, msg):
        # Asynchronously post a custom message to the dialog's own thread.
        try:
            self.PostMessage(msg)
        except win32ui.error:
            # the user closed the window - but this does not cancel the
            # process - so just ignore it.
            pass
    # The On* handlers run in the dialog's thread; they ignore the message
    # payload and read the state previously stored by the public mutators.
    def OnTitle(self, msg):
        CStatusProgressDialog.SetTitle(self, self.title)
    def OnMsg(self, msg):
        CStatusProgressDialog.SetText(self, self.msg)
    def OnTick(self, msg):
        CStatusProgressDialog.Tick(self)
    def OnMaxTicks(self, msg):
        CStatusProgressDialog.SetMaxTicks(self, self.maxticks)
    def OnSet(self, msg):
        # self.pos / self.max are set by Set() before the message is posted.
        CStatusProgressDialog.Set(self, self.pos, self.max)
    def Close(self):
        assert self.threadid, "No thread!"
        # End the dialog thread's message loop rather than calling EndDialog.
        win32api.PostThreadMessage(self.threadid, win32con.WM_QUIT, 0, 0)
    def SetMaxTicks(self, maxticks):
        self.maxticks = maxticks
        self._Send(MYWM_SETMAXTICKS)
    def SetTitle(self, title):
        self.title = title
        self._Send(MYWM_SETTITLE)
    def SetText(self, text):
        self.msg = text
        self._Send(MYWM_SETMSG)
    def Tick(self):
        self._Send(MYWM_TICK)
    def Set(self, pos, max = None):
        self.pos = pos
        self.max = max
        self._Send(MYWM_SET)
class ProgressThread(WinThread):
    """Worker thread that owns a CThreadedStatusProcessDialog."""
    def __init__(self, title, msg = "", maxticks = 100, tickincr = 1):
        self.title = title
        self.msg = msg
        self.maxticks = maxticks
        self.tickincr = tickincr
        self.dialog = None
        WinThread.__init__(self)
        # Signalled once the dialog window exists, so the creating thread
        # can safely hand it out (see ThreadedStatusProgressDialog).
        self.createdEvent = threading.Event()
    def InitInstance(self):
        # Runs on the new thread: create the dialog, then signal readiness.
        self.dialog = CThreadedStatusProcessDialog( self.title, self.msg, self.maxticks, self.tickincr)
        self.dialog.CreateWindow()
        try:
            self.dialog.SetForegroundWindow()
        except win32ui.error:
            pass
        self.createdEvent.set()
        return WinThread.InitInstance(self)
    def ExitInstance(self):
        # Thread exit code.
        return 0
def StatusProgressDialog(title, msg = "", maxticks = 100, parent = None):
    """Create and return a progress dialog on the calling thread."""
    dlg = CStatusProgressDialog(title, msg, maxticks)
    dlg.CreateWindow(parent)
    return dlg
def ThreadedStatusProgressDialog(title, msg = "", maxticks = 100):
    """Create a progress dialog in its own thread and return the dialog.

    Returns the CThreadedStatusProcessDialog once its window exists, or
    whatever has been created after a ten-second safety timeout.
    """
    t = ProgressThread(title, msg, maxticks)
    t.CreateThread()
    # Need to run a basic "PumpWaitingMessages" loop just incase we are
    # running inside Pythonwin.
    # Basic timeout incase things go terribly wrong. Ideally we should use
    # win32event.MsgWaitForMultipleObjects(), but we use a threading module
    # event - so use a dumb strategy
    end_time = time.time() + 10
    while time.time() < end_time:
        # is_set() replaces the deprecated camelCase isSet() alias, which
        # was removed from threading.Event in modern Python.
        if t.createdEvent.is_set():
            break
        win32ui.PumpWaitingMessages()
        time.sleep(0.1)
    return t.dialog
def demo():
    """Exercise the single-threaded progress dialog."""
    dlg = StatusProgressDialog("A Demo", "Doing something...")
    import win32api
    for step in range(100):
        if step == 50:
            dlg.SetText("Getting there...")
        elif step == 90:
            dlg.SetText("Nearly done...")
        win32api.Sleep(20)
        dlg.Tick()
    dlg.Close()
def thread_demo():
    """Exercise the threaded progress dialog."""
    dlg = ThreadedStatusProgressDialog("A threaded demo", "Doing something")
    import win32api
    for step in range(100):
        if step == 50:
            dlg.SetText("Getting there...")
        elif step == 90:
            dlg.SetText("Nearly done...")
        win32api.Sleep(20)
        dlg.Tick()
    dlg.Close()
if __name__=='__main__':
    # Run the threaded variant by default; swap the comment below to try
    # the single-threaded demo instead.
    thread_demo()
    #demo()
| |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import multiprocessing
import queue as Queue
import threading
import time
from rally.common import logging
from rally.common import utils
from rally.common import validation
from rally import consts
from rally.task import runner
LOG = logging.getLogger(__name__)
def _worker_process(queue, iteration_gen, timeout, times, max_concurrent,
                    context, cls, method_name, args, event_queue, aborted,
                    runs_per_second, rps_cfg, processes_to_start, info):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and appends
    result to queue. A maximum of max_concurrent threads will be ran
    concurrently.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param times: total number of scenario iterations to be run
    :param max_concurrent: maximum worker concurrency
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param event_queue: queue object to append scenario events
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param runs_per_second: function that should return desired rps value
    :param rps_cfg: rps section from task config
    :param processes_to_start: int, number of started processes for scenario
                               execution
    :param info: info about all processes count and counter of runned process
    """
    pool = collections.deque()
    # For a stepped rps config, pacing starts at the "start" rate.
    if isinstance(rps_cfg, dict):
        rps = rps_cfg["start"]
    else:
        rps = rps_cfg
    sleep = 1.0 / rps
    runner._log_worker_info(times=times, rps=rps, timeout=timeout,
                            cls=cls, method_name=method_name, args=args)
    # Stagger worker process start times so that all processes do not
    # launch their first iteration in lockstep.
    time.sleep(
        (sleep * info["processes_counter"]) / info["processes_to_start"])
    start = time.time()
    timeout_queue = Queue.Queue()
    if timeout:
        # Collector thread that enforces per-iteration deadlines pushed
        # onto timeout_queue (see rally.common.utils.timeout_thread).
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, )
        )
        collector_thr_by_timeout.start()
    i = 0
    while i < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(next(iteration_gen),
                                                        context)
        worker_args = (
            queue, cls, method_name, scenario_context, args, event_queue)
        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)
        i += 1
        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)
        time_gap = time.time() - start
        # Guard against division by zero on the very first iteration.
        real_rps = i / time_gap if time_gap else "Infinity"
        LOG.debug(
            "Worker: %s rps: %s (requested rps: %s)" %
            (i, real_rps, runs_per_second(rps_cfg, start, processes_to_start)))
        # try to join latest thread(s) until it finished, or until time to
        # start new thread (if we have concurrent slots available)
        while i / (time.time() - start) > runs_per_second(
                rps_cfg, start, processes_to_start) or (
                len(pool) >= max_concurrent):
            if pool:
                pool[0].join(0.001)
                if not pool[0].is_alive():
                    pool.popleft()
            else:
                time.sleep(0.001)
    # Wait for all outstanding scenario threads to finish.
    while pool:
        pool.popleft().join()
    if timeout:
        # (None, None) sentinel unblocks and stops the collector thread.
        timeout_queue.put((None, None,))
        collector_thr_by_timeout.join()
@validation.configure("check_rps")
class CheckPRSValidator(validation.Validator):
    """Additional schema validation for rps runner"""

    def validate(self, context, config, plugin_cls, plugin_cfg):
        # Only the dict (ramp-up) form of "rps" needs extra checking; a
        # plain number is fully covered by the JSON schema.
        rps = plugin_cfg["rps"]
        if not isinstance(rps, dict):
            return
        if rps["end"] < rps["start"]:
            return self.fail(
                "rps end value must not be less than rps start value.")
@validation.add("check_rps")
@runner.configure(name="rps")
class RPSScenarioRunner(runner.ScenarioRunner):
    """Scenario runner that does the job with specified frequency.

    Every single scenario iteration is executed with specified frequency
    (runs per second) in a pool of processes. The scenario will be
    launched for a fixed number of times in total (specified in the config).

    An example of a rps scenario is booting 1 VM per second. This
    execution type is thus very helpful in understanding the maximal load that
    a certain cloud can handle.
    """

    # JSON schema for the runner section of the task config.  "rps" is
    # either a constant rate or a dict describing a stepwise ramp-up from
    # "start" to "end" by "step" every "duration" seconds.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA7,
        "properties": {
            "times": {
                "type": "integer",
                "minimum": 1
            },
            "rps": {
                "anyOf": [
                    {
                        "description": "Generate constant requests per second "
                                       "during the whole workload.",
                        "type": "number",
                        "exclusiveMinimum": 0,
                        "minimum": 0
                    },
                    {
                        "type": "object",
                        "description": "Increase requests per second for "
                                       "specified value each time after a "
                                       "certain number of seconds.",
                        "properties": {
                            "start": {
                                "type": "number",
                                "minimum": 1
                            },
                            "end": {
                                "type": "number",
                                "minimum": 1
                            },
                            "step": {
                                "type": "number",
                                "minimum": 1
                            },
                            "duration": {
                                "type": "number",
                                "minimum": 1
                            }
                        },
                        "additionalProperties": False,
                        "required": ["start", "end", "step"]
                    }
                ],
            },
            "timeout": {
                "type": "number",
            },
            "max_concurrency": {
                "type": "integer",
                "minimum": 1
            },
            "max_cpu_count": {
                "type": "integer",
                "minimum": 1
            }
        },
        "required": ["times", "rps"],
        "additionalProperties": False
    }

    def _run_scenario(self, cls, method_name, context, args):
        """Runs the specified scenario with given arguments.

        Every single scenario iteration is executed with specified
        frequency (runs per second) in a pool of processes. The scenario is
        launched for a fixed number of times in total (specified in the
        config).

        :param cls: The Scenario class where the scenario is implemented
        :param method_name: Name of the method that implements the scenario
        :param context: Context that contains users, admin & other
                        information, that was created before scenario
                        execution starts.
        :param args: Arguments to call the scenario method with
        :returns: List of results fore each single scenario iteration,
                  where each result is a dictionary
        """
        times = self.config["times"]
        timeout = self.config.get("timeout", 0)  # 0 means no timeout
        iteration_gen = utils.RAMInt()
        cpu_count = multiprocessing.cpu_count()
        max_cpu_used = min(cpu_count,
                           self.config.get("max_cpu_count", cpu_count))

        def runs_per_second(rps_cfg, start_timer, number_of_processes):
            """At the given second return desired rps."""
            # Constant rps is split evenly between the worker processes.
            if not isinstance(rps_cfg, dict):
                return float(rps_cfg) / number_of_processes
            # Stepped rps: every "duration" seconds the rate grows by
            # "step", starting from "start" and capped at "end".
            stage_order = (time.time() - start_timer) / rps_cfg.get(
                "duration", 1) - 1
            rps = (float(rps_cfg["start"] + rps_cfg["step"] * stage_order)
                   / number_of_processes)
            return min(rps, float(rps_cfg["end"]))

        # Never start more processes than iterations or allowed concurrency.
        processes_to_start = min(max_cpu_used, times,
                                 self.config.get("max_concurrency", times))
        times_per_worker, times_overhead = divmod(times, processes_to_start)
        # Determine concurrency per worker
        concurrency_per_worker, concurrency_overhead = divmod(
            self.config.get("max_concurrency", times), processes_to_start)
        self._log_debug_info(times=times, timeout=timeout,
                             max_cpu_used=max_cpu_used,
                             processes_to_start=processes_to_start,
                             times_per_worker=times_per_worker,
                             times_overhead=times_overhead,
                             concurrency_per_worker=concurrency_per_worker,
                             concurrency_overhead=concurrency_overhead)
        result_queue = multiprocessing.Queue()
        event_queue = multiprocessing.Queue()

        def worker_args_gen(times_overhead, concurrency_overhead):
            """Generate arguments for process worker.

            Remainder of threads per process division is distributed to
            process workers equally - one thread per each process worker
            until the remainder equals zero. The same logic is applied
            to concurrency overhead.

            :param times_overhead: remaining number of threads to be
                                   distributed to workers
            :param concurrency_overhead: remaining number of maximum
                                         concurrent threads to be
                                         distributed to workers
            """
            # NOTE(review): this yields the _worker_process arguments minus
            # the trailing "info" dict, which _create_process_pool appears
            # to supply - confirm against ScenarioRunner.
            while True:
                yield (
                    result_queue, iteration_gen, timeout,
                    times_per_worker + (times_overhead and 1),
                    concurrency_per_worker + (concurrency_overhead and 1),
                    context, cls, method_name, args, event_queue,
                    self.aborted, runs_per_second, self.config["rps"],
                    processes_to_start
                )
                if times_overhead:
                    times_overhead -= 1
                if concurrency_overhead:
                    concurrency_overhead -= 1

        process_pool = self._create_process_pool(
            processes_to_start, _worker_process,
            worker_args_gen(times_overhead, concurrency_overhead))
        self._join_processes(process_pool, result_queue, event_queue)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Traits handlers for Placement API."""
import jsonschema
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
import webob
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.schemas import trait as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
def _normalize_traits_qs_param(qs):
try:
op, value = qs.split(':', 1)
except ValueError:
msg = _('Badly formatted name parameter. Expected name query string '
'parameter in form: '
'?name=[in|startswith]:[name1,name2|prefix]. Got: "%s"')
msg = msg % qs
raise webob.exc.HTTPBadRequest(msg)
filters = {}
if op == 'in':
filters['name_in'] = value.split(',')
elif op == 'startswith':
filters['prefix'] = value
return filters
def _serialize_traits(traits, want_version):
    """Return the response body dict and last-modified time for traits."""
    include_last_modified = want_version.matches((1, 15))
    last_modified = None
    trait_names = []
    for trait in traits:
        if include_last_modified:
            last_modified = util.pick_last_modified(last_modified, trait)
        trait_names.append(trait.name)
    # No traits (or microversion < 1.15): fall back to the current time.
    if last_modified is None:
        last_modified = timeutils.utcnow(with_timezone=True)
    return {'traits': trait_names}, last_modified
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
def put_trait(req):
    """Create a custom trait; 201 on creation, 204 if it already exists."""
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    name = util.wsgi_path_item(req.environ, 'name')
    # Trait names must look like CUSTOM_...; anything else is a 400.
    try:
        jsonschema.validate(name, schema.CUSTOM_TRAIT)
    except jsonschema.ValidationError:
        raise webob.exc.HTTPBadRequest(
            _('The trait is invalid. A valid trait must be no longer than '
              '255 characters, start with the prefix "CUSTOM_" and use '
              'following characters: "A"-"Z", "0"-"9" and "_"'))
    trait = rp_obj.Trait(context)
    trait.name = name
    try:
        trait.create()
        req.response.status = 201
    except exception.TraitExists:
        # Get the trait that already exists to get last-modified time.
        if want_version.matches((1, 15)):
            trait = rp_obj.Trait.get_by_name(context, name)
        req.response.status = 204
    req.response.content_type = None
    req.response.location = util.trait_url(req.environ, trait)
    if want_version.matches((1, 15)):
        # Microversion 1.15 adds caching headers to the response.
        req.response.last_modified = trait.created_at
        req.response.cache_control = 'no-cache'
    return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
def get_trait(req):
    """Return 204 if the named trait exists, 404 otherwise."""
    context = req.environ['placement.context']
    name = util.wsgi_path_item(req.environ, 'name')
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    try:
        trait = rp_obj.Trait.get_by_name(context, name)
    except exception.TraitNotFound as not_found:
        raise webob.exc.HTTPNotFound(not_found.format_message())
    req.response.status = 204
    req.response.content_type = None
    if want_version.matches((1, 15)):
        # Microversion 1.15 adds caching headers to the response.
        req.response.last_modified = trait.created_at
        req.response.cache_control = 'no-cache'
    return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
def delete_trait(req):
    """Delete the named trait; standard or in-use traits cannot be removed."""
    context = req.environ['placement.context']
    name = util.wsgi_path_item(req.environ, 'name')
    try:
        rp_obj.Trait.get_by_name(context, name).destroy()
    except exception.TraitNotFound as not_found:
        raise webob.exc.HTTPNotFound(not_found.format_message())
    except exception.TraitCannotDeleteStandard as standard:
        raise webob.exc.HTTPBadRequest(standard.format_message())
    except exception.TraitInUse as in_use:
        raise webob.exc.HTTPConflict(in_use.format_message())
    req.response.status = 204
    req.response.content_type = None
    return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
@util.check_accept('application/json')
def list_traits(req):
    """List traits, optionally filtered by name and/or association."""
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    filters = {}
    util.validate_query_params(req, schema.LIST_TRAIT_SCHEMA)
    if 'name' in req.GET:
        filters = _normalize_traits_qs_param(req.GET['name'])
    if 'associated' in req.GET:
        # Lowercase once and compare directly rather than re-reading the
        # parameter and using a redundant "True if ... else False".
        associated = req.GET['associated'].lower()
        if associated not in ('true', 'false'):
            raise webob.exc.HTTPBadRequest(
                _('The query parameter "associated" only accepts '
                  '"true" or "false"'))
        filters['associated'] = associated == 'true'
    traits = rp_obj.TraitList.get_all(context, filters)
    req.response.status = 200
    output, last_modified = _serialize_traits(traits, want_version)
    if want_version.matches((1, 15)):
        # Microversion 1.15 adds caching headers to the response.
        req.response.last_modified = last_modified
        req.response.cache_control = 'no-cache'
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(output))
    req.response.content_type = 'application/json'
    return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
@util.check_accept('application/json')
def list_traits_for_resource_provider(req):
    """List the traits associated with a single resource provider."""
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    # Resource provider object is needed for two things: If it is
    # NotFound we'll get a 404 here, which needs to happen because
    # get_all_by_resource_provider can return an empty list.
    # It is also needed for the generation, used in the outgoing
    # representation.
    try:
        rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            _("No resource provider with uuid %(uuid)s found: %(error)s") %
            {'uuid': uuid, 'error': exc})
    traits = rp_obj.TraitList.get_all_by_resource_provider(context, rp)
    response_body, last_modified = _serialize_traits(traits, want_version)
    response_body["resource_provider_generation"] = rp.generation
    if want_version.matches((1, 15)):
        # Microversion 1.15 adds caching headers to the response.
        req.response.last_modified = last_modified
        req.response.cache_control = 'no-cache'
    req.response.status = 200
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
    req.response.content_type = 'application/json'
    return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
@util.require_content('application/json')
def update_traits_for_resource_provider(req):
    """Replace the set of traits associated with a resource provider.

    Every named trait must already exist, and the supplied generation must
    match the provider's current one (optimistic concurrency control).
    """
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    data = util.extract_json(req.body, schema.SET_TRAITS_FOR_RP_SCHEMA)
    rp_gen = data['resource_provider_generation']
    traits = data['traits']
    resource_provider = rp_obj.ResourceProvider.get_by_uuid(
        context, uuid)
    if resource_provider.generation != rp_gen:
        raise webob.exc.HTTPConflict(
            _("Resource provider's generation already changed. Please update "
              "the generation and try again."),
            json_formatter=util.json_error_formatter)
    trait_objs = rp_obj.TraitList.get_all(
        context, filters={'name_in': traits})
    # Reject the request if any requested trait does not exist.  (A set
    # comprehension replaces set([...]); traits_name was already a set, so
    # the extra set() wrap around it was redundant.)
    known_names = {obj.name for obj in trait_objs}
    missing = set(traits) - known_names
    if missing:
        raise webob.exc.HTTPBadRequest(
            _("No such trait %s") % ', '.join(missing))
    resource_provider.set_traits(trait_objs)
    response_body, last_modified = _serialize_traits(trait_objs, want_version)
    response_body[
        'resource_provider_generation'] = resource_provider.generation
    if want_version.matches((1, 15)):
        # Microversion 1.15 adds caching headers to the response.
        req.response.last_modified = last_modified
        req.response.cache_control = 'no-cache'
    req.response.status = 200
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
    req.response.content_type = 'application/json'
    return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
def delete_traits_for_resource_provider(req):
    """Dissociate every trait from the given resource provider."""
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    provider = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
    try:
        provider.set_traits(rp_obj.TraitList(objects=[]))
    except exception.ConcurrentUpdateDetected as conflict:
        raise webob.exc.HTTPConflict(conflict.format_message())
    req.response.status = 204
    req.response.content_type = None
    return req.response
| |
import warnings
from itertools import product
import numpy as np
import pandas as pd
import pytest
from pandas.errors import OutOfBoundsDatetime
from xarray import DataArray, Dataset, Variable, coding, decode_cf
from xarray.coding.times import (
_import_cftime,
cftime_to_nptime,
decode_cf_datetime,
encode_cf_datetime,
to_timedelta_unboxed,
)
from xarray.coding.variables import SerializationWarning
from xarray.conventions import _update_bounds_attributes, cf_encoder
from xarray.core.common import contains_cftime_datetimes
from xarray.testing import assert_equal
from . import (
arm_xfail,
assert_array_equal,
has_cftime,
has_cftime_or_netCDF4,
has_dask,
requires_cftime,
requires_cftime_or_netCDF4,
)
# Calendars whose dates cannot generally be represented as np.datetime64.
_NON_STANDARD_CALENDARS_SET = {
    "noleap",
    "365_day",
    "360_day",
    "julian",
    "all_leap",
    "366_day",
}
_ALL_CALENDARS = sorted(
    _NON_STANDARD_CALENDARS_SET.union(coding.times._STANDARD_CALENDARS)
)
_NON_STANDARD_CALENDARS = sorted(_NON_STANDARD_CALENDARS_SET)
_STANDARD_CALENDARS = sorted(coding.times._STANDARD_CALENDARS)
# (num_dates, units) pairs covering scalars, lists, nested lists and
# arrays of several dtypes/shapes, used to parametrize test_cf_datetime.
_CF_DATETIME_NUM_DATES_UNITS = [
    (np.arange(10), "days since 2000-01-01"),
    (np.arange(10).astype("float64"), "days since 2000-01-01"),
    (np.arange(10).astype("float32"), "days since 2000-01-01"),
    (np.arange(10).reshape(2, 5), "days since 2000-01-01"),
    (12300 + np.arange(5), "hours since 1680-01-01 00:00:00"),
    # here we add a couple minor formatting errors to test
    # the robustness of the parsing algorithm.
    (12300 + np.arange(5), "hour since 1680-01-01 00:00:00"),
    (12300 + np.arange(5), "Hour since 1680-01-01 00:00:00"),
    (12300 + np.arange(5), " Hour since 1680-01-01 00:00:00 "),
    (10, "days since 2000-01-01"),
    ([10], "daYs since 2000-01-01"),
    ([[10]], "days since 2000-01-01"),
    ([10, 10], "days since 2000-01-01"),
    (np.array(10), "days since 2000-01-01"),
    (0, "days since 1000-01-01"),
    ([0], "days since 1000-01-01"),
    ([[0]], "days since 1000-01-01"),
    (np.arange(2), "days since 1000-01-01"),
    (np.arange(0, 100000, 20000), "days since 1900-01-01"),
    (17093352.0, "hours since 1-1-1 00:00:0.0"),
    ([0.5, 1.5], "hours since 1900-01-01T00:00:00"),
    (0, "milliseconds since 2000-01-01T00:00:00"),
    (0, "microseconds since 2000-01-01T00:00:00"),
    (np.int32(788961600), "seconds since 1981-01-01"),  # GH2002
    (12300 + np.arange(5), "hour since 1680-01-01 00:00:00.500000"),
]
# Cross product of the pairs above with every standard calendar.
_CF_DATETIME_TESTS = [
    num_dates_units + (calendar,)
    for num_dates_units, calendar in product(
        _CF_DATETIME_NUM_DATES_UNITS, _STANDARD_CALENDARS
    )
]
def _all_cftime_date_types():
    """Map each calendar name to the matching cftime datetime class."""
    try:
        import cftime
    except ImportError:
        import netcdftime as cftime
    # "365_day" / "366_day" are aliases of "noleap" / "all_leap".
    class_names = {
        "noleap": "DatetimeNoLeap",
        "365_day": "DatetimeNoLeap",
        "360_day": "Datetime360Day",
        "julian": "DatetimeJulian",
        "all_leap": "DatetimeAllLeap",
        "366_day": "DatetimeAllLeap",
        "gregorian": "DatetimeGregorian",
        "proleptic_gregorian": "DatetimeProlepticGregorian",
    }
    return {calendar: getattr(cftime, name)
            for calendar, name in class_names.items()}
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize(["num_dates", "units", "calendar"], _CF_DATETIME_TESTS)
def test_cf_datetime(num_dates, units, calendar):
    """Round-trip CF numeric dates through decode_cf_datetime / encode."""
    cftime = _import_cftime()
    # The real cftime package needs an explicit flag to always return
    # cftime datetimes; the legacy netcdftime shim does not accept it.
    if cftime.__name__ == "cftime":
        expected = cftime.num2date(
            num_dates, units, calendar, only_use_cftime_datetimes=True
        )
    else:
        expected = cftime.num2date(num_dates, units, calendar)
    min_y = np.ravel(np.atleast_1d(expected))[np.nanargmin(num_dates)].year
    max_y = np.ravel(np.atleast_1d(expected))[np.nanargmax(num_dates)].year
    # Only years in this window are representable as datetime64[ns].
    if min_y >= 1678 and max_y < 2262:
        expected = cftime_to_nptime(expected)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "Unable to decode time axis")
        actual = coding.times.decode_cf_datetime(num_dates, units, calendar)
    abs_diff = np.asarray(abs(actual - expected)).ravel()
    abs_diff = pd.to_timedelta(abs_diff.tolist()).to_numpy()
    # once we no longer support versions of netCDF4 older than 1.1.5,
    # we could do this check with near microsecond accuracy:
    # https://github.com/Unidata/netcdf4-python/issues/355
    assert (abs_diff <= np.timedelta64(1, "s")).all()
    encoded, _, _ = coding.times.encode_cf_datetime(actual, units, calendar)
    if "1-1-1" not in units:
        # pandas parses this date very strangely, so the original
        # units/encoding cannot be preserved in this case:
        # (Pdb) pd.to_datetime('1-1-1 00:00:0.0')
        # Timestamp('2001-01-01 00:00:00')
        assert_array_equal(num_dates, np.around(encoded, 1))
        if hasattr(num_dates, "ndim") and num_dates.ndim == 1 and "1000" not in units:
            # verify that wrapping with a pandas.Index works
            # note that it *does not* currently work to even put
            # non-datetime64 compatible dates into a pandas.Index
            encoded, _, _ = coding.times.encode_cf_datetime(
                pd.Index(actual), units, calendar
            )
            assert_array_equal(num_dates, np.around(encoded, 1))
@requires_cftime_or_netCDF4
def test_decode_cf_datetime_overflow():
    # Regression tests for:
    # https://github.com/pydata/pandas/issues/14068
    # https://github.com/pydata/xarray/issues/975
    try:
        from cftime import DatetimeGregorian
    except ImportError:
        from netcdftime import DatetimeGregorian
    units = "days since 2000-01-01 00:00:00"
    # one date before 1678 and one after 2262 (outside datetime64[ns])
    cases = [
        (-117608, DatetimeGregorian(1677, 12, 31)),
        (95795, DatetimeGregorian(2262, 4, 12)),
    ]
    for day, expected in cases:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Unable to decode time axis")
            result = coding.times.decode_cf_datetime(day, units)
        assert result == expected
def test_decode_cf_datetime_non_standard_units():
    # netCDFs from madis.noaa.gov use this format for their time units:
    # they cannot be parsed by cftime, but pd.Timestamp works
    units = "hours since 1-1-1970"
    expected = pd.date_range(periods=100, start="1970-01-01", freq="h")
    actual = coding.times.decode_cf_datetime(np.arange(100), units)
    assert_array_equal(actual, expected)
@requires_cftime_or_netCDF4
def test_decode_cf_datetime_non_iso_strings():
    # datetime strings that are _almost_ ISO compliant but not quite,
    # but which cftime.num2date can still parse correctly
    expected = pd.date_range(periods=100, start="2000-01-01", freq="h")
    num_dates = np.arange(100)
    for units in [
        "hours since 2000-01-01 0",
        "hours since 2000-1-1 0",
        "hours since 2000-01-01 0:00",
    ]:
        actual = coding.times.decode_cf_datetime(num_dates, units)
        abs_diff = abs(actual - expected.values)
        # once we no longer support versions of netCDF4 older than 1.1.5,
        # we could do this check with near microsecond accuracy:
        # https://github.com/Unidata/netcdf4-python/issues/355
        assert (abs_diff <= np.timedelta64(1, "s")).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_decode_standard_calendar_inside_timestamp_range(calendar):
    """Standard-calendar dates in the Timestamp range decode to M8[ns]."""
    cftime = _import_cftime()
    units = "days since 0001-01-01"
    # Use the lowercase "h" offset alias for consistency with the other
    # tests in this module; modern pandas deprecates uppercase "H".
    times = pd.date_range("2001-04-01-00", end="2001-04-30-23", freq="h")
    time = cftime.date2num(times.to_pydatetime(), units, calendar=calendar)
    expected = times.values
    expected_dtype = np.dtype("M8[ns]")
    actual = coding.times.decode_cf_datetime(time, units, calendar=calendar)
    assert actual.dtype == expected_dtype
    abs_diff = abs(actual - expected)
    # once we no longer support versions of netCDF4 older than 1.1.5,
    # we could do this check with near microsecond accuracy:
    # https://github.com/Unidata/netcdf4-python/issues/355
    assert (abs_diff <= np.timedelta64(1, "s")).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
def test_decode_non_standard_calendar_inside_timestamp_range(calendar):
    """Non-standard calendars always decode to object-dtype cftime dates."""
    cftime = _import_cftime()
    units = "days since 0001-01-01"
    # Use the lowercase "h" offset alias for consistency with the other
    # tests in this module; modern pandas deprecates uppercase "H".
    times = pd.date_range("2001-04-01-00", end="2001-04-30-23", freq="h")
    non_standard_time = cftime.date2num(times.to_pydatetime(), units, calendar=calendar)
    # The real cftime package needs an explicit flag to always return
    # cftime datetimes; the legacy netcdftime shim does not accept it.
    if cftime.__name__ == "cftime":
        expected = cftime.num2date(
            non_standard_time, units, calendar=calendar, only_use_cftime_datetimes=True
        )
    else:
        expected = cftime.num2date(non_standard_time, units, calendar=calendar)
    expected_dtype = np.dtype("O")
    actual = coding.times.decode_cf_datetime(
        non_standard_time, units, calendar=calendar
    )
    assert actual.dtype == expected_dtype
    abs_diff = abs(actual - expected)
    # once we no longer support versions of netCDF4 older than 1.1.5,
    # we could do this check with near microsecond accuracy:
    # https://github.com/Unidata/netcdf4-python/issues/355
    assert (abs_diff <= np.timedelta64(1, "s")).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_decode_dates_outside_timestamp_range(calendar):
    """Dates outside the datetime64[ns] range decode to cftime objects."""
    from datetime import datetime

    cftime = _import_cftime()
    units = "days since 0001-01-01"
    # Year 1 is far before the pd.Timestamp-representable range.
    times = [datetime(1, 4, 1, h) for h in range(1, 5)]
    time = cftime.date2num(times, units, calendar=calendar)
    # The real cftime package needs an explicit flag to always return
    # cftime datetimes; the legacy netcdftime shim does not accept it.
    if cftime.__name__ == "cftime":
        expected = cftime.num2date(
            time, units, calendar=calendar, only_use_cftime_datetimes=True
        )
    else:
        expected = cftime.num2date(time, units, calendar=calendar)
    expected_date_type = type(expected[0])
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "Unable to decode time axis")
        actual = coding.times.decode_cf_datetime(time, units, calendar=calendar)
    assert all(isinstance(value, expected_date_type) for value in actual)
    abs_diff = abs(actual - expected)
    # once we no longer support versions of netCDF4 older than 1.1.5,
    # we could do this check with near microsecond accuracy:
    # https://github.com/Unidata/netcdf4-python/issues/355
    assert (abs_diff <= np.timedelta64(1, "s")).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_decode_standard_calendar_single_element_inside_timestamp_range(calendar):
    """Scalar, 1-d and 2-d single values all decode to datetime64[ns]."""
    units = "days since 0001-01-01"
    for num_time in (735368, [735368], [[735368]]):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Unable to decode time axis")
            decoded = coding.times.decode_cf_datetime(
                num_time, units, calendar=calendar
            )
        assert decoded.dtype == np.dtype("M8[ns]")
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
def test_decode_non_standard_calendar_single_element_inside_timestamp_range(calendar):
    """Non-standard calendars decode single values to object dtype."""
    units = "days since 0001-01-01"
    for num_time in (735368, [735368], [[735368]]):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Unable to decode time axis")
            decoded = coding.times.decode_cf_datetime(
                num_time, units, calendar=calendar
            )
        assert decoded.dtype == np.dtype("O")
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
def test_decode_single_element_outside_timestamp_range(calendar):
    """Single elements outside the Timestamp range decode to the same cftime
    date type that cftime.num2date itself produces."""
    cftime = _import_cftime()
    units = "days since 0001-01-01"
    for days in [1, 1470376]:
        # Exercise scalar, 1-d and 2-d layouts of the same value.
        for num_time in [days, [days], [[days]]]:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", "Unable to decode time axis")
                actual = coding.times.decode_cf_datetime(
                    num_time, units, calendar=calendar
                )
            if cftime.__name__ == "cftime":
                expected = cftime.num2date(
                    days, units, calendar, only_use_cftime_datetimes=True
                )
            else:
                # netCDF4's num2date lacks only_use_cftime_datetimes.
                expected = cftime.num2date(days, units, calendar)
            assert isinstance(actual.item(), type(expected))
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_decode_standard_calendar_multidim_time_inside_timestamp_range(calendar):
    """A 2-d array of in-range standard-calendar times decodes column-wise to
    datetime64[ns] values matching pandas date_range."""
    cftime = _import_cftime()
    units = "days since 0001-01-01"
    times1 = pd.date_range("2001-04-01", end="2001-04-05", freq="D")
    times2 = pd.date_range("2001-05-01", end="2001-05-05", freq="D")
    time1 = cftime.date2num(times1.to_pydatetime(), units, calendar=calendar)
    time2 = cftime.date2num(times2.to_pydatetime(), units, calendar=calendar)
    # Stack the two series as columns of a 2-d time array.
    mdim_time = np.empty((len(time1), 2))
    mdim_time[:, 0] = time1
    mdim_time[:, 1] = time2
    expected1 = times1.values
    expected2 = times2.values
    actual = coding.times.decode_cf_datetime(mdim_time, units, calendar=calendar)
    assert actual.dtype == np.dtype("M8[ns]")
    abs_diff1 = abs(actual[:, 0] - expected1)
    abs_diff2 = abs(actual[:, 1] - expected2)
    # once we no longer support versions of netCDF4 older than 1.1.5,
    # we could do this check with near microsecond accuracy:
    # https://github.com/Unidata/netcdf4-python/issues/355
    assert (abs_diff1 <= np.timedelta64(1, "s")).all()
    assert (abs_diff2 <= np.timedelta64(1, "s")).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
def test_decode_nonstandard_calendar_multidim_time_inside_timestamp_range(calendar):
    """A 2-d array of non-standard-calendar times decodes to object dtype,
    matching cftime.num2date column-wise."""
    cftime = _import_cftime()
    units = "days since 0001-01-01"
    times1 = pd.date_range("2001-04-01", end="2001-04-05", freq="D")
    times2 = pd.date_range("2001-05-01", end="2001-05-05", freq="D")
    time1 = cftime.date2num(times1.to_pydatetime(), units, calendar=calendar)
    time2 = cftime.date2num(times2.to_pydatetime(), units, calendar=calendar)
    # Stack the two series as columns of a 2-d time array.
    mdim_time = np.empty((len(time1), 2))
    mdim_time[:, 0] = time1
    mdim_time[:, 1] = time2
    if cftime.__name__ == "cftime":
        expected1 = cftime.num2date(
            time1, units, calendar, only_use_cftime_datetimes=True
        )
        expected2 = cftime.num2date(
            time2, units, calendar, only_use_cftime_datetimes=True
        )
    else:
        # netCDF4's num2date lacks only_use_cftime_datetimes.
        expected1 = cftime.num2date(time1, units, calendar)
        expected2 = cftime.num2date(time2, units, calendar)
    expected_dtype = np.dtype("O")
    actual = coding.times.decode_cf_datetime(mdim_time, units, calendar=calendar)
    assert actual.dtype == expected_dtype
    abs_diff1 = abs(actual[:, 0] - expected1)
    abs_diff2 = abs(actual[:, 1] - expected2)
    # once we no longer support versions of netCDF4 older than 1.1.5,
    # we could do this check with near microsecond accuracy:
    # https://github.com/Unidata/netcdf4-python/issues/355
    assert (abs_diff1 <= np.timedelta64(1, "s")).all()
    assert (abs_diff2 <= np.timedelta64(1, "s")).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_decode_multidim_time_outside_timestamp_range(calendar):
    """A 2-d array of out-of-range dates (year 1 CE) decodes to object dtype
    cftime datetimes for every calendar."""
    from datetime import datetime
    cftime = _import_cftime()
    units = "days since 0001-01-01"
    times1 = [datetime(1, 4, day) for day in range(1, 6)]
    times2 = [datetime(1, 5, day) for day in range(1, 6)]
    time1 = cftime.date2num(times1, units, calendar=calendar)
    time2 = cftime.date2num(times2, units, calendar=calendar)
    # Stack the two series as columns of a 2-d time array.
    mdim_time = np.empty((len(time1), 2))
    mdim_time[:, 0] = time1
    mdim_time[:, 1] = time2
    if cftime.__name__ == "cftime":
        expected1 = cftime.num2date(
            time1, units, calendar, only_use_cftime_datetimes=True
        )
        expected2 = cftime.num2date(
            time2, units, calendar, only_use_cftime_datetimes=True
        )
    else:
        # netCDF4's num2date lacks only_use_cftime_datetimes.
        expected1 = cftime.num2date(time1, units, calendar)
        expected2 = cftime.num2date(time2, units, calendar)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "Unable to decode time axis")
        actual = coding.times.decode_cf_datetime(mdim_time, units, calendar=calendar)
    assert actual.dtype == np.dtype("O")
    abs_diff1 = abs(actual[:, 0] - expected1)
    abs_diff2 = abs(actual[:, 1] - expected2)
    # once we no longer support versions of netCDF4 older than 1.1.5,
    # we could do this check with near microsecond accuracy:
    # https://github.com/Unidata/netcdf4-python/issues/355
    assert (abs_diff1 <= np.timedelta64(1, "s")).all()
    assert (abs_diff2 <= np.timedelta64(1, "s")).all()
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("calendar", ["360_day", "all_leap", "366_day"])
def test_decode_non_standard_calendar_single_element(calendar):
    """Feb 29 2001 only exists in these calendars; it must round-trip through
    decode_cf_datetime as an object-dtype cftime value."""
    cftime = _import_cftime()
    units = "days since 0001-01-01"
    try:
        dt = cftime.netcdftime.datetime(2001, 2, 29)
    except AttributeError:
        # Must be using the standalone cftime library
        dt = cftime.datetime(2001, 2, 29)
    num_time = cftime.date2num(dt, units, calendar)
    actual = coding.times.decode_cf_datetime(num_time, units, calendar=calendar)
    if cftime.__name__ == "cftime":
        expected = np.asarray(
            cftime.num2date(num_time, units, calendar, only_use_cftime_datetimes=True)
        )
    else:
        # netCDF4's num2date lacks only_use_cftime_datetimes.
        expected = np.asarray(cftime.num2date(num_time, units, calendar))
    assert actual.dtype == np.dtype("O")
    assert expected == actual
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
def test_decode_360_day_calendar():
    """360_day-calendar times decode warning-free to object dtype regardless
    of reference year (leap status must not matter)."""
    cftime = _import_cftime()
    calendar = "360_day"
    # ensure leap year doesn't matter
    for year in [2010, 2011, 2012, 2013, 2014]:
        units = f"days since {year}-01-01"
        num_times = np.arange(100)
        if cftime.__name__ == "cftime":
            expected = cftime.num2date(
                num_times, units, calendar, only_use_cftime_datetimes=True
            )
        else:
            # netCDF4's num2date lacks only_use_cftime_datetimes.
            expected = cftime.num2date(num_times, units, calendar)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            actual = coding.times.decode_cf_datetime(
                num_times, units, calendar=calendar
            )
            # No warnings should be emitted while decoding.
            assert len(w) == 0
        assert actual.dtype == np.dtype("O")
        assert_array_equal(actual, expected)
@arm_xfail
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize(
    ["num_dates", "units", "expected_list"],
    [
        ([np.nan], "days since 2000-01-01", ["NaT"]),
        ([np.nan, 0], "days since 2000-01-01", ["NaT", "2000-01-01T00:00:00Z"]),
        (
            [np.nan, 0, 1],
            "days since 2000-01-01",
            ["NaT", "2000-01-01T00:00:00Z", "2000-01-02T00:00:00Z"],
        ),
    ],
)
def test_cf_datetime_nan(num_dates, units, expected_list):
    """NaN numeric dates decode to NaT alongside valid values."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "All-NaN")
        actual = coding.times.decode_cf_datetime(num_dates, units)
    # use pandas because numpy will deprecate timezone-aware conversions
    expected = pd.to_datetime(expected_list)
    assert_array_equal(expected, actual)
@requires_cftime_or_netCDF4
def test_decoded_cf_datetime_array_2d():
    """Decoding keeps a 2-d shape intact (regression test for GH1229)."""
    attrs = {"units": "days since 2000-01-01"}
    var = Variable(("x", "y"), np.array([[0, 1], [2, 3]]), attrs)
    expected = pd.date_range("2000-01-01", periods=4).values.reshape(2, 2)
    decoded = coding.times.CFDatetimeCoder().decode(var)
    assert decoded.dtype == "datetime64[ns]"
    assert_array_equal(np.asarray(decoded), expected)
@pytest.mark.parametrize(
    ["dates", "expected"],
    [
        (pd.date_range("1900-01-01", periods=5), "days since 1900-01-01 00:00:00"),
        (
            pd.date_range("1900-01-01 12:00:00", freq="H", periods=2),
            "hours since 1900-01-01 12:00:00",
        ),
        (
            pd.to_datetime(["1900-01-01", "1900-01-02", "NaT"]),
            "days since 1900-01-01 00:00:00",
        ),
        (
            pd.to_datetime(["1900-01-01", "1900-01-02T00:00:00.005"]),
            "seconds since 1900-01-01 00:00:00",
        ),
        (pd.to_datetime(["NaT", "1900-01-01"]), "days since 1900-01-01 00:00:00"),
        (pd.to_datetime(["NaT"]), "days since 1970-01-01 00:00:00"),
    ],
)
def test_infer_datetime_units(dates, expected):
    """infer_datetime_units picks the coarsest unit that represents the
    input exactly; NaT entries are ignored for the choice of epoch."""
    inferred = coding.times.infer_datetime_units(dates)
    assert inferred == expected
# (date construction args, expected inferred units) cases shared by the
# cftime variant of the infer_datetime_units test below.
_CFTIME_DATETIME_UNITS_TESTS = [
    ([(1900, 1, 1), (1900, 1, 1)], "days since 1900-01-01 00:00:00.000000"),
    (
        [(1900, 1, 1), (1900, 1, 2), (1900, 1, 2, 0, 0, 1)],
        "seconds since 1900-01-01 00:00:00.000000",
    ),
    (
        [(1900, 1, 1), (1900, 1, 8), (1900, 1, 16)],
        "days since 1900-01-01 00:00:00.000000",
    ),
]
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize(
    "calendar", _NON_STANDARD_CALENDARS + ["gregorian", "proleptic_gregorian"]
)
@pytest.mark.parametrize(("date_args", "expected"), _CFTIME_DATETIME_UNITS_TESTS)
def test_infer_cftime_datetime_units(calendar, date_args, expected):
    """Unit inference also works for sequences of cftime datetimes."""
    make_date = _all_cftime_date_types()[calendar]
    dates = []
    for args in date_args:
        dates.append(make_date(*args))
    assert coding.times.infer_datetime_units(dates) == expected
@pytest.mark.parametrize(
    ["timedeltas", "units", "numbers"],
    [
        ("1D", "days", np.int64(1)),
        (["1D", "2D", "3D"], "days", np.array([1, 2, 3], "int64")),
        ("1h", "hours", np.int64(1)),
        ("1ms", "milliseconds", np.int64(1)),
        ("1us", "microseconds", np.int64(1)),
        (["NaT", "0s", "1s"], None, [np.nan, 0, 1]),
        (["30m", "60m"], "hours", [0.5, 1.0]),
        ("NaT", "days", np.nan),
        (["NaT", "NaT"], "days", [np.nan, np.nan]),
    ],
)
def test_cf_timedelta(timedeltas, units, numbers):
    """encode/decode_cf_timedelta round-trip: encoding yields the expected
    numbers (and dtype); decoding recovers the timedeltas; NaN decodes to NaT."""
    if timedeltas == "NaT":
        timedeltas = np.timedelta64("NaT", "ns")
    else:
        timedeltas = to_timedelta_unboxed(timedeltas)
    numbers = np.array(numbers)
    # Encoding direction: timedeltas -> numbers.
    expected = numbers
    actual, _ = coding.times.encode_cf_timedelta(timedeltas, units)
    assert_array_equal(expected, actual)
    assert expected.dtype == actual.dtype
    # Decoding direction (only when units are explicit): numbers -> timedeltas.
    if units is not None:
        expected = timedeltas
        actual = coding.times.decode_cf_timedelta(numbers, units)
        assert_array_equal(expected, actual)
        assert expected.dtype == actual.dtype
    # A bare NaN always decodes to NaT.
    expected = np.timedelta64("NaT", "ns")
    actual = coding.times.decode_cf_timedelta(np.array(np.nan), "days")
    assert_array_equal(expected, actual)
def test_cf_timedelta_2d():
    """decode_cf_timedelta preserves a 2-d input shape."""
    numbers = np.atleast_2d([1, 2, 3])
    expected = np.atleast_2d(to_timedelta_unboxed(["1D", "2D", "3D"]))
    actual = coding.times.decode_cf_timedelta(numbers, "days")
    assert_array_equal(expected, actual)
    assert expected.dtype == actual.dtype
@pytest.mark.parametrize(
    ["deltas", "expected"],
    [
        (pd.to_timedelta(["1 day", "2 days"]), "days"),
        (pd.to_timedelta(["1h", "1 day 1 hour"]), "hours"),
        (pd.to_timedelta(["1m", "2m", np.nan]), "minutes"),
        (pd.to_timedelta(["1m3s", "1m4s"]), "seconds"),
    ],
)
def test_infer_timedelta_units(deltas, expected):
    """infer_timedelta_units picks the coarsest exact unit; NaN is ignored."""
    assert coding.times.infer_timedelta_units(deltas) == expected
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize(
    ["date_args", "expected"],
    [
        ((1, 2, 3, 4, 5, 6), "0001-02-03 04:05:06.000000"),
        ((10, 2, 3, 4, 5, 6), "0010-02-03 04:05:06.000000"),
        ((100, 2, 3, 4, 5, 6), "0100-02-03 04:05:06.000000"),
        ((1000, 2, 3, 4, 5, 6), "1000-02-03 04:05:06.000000"),
    ],
)
def test_format_cftime_datetime(date_args, expected):
    """Formatting renders identical zero-padded strings for every cftime
    date type, including years with fewer than four digits."""
    for date_type in _all_cftime_date_types().values():
        formatted = coding.times.format_cftime_datetime(date_type(*date_args))
        assert formatted == expected
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_decode_cf(calendar):
    """decode_cf picks datetime64 for standard calendars, object dtype for
    non-standard ones, and raises when cftime is needed but missing."""
    days = [1.0, 2.0, 3.0]
    da = DataArray(days, coords=[days], dims=["time"], name="test")
    ds = da.to_dataset()
    for v in ["test", "time"]:
        ds[v].attrs["units"] = "days since 2001-01-01"
        ds[v].attrs["calendar"] = calendar
    if not has_cftime_or_netCDF4 and calendar not in _STANDARD_CALENDARS:
        # No decoder available for non-standard calendars without cftime.
        with pytest.raises(ValueError):
            ds = decode_cf(ds)
    else:
        ds = decode_cf(ds)
        if calendar not in _STANDARD_CALENDARS:
            assert ds.test.dtype == np.dtype("O")
        else:
            assert ds.test.dtype == np.dtype("M8[ns]")
def test_decode_cf_time_bounds():
    """Bounds variables inherit units/calendar from their parent time
    coordinate, but existing bounds attrs are never overwritten."""
    da = DataArray(
        np.arange(6, dtype="int64").reshape((3, 2)),
        coords={"time": [1, 2, 3]},
        dims=("time", "nbnd"),
        name="time_bnds",
    )
    attrs = {
        "units": "days since 2001-01",
        "calendar": "standard",
        "bounds": "time_bnds",
    }
    ds = da.to_dataset()
    ds["time"].attrs.update(attrs)
    _update_bounds_attributes(ds.variables)
    # The bounds variable picked up units and calendar (but not "bounds").
    assert ds.variables["time_bnds"].attrs == {
        "units": "days since 2001-01",
        "calendar": "standard",
    }
    dsc = decode_cf(ds)
    assert dsc.time_bnds.dtype == np.dtype("M8[ns]")
    dsc = decode_cf(ds, decode_times=False)
    assert dsc.time_bnds.dtype == np.dtype("int64")
    # Do not overwrite existing attrs
    ds = da.to_dataset()
    ds["time"].attrs.update(attrs)
    bnd_attr = {"units": "hours since 2001-01", "calendar": "noleap"}
    ds["time_bnds"].attrs.update(bnd_attr)
    _update_bounds_attributes(ds.variables)
    assert ds.variables["time_bnds"].attrs == bnd_attr
    # If bounds variable not available do not complain
    ds = da.to_dataset()
    ds["time"].attrs.update(attrs)
    ds["time"].attrs["bounds"] = "fake_var"
    _update_bounds_attributes(ds.variables)
@requires_cftime_or_netCDF4
def test_encode_time_bounds():
    """Encoding drops units/calendar from bounds when they match the parent
    time coordinate, and preserves them when they deliberately differ."""
    time = pd.date_range("2000-01-16", periods=1)
    time_bounds = pd.date_range("2000-01-01", periods=2, freq="MS")
    ds = Dataset(dict(time=time, time_bounds=time_bounds))
    ds.time.attrs = {"bounds": "time_bounds"}
    ds.time.encoding = {"calendar": "noleap", "units": "days since 2000-01-01"}
    expected = {}
    # expected['time'] = Variable(data=np.array([15]), dims=['time'])
    expected["time_bounds"] = Variable(data=np.array([0, 31]), dims=["time_bounds"])
    encoded, _ = cf_encoder(ds.variables, ds.attrs)
    assert_equal(encoded["time_bounds"], expected["time_bounds"])
    assert "calendar" not in encoded["time_bounds"].attrs
    assert "units" not in encoded["time_bounds"].attrs
    # if time_bounds attrs are same as time attrs, it doesn't matter
    ds.time_bounds.encoding = {"calendar": "noleap", "units": "days since 2000-01-01"}
    encoded, _ = cf_encoder({k: ds[k] for k in ds.variables}, ds.attrs)
    assert_equal(encoded["time_bounds"], expected["time_bounds"])
    assert "calendar" not in encoded["time_bounds"].attrs
    assert "units" not in encoded["time_bounds"].attrs
    # for CF-noncompliant case of time_bounds attrs being different from
    # time attrs; preserve them for faithful roundtrip
    ds.time_bounds.encoding = {"calendar": "noleap", "units": "days since 1849-01-01"}
    encoded, _ = cf_encoder({k: ds[k] for k in ds.variables}, ds.attrs)
    with pytest.raises(AssertionError):
        assert_equal(encoded["time_bounds"], expected["time_bounds"])
    assert "calendar" not in encoded["time_bounds"].attrs
    assert encoded["time_bounds"].attrs["units"] == ds.time_bounds.encoding["units"]
    # A time coordinate without encoding triggers a warning.
    ds.time.encoding = {}
    with pytest.warns(UserWarning):
        cf_encoder(ds.variables, ds.attrs)
@pytest.fixture(params=_ALL_CALENDARS)
def calendar(request):
    # Parametrized fixture that yields each CF calendar name in turn.
    return request.param
@pytest.fixture()
def times(calendar):
    # Four consecutive hours from 2000-01-01 as cftime datetimes in the
    # requested calendar.
    cftime = _import_cftime()
    return cftime.num2date(
        np.arange(4),
        units="hours since 2000-01-01",
        calendar=calendar,
        only_use_cftime_datetimes=True,
    )
@pytest.fixture()
def data(times):
    """Random (lon, lat, time) DataArray whose time coord is cftime-valued."""
    values = np.random.rand(2, 2, 4)
    lon_coord = np.linspace(0, 11, 2)
    lat_coord = np.linspace(0, 20, 2)
    return DataArray(
        values,
        coords=[lon_coord, lat_coord, times],
        dims=["lon", "lat", "time"],
        name="data",
    )
@pytest.fixture()
def times_3d(times):
    """DataArray whose *values* (not just coords) are cftime datetimes."""
    lon_coord = np.linspace(0, 11, 2)
    lat_coord = np.linspace(0, 20, 2)
    sampled = np.random.choice(times, size=(2, 2, 4))
    return DataArray(
        sampled,
        coords=[lon_coord, lat_coord, times],
        dims=["lon", "lat", "time"],
        name="data",
    )
@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
def test_contains_cftime_datetimes_1d(data):
    # A 1-d cftime coordinate is detected.
    assert contains_cftime_datetimes(data.time)
@pytest.mark.skipif(not has_dask, reason="dask not installed")
@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
def test_contains_cftime_datetimes_dask_1d(data):
    # Detection also works when the data is dask-backed (chunked).
    assert contains_cftime_datetimes(data.time.chunk())
@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
def test_contains_cftime_datetimes_3d(times_3d):
    # A 3-d array of cftime values is detected.
    assert contains_cftime_datetimes(times_3d)
@pytest.mark.skipif(not has_dask, reason="dask not installed")
@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
def test_contains_cftime_datetimes_dask_3d(times_3d):
    # 3-d detection also works when dask-backed (chunked).
    assert contains_cftime_datetimes(times_3d.chunk())
@pytest.mark.parametrize("non_cftime_data", [DataArray([]), DataArray([1, 2])])
def test_contains_cftime_datetimes_non_cftimes(non_cftime_data):
    # Empty and numeric arrays are correctly classified as non-cftime.
    assert not contains_cftime_datetimes(non_cftime_data)
@pytest.mark.skipif(not has_dask, reason="dask not installed")
@pytest.mark.parametrize("non_cftime_data", [DataArray([]), DataArray([1, 2])])
def test_contains_cftime_datetimes_non_cftimes_dask(non_cftime_data):
    # Negative classification also holds for dask-backed arrays.
    assert not contains_cftime_datetimes(non_cftime_data.chunk())
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
@pytest.mark.parametrize("shape", [(24,), (8, 3), (2, 4, 3)])
def test_encode_cf_datetime_overflow(shape):
    """Encoding far-future dates must not overflow (regression for GH 2272)."""
    units = "days since 1800-01-01"
    calendar = "standard"
    original = pd.date_range("2100", periods=24).values.reshape(shape)
    encoded, _, _ = encode_cf_datetime(original, units, calendar)
    restored = decode_cf_datetime(encoded, units, calendar)
    np.testing.assert_array_equal(original, restored)
def test_encode_cf_datetime_pandas_min():
    """Default encoding of a pandas DatetimeIndex (regression for GH 2623)."""
    num, units, calendar = encode_cf_datetime(pd.date_range("2000", periods=3))
    np.testing.assert_array_equal(num, np.array([0.0, 1.0, 2.0]))
    assert units == "days since 2000-01-01 00:00:00"
    assert calendar == "proleptic_gregorian"
@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
def test_time_units_with_timezone_roundtrip(calendar):
    """Timezone-offset units decode to UTC and round-trip (GH 2649)."""
    # Regression test for GH 2649
    expected_units = "days since 2000-01-01T00:00:00-05:00"
    expected_num_dates = np.array([1, 2, 3])
    dates = decode_cf_datetime(expected_num_dates, expected_units, calendar)
    # Check that dates were decoded to UTC; here the hours should all
    # equal 5.
    result_hours = DataArray(dates).dt.hour
    expected_hours = DataArray([5, 5, 5])
    assert_equal(result_hours, expected_hours)
    # Check that the encoded values are accurately roundtripped.
    result_num_dates, result_units, result_calendar = encode_cf_datetime(
        dates, expected_units, calendar
    )
    if calendar in _STANDARD_CALENDARS:
        np.testing.assert_array_equal(result_num_dates, expected_num_dates)
    else:
        # cftime datetime arithmetic is not quite exact.
        np.testing.assert_allclose(result_num_dates, expected_num_dates)
    assert result_units == expected_units
    assert result_calendar == calendar
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_use_cftime_default_standard_calendar_in_range(calendar):
    """In-range standard-calendar dates decode to datetime64 with no warning
    when use_cftime is left at its default."""
    expected = pd.date_range("2000", periods=2)
    with pytest.warns(None) as record:
        decoded = decode_cf_datetime([0, 1], "days since 2000-01-01", calendar)
        np.testing.assert_array_equal(decoded, expected)
        assert not record
@requires_cftime
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2500])
def test_use_cftime_default_standard_calendar_out_of_range(calendar, units_year):
    """Out-of-range dates fall back to cftime with a SerializationWarning."""
    from cftime import num2date
    numerical_dates = [0, 1]
    units = f"days since {units_year}-01-01"
    expected = num2date(
        numerical_dates, units, calendar, only_use_cftime_datetimes=True
    )
    with pytest.warns(SerializationWarning):
        result = decode_cf_datetime(numerical_dates, units, calendar)
        np.testing.assert_array_equal(result, expected)
@requires_cftime
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_default_non_standard_calendar(calendar, units_year):
    """Non-standard calendars always decode via cftime, without warning."""
    from cftime import num2date
    numerical_dates = [0, 1]
    units = f"days since {units_year}-01-01"
    expected = num2date(
        numerical_dates, units, calendar, only_use_cftime_datetimes=True
    )
    with pytest.warns(None) as record:
        result = decode_cf_datetime(numerical_dates, units, calendar)
        np.testing.assert_array_equal(result, expected)
        assert not record
@requires_cftime
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_true(calendar, units_year):
    """use_cftime=True forces cftime decoding for every calendar and range,
    with no warning emitted."""
    from cftime import num2date
    numerical_dates = [0, 1]
    units = f"days since {units_year}-01-01"
    expected = num2date(
        numerical_dates, units, calendar, only_use_cftime_datetimes=True
    )
    with pytest.warns(None) as record:
        result = decode_cf_datetime(numerical_dates, units, calendar, use_cftime=True)
        np.testing.assert_array_equal(result, expected)
        assert not record
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_use_cftime_false_standard_calendar_in_range(calendar):
    """use_cftime=False succeeds silently for in-range standard calendars."""
    expected = pd.date_range("2000", periods=2)
    with pytest.warns(None) as record:
        decoded = decode_cf_datetime(
            [0, 1], "days since 2000-01-01", calendar, use_cftime=False
        )
        np.testing.assert_array_equal(decoded, expected)
        assert not record
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2500])
def test_use_cftime_false_standard_calendar_out_of_range(calendar, units_year):
    """use_cftime=False must raise for dates outside the Timestamp range."""
    units = f"days since {units_year}-01-01"
    with pytest.raises(OutOfBoundsDatetime):
        decode_cf_datetime([0, 1], units, calendar, use_cftime=False)
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_false_non_standard_calendar(calendar, units_year):
    """use_cftime=False can never represent non-standard calendars, so it
    raises regardless of the date range."""
    units = f"days since {units_year}-01-01"
    with pytest.raises(OutOfBoundsDatetime):
        decode_cf_datetime([0, 1], units, calendar, use_cftime=False)
| |
# Copyright (c) 2009-2012, Geoffrey Biggs
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Geoffrey Biggs nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# File: package.py
# Author: Geoffrey Biggs
# Part of pykg-config.
"""Package class for pykg-config.
Stores information read from a pkg-config file.
"""
__version__ = "$Revision: $"
# $Source$
from copy import deepcopy
from os.path import abspath, dirname, join, normpath
import re
import shlex
import sys
from pykg_config.errorprinter import ErrorPrinter
from pykg_config.exceptions import ParseError
from pykg_config.pcfile import read_pc_file
from pykg_config.substitute import substitute
from pykg_config.props import *
from pykg_config.options import Options
from pykg_config.packagespeclist import parse_package_spec_list
from pykg_config.version import BadVersionFormatError, Version
##############################################################################
# Package class
class Package:
    """This class stores the information gleaned from a pkg-config
    file, allowing quick access to it.

    The raw text read from the file is kept (``raw_props``/``raw_vars``)
    alongside the processed values (``properties``/``variables``) produced
    by variable substitution and flag parsing.
    """

    def __init__(self, filename=None, globals=None):
        """Create a package, optionally loading it from a .pc file.

        :param filename: path of a pkg-config file to parse, or None to
            create an empty package.
        :param globals: dict of global variables used during substitution.
        """
        # Bug fix: the old signature used a mutable default (globals={})
        # that is shared between all calls; use a None sentinel instead.
        if globals is None:
            globals = {}
        # Different platforms may use different flags and extensions
        if sys.platform == 'win32' and Options().get_option('use_msvc_syntax'):
            self.lib_suffix = '.lib'
        else:
            self.lib_suffix = ''
        # Parse a file if one was given
        if filename is not None:
            self.load_from_pc_file(filename, globals)
            # '-uninstalled' files describe a not-yet-installed package.
            self.uninstalled = filename.endswith('-uninstalled')
        else:
            self.clear()

    def __str__(self):
        """Human-readable dump of all properties and variables."""
        result = self.filename + '\nProperties:\n'
        for key in self.properties:
            if key == 'requires' or key == 'requires.private' or \
                    key == 'conflicts':
                result += '%s:\t%s\n' % \
                    (key, [str(a) for a in self.properties[key]])
            else:
                result += '%s:\t%s\n' % (key, self.properties[key])
        result += 'Variables:\n'
        for key in self.variables:
            result += '%s:\t%s\n' % (key, self.variables[key])
        return result

    @property
    def variables(self):
        """Variables used by the package properties."""
        return self._vars

    @variables.setter
    def variables(self, new_vars):
        self._vars = new_vars

    @property
    def properties(self):
        """Properties of the package."""
        return self._props

    @properties.setter
    def properties(self, new_props):
        self._props = new_props

    @property
    def filename(self):
        """File name of the pkg-config file this package was loaded from."""
        return self._filename

    def clear(self):
        """Clear all package data."""
        self._props = deepcopy(empty_processed_props)
        self._vars = {}
        self.raw_props = deepcopy(empty_raw_props)
        self.raw_vars = {}
        # Bug fix: `filename` is a read-only property (it has no setter), so
        # the previous `self.filename = ''` raised AttributeError. Assign
        # the backing attribute directly.
        self._filename = ''

    def get_raw_property(self, prop):
        """Get a property value in its raw format, as it appears in the
        file.
        """
        return self.raw_props[prop]

    def get_raw_variable(self, var):
        """Get a variable in its raw format, as it appears in the file."""
        return self.raw_vars[var]

    def sanity_check(self):
        # Placeholder: no consistency checks are currently performed.
        return True

    def load_from_pc_file(self, filename, global_variables):
        """Load data from a package config file and process it."""
        self.raw_vars, self.variables, \
            self.raw_props = read_pc_file(filename, global_variables)
        self._filename = filename
        self._process_props(global_variables)

    def _process_props(self, global_variables):
        """Substitute variables into the raw properties and parse them."""
        props = self.raw_props
        # May need to reset the prefix variable
        if sys.platform == 'win32' and \
                not Options().get_option('dont_define_prefix'):
            # Use the location of the .pc file to guess a suitable value for
            # the prefix variable. Start by checking if the absolute .pc
            # location ends with '\lib\pkgconfig'.
            abs_loc = dirname(abspath(self.filename))
            if Options().get_option('normalise_paths'):
                abs_loc = normpath(abs_loc)
            else:
                # If not normalising paths, then all paths should be in /
                # format for consistency
                abs_loc = abs_loc.replace('\\', '/')
            # NOTE(review): after the replace() above the path uses '/'
            # separators, so this check can only match in the
            # normalise_paths branch -- confirm whether '/lib/pkgconfig'
            # should also be handled.
            if abs_loc.endswith('\\lib\\pkgconfig'):
                # Bug fix: str.rstrip() strips a *character set*, not a
                # suffix, so it could also eat trailing characters of the
                # prefix itself (e.g. 'C:\\gtk\\lib\\pkgconfig' would
                # become 'C:\\gt'). Slice the suffix off instead.
                prefix_var = Options().get_option('prefix_variable')
                self.variables[prefix_var] = \
                    abs_loc[:-len('\\lib\\pkgconfig')]
                ErrorPrinter().debug_print('Replaced {0} with {1}'.format(
                    prefix_var, self.variables[prefix_var]))
        # Perform substitutions
        for key in props:
            props[key] = substitute(props[key], self.variables,
                                    global_variables)
        # Parse the data
        self.properties = deepcopy(empty_processed_props)
        self.properties['name'] = props['name']
        if props['description']:
            self.properties['description'] = props['description']
        if props['version']:
            try:
                self.properties['version'] = Version(props['version'])
            except BadVersionFormatError as e:
                # Re-raise with the package name attached for context.
                raise BadVersionFormatError(e.versionstring, props['name'])
        self.properties['requires'] = \
            parse_package_spec_list(props['requires'])
        self.properties['requires.private'] = \
            parse_package_spec_list(props['requires.private']) + \
            self.properties['requires']
        self.properties['conflicts'] = \
            parse_package_spec_list(props['conflicts'])
        self._parse_cflags(props['cflags'], global_variables)
        self._parse_libs(props['libs'], global_variables)
        self._parse_libs(props['libs.private'], global_variables, dest='private.')

    def _parse_cflags(self, value, global_variables):
        """Split a Cflags string into include_dirs and other_cflags."""
        flags = shlex.split(value, posix=False)
        for flag in flags:
            if flag.startswith('-I'):
                if flag[2:] not in \
                        Options().get_option('forbidden_cflags'):
                    # Prepend pc_sysrootdir if necessary
                    pc_sysrootdir = global_variables.get('pc_sysrootdir', None)
                    if pc_sysrootdir:
                        # Strip the leading slashes from the flag path
                        # because os.path.join() will ignore
                        # pc_sysrootdir if it thinks the flag is an
                        # absolute path
                        include_dir = join(pc_sysrootdir,
                                           flag[2:].strip().lstrip('/'))
                    else:
                        include_dir = flag[2:].strip()
                    if Options().get_option('full_compatibility') and \
                            include_dir:
                        # Drop everything after the first space when trying
                        # to be fully compatible (sucky behaviour on Win32).
                        include_dir = include_dir.split()[0]
                    if sys.platform == 'win32':
                        if Options().get_option('normalise_paths'):
                            include_dir = normpath(include_dir)
                        else:
                            include_dir = include_dir.replace('\\', '/')
                    self.properties['include_dirs'].append(include_dir)
            else:
                self.properties['other_cflags'].append(flag.strip())

    def _parse_libs(self, value, global_variables, dest=''):
        """Split a Libs string into libs, libpaths and otherlibs.

        :param dest: property-key prefix, e.g. 'private.' for Libs.private.
        """
        libs = shlex.split(value)
        skip_next = False
        for ii, lib in enumerate(libs):
            if skip_next:
                # Possibly skip an entry that was eaten by a -framework
                skip_next = False
                continue
            if lib.startswith('-l'):
                self.properties[dest + 'libs'].append(lib[2:].strip() +
                                                      self.lib_suffix)
            elif lib.startswith('-L'):
                if lib[2:] not in \
                        Options().get_option('forbidden_libdirs'):
                    # Prepend pc_sysrootdir if necessary
                    pc_sysrootdir = global_variables.get('pc_sysrootdir', None)
                    if pc_sysrootdir:
                        # Strip the leading slashes from the flag path
                        # because os.path.join() will ignore
                        # pc_sysrootdir if it thinks the flag is an
                        # absolute path
                        libpath = join(pc_sysrootdir,
                                       lib[2:].strip().lstrip('/'))
                    else:
                        libpath = lib[2:].strip()
                    if Options().get_option('full_compatibility'):
                        # Drop everything after the first space when trying
                        # to be fully compatible (sucky behaviour on Win32).
                        libpath = libpath.split()[0]
                    if sys.platform == 'win32':
                        if Options().get_option('normalise_paths'):
                            libpath = normpath(libpath)
                        else:
                            libpath = libpath.replace('\\', '/')
                    self.properties[dest + 'libpaths'].append(libpath)
            elif lib.startswith('-framework'):
                # macOS: '-framework Name' consumes the following token.
                self.properties[dest + 'otherlibs']. \
                    append(libs[ii + 1].strip() + self.lib_suffix)
                skip_next = True
            else:
                self.properties[dest + 'otherlibs'].append(lib.strip() +
                                                           self.lib_suffix)
# vim: tw=79
| |
# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
#TODO
"""
A lot of this code assumes that the link function is the identity.
I think laplace code is okay, but I'm quite sure that the EP moments will only work if the link is identity.
Furthermore, exact Gaussian inference can only be done for the identity link, so we should be asserting so for all calls which relate to that.
James 11/12/13
"""
import numpy as np
from scipy import stats, special
from . import link_functions
from .likelihood import Likelihood
from ..core.parameterization import Param
from paramz.transformations import Logexp
from scipy import stats
class Gaussian(Likelihood):
"""
Gaussian likelihood
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = -\\frac{N \\ln 2\\pi}{2} - \\frac{\\ln |K|}{2} - \\frac{(y_{i} - \\lambda(f_{i}))^{T}\\sigma^{-2}(y_{i} - \\lambda(f_{i}))}{2}
:param variance: variance value of the Gaussian distribution
:param N: Number of data points
:type N: int
"""
    def __init__(self, gp_link=None, variance=1., name='Gaussian_noise'):
        """
        :param gp_link: link function; defaults to Identity. Exact Gaussian
            inference is only valid for the identity link.
        :param variance: initial noise variance (kept positive via Logexp).
        :param name: name of this likelihood's parameter block.
        """
        if gp_link is None:
            gp_link = link_functions.Identity()
        if not isinstance(gp_link, link_functions.Identity):
            print("Warning, Exact inference is not implemeted for non-identity link functions,\
            if you are not already, ensure Laplace inference_method is used")
        super(Gaussian, self).__init__(gp_link, name=name)
        # Noise variance, constrained positive and registered as a parameter.
        self.variance = Param('variance', variance, Logexp())
        self.link_parameter(self.variance)
        # The Gaussian density is log-concave only under the identity link.
        if isinstance(gp_link, link_functions.Identity):
            self.log_concave = True
def to_dict(self):
input_dict = super(Gaussian, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.Gaussian"
input_dict["variance"] = self.variance.values.tolist()
return input_dict
def betaY(self,Y,Y_metadata=None):
#TODO: ~Ricardo this does not live here
raise RuntimeError("Please notify the GPy developers, this should not happen")
return Y/self.gaussian_variance(Y_metadata)
def gaussian_variance(self, Y_metadata=None):
return self.variance
def update_gradients(self, grad):
self.variance.gradient = grad
def ep_gradients(self, Y, cav_tau, cav_v, dL_dKdiag, Y_metadata=None, quad_mode='gk', boost_grad=1.):
return self.exact_inference_gradients(dL_dKdiag)
def exact_inference_gradients(self, dL_dKdiag, Y_metadata=None):
return dL_dKdiag.sum()
def _preprocess_values(self, Y):
"""
Check if the values of the observations correspond to the values
assumed by the likelihood function.
"""
return Y
def moments_match_ep(self, data_i, tau_i, v_i, Y_metadata_i=None):
"""
Moments match of the marginal approximation in EP algorithm
:param i: number of observation (int)
:param tau_i: precision of the cavity distribution (float)
:param v_i: mean/variance of the cavity distribution (float)
"""
sigma2_hat = 1./(1./self.variance + tau_i)
mu_hat = sigma2_hat*(data_i/self.variance + v_i)
sum_var = self.variance + 1./tau_i
Z_hat = 1./np.sqrt(2.*np.pi*sum_var)*np.exp(-.5*(data_i - v_i/tau_i)**2./sum_var)
return Z_hat, mu_hat, sigma2_hat
def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
if full_cov:
if var.ndim == 2:
var += np.eye(var.shape[0])*self.variance
if var.ndim == 3:
var += np.atleast_3d(np.eye(var.shape[0])*self.variance)
else:
var += self.variance
return mu, var
def predictive_mean(self, mu, sigma):
return mu
def predictive_variance(self, mu, sigma, predictive_mean=None):
return self.variance + sigma**2
def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
return [stats.norm.ppf(q/100.)*np.sqrt(var + self.variance) + mu for q in quantiles]
def pdf_link(self, link_f, y, Y_metadata=None):
"""
Likelihood function given link(f)
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = -\\frac{N \\ln 2\\pi}{2} - \\frac{\\ln |K|}{2} - \\frac{(y_{i} - \\lambda(f_{i}))^{T}\\sigma^{-2}(y_{i} - \\lambda(f_{i}))}{2}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: likelihood evaluated for this point
:rtype: float
"""
#Assumes no covariance, exp, sum, log for numerical stability
return np.exp(np.sum(np.log(stats.norm.pdf(y, link_f, np.sqrt(self.variance)))))
def logpdf_link(self, link_f, y, Y_metadata=None):
"""
Log likelihood function given link(f)
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = -\\frac{N \\ln 2\\pi}{2} - \\frac{\\ln |K|}{2} - \\frac{(y_{i} - \\lambda(f_{i}))^{T}\\sigma^{-2}(y_{i} - \\lambda(f_{i}))}{2}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: log likelihood evaluated for this point
:rtype: float
"""
ln_det_cov = np.log(self.variance)
return -(1.0/(2*self.variance))*((y-link_f)**2) - 0.5*ln_det_cov - 0.5*np.log(2.*np.pi)
def dlogpdf_dlink(self, link_f, y, Y_metadata=None):
"""
Gradient of the pdf at y, given link(f) w.r.t link(f)
.. math::
\\frac{d \\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)} = \\frac{1}{\\sigma^{2}}(y_{i} - \\lambda(f_{i}))
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: gradient of log likelihood evaluated at points link(f)
:rtype: Nx1 array
"""
s2_i = 1.0/self.variance
grad = s2_i*y - s2_i*link_f
return grad
def d2logpdf_dlink2(self, link_f, y, Y_metadata=None):
"""
Hessian at y, given link_f, w.r.t link_f.
i.e. second derivative logpdf at y given link(f_i) link(f_j) w.r.t link(f_i) and link(f_j)
The hessian will be 0 unless i == j
.. math::
\\frac{d^{2} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{2}f} = -\\frac{1}{\\sigma^{2}}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: Diagonal of log hessian matrix (second derivative of log likelihood evaluated at points link(f))
:rtype: Nx1 array
.. Note::
Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
(the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
"""
N = y.shape[0]
D = link_f.shape[1]
hess = -(1.0/self.variance)*np.ones((N, D))
return hess
def d3logpdf_dlink3(self, link_f, y, Y_metadata=None):
"""
Third order derivative log-likelihood function at y given link(f) w.r.t link(f)
.. math::
\\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{3}\\lambda(f)} = 0
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: third derivative of log likelihood evaluated at points link(f)
:rtype: Nx1 array
"""
N = y.shape[0]
D = link_f.shape[1]
d3logpdf_dlink3 = np.zeros((N,D))
return d3logpdf_dlink3
def dlogpdf_link_dvar(self, link_f, y, Y_metadata=None):
"""
Gradient of the log-likelihood function at y given link(f), w.r.t variance parameter (noise_variance)
.. math::
\\frac{d \\ln p(y_{i}|\\lambda(f_{i}))}{d\\sigma^{2}} = -\\frac{N}{2\\sigma^{2}} + \\frac{(y_{i} - \\lambda(f_{i}))^{2}}{2\\sigma^{4}}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: derivative of log likelihood evaluated at points link(f) w.r.t variance parameter
:rtype: float
"""
e = y - link_f
s_4 = 1.0/(self.variance**2)
dlik_dsigma = -0.5/self.variance + 0.5*s_4*np.square(e)
return dlik_dsigma
def dlogpdf_dlink_dvar(self, link_f, y, Y_metadata=None):
"""
Derivative of the dlogpdf_dlink w.r.t variance parameter (noise_variance)
.. math::
\\frac{d}{d\\sigma^{2}}(\\frac{d \\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)}) = \\frac{1}{\\sigma^{4}}(-y_{i} + \\lambda(f_{i}))
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: derivative of log likelihood evaluated at points link(f) w.r.t variance parameter
:rtype: Nx1 array
"""
s_4 = 1.0/(self.variance**2)
dlik_grad_dsigma = -s_4*y + s_4*link_f
return dlik_grad_dsigma
def d2logpdf_dlink2_dvar(self, link_f, y, Y_metadata=None):
"""
Gradient of the hessian (d2logpdf_dlink2) w.r.t variance parameter (noise_variance)
.. math::
\\frac{d}{d\\sigma^{2}}(\\frac{d^{2} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{2}\\lambda(f)}) = \\frac{1}{\\sigma^{4}}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: derivative of log hessian evaluated at points link(f_i) and link(f_j) w.r.t variance parameter
:rtype: Nx1 array
"""
s_4 = 1.0/(self.variance**2)
N = y.shape[0]
D = link_f.shape[1]
d2logpdf_dlink2_dvar = np.ones((N, D))*s_4
return d2logpdf_dlink2_dvar
def dlogpdf_link_dtheta(self, f, y, Y_metadata=None):
dlogpdf_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
dlogpdf_dtheta[0,:,:] = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata)
return dlogpdf_dtheta
def dlogpdf_dlink_dtheta(self, f, y, Y_metadata=None):
dlogpdf_dlink_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
dlogpdf_dlink_dtheta[0, :, :]= self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata)
return dlogpdf_dlink_dtheta
def d2logpdf_dlink2_dtheta(self, f, y, Y_metadata=None):
d2logpdf_dlink2_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
d2logpdf_dlink2_dtheta[0, :, :] = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata)
return d2logpdf_dlink2_dtheta
def _mean(self, gp):
"""
Expected value of y under the Mass (or density) function p(y|f)
.. math::
E_{p(y|f)}[y]
"""
return self.gp_link.transf(gp)
def _variance(self, gp):
"""
Variance of y under the Mass (or density) function p(y|f)
.. math::
Var_{p(y|f)}[y]
"""
return self.variance
def samples(self, gp, Y_metadata=None):
"""
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
"""
orig_shape = gp.shape
gp = gp.flatten()
#orig_shape = gp.shape
gp = gp.flatten()
Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp])
return Ysim.reshape(orig_shape)
def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None):
"""
assumes independence
"""
v = var_star + self.variance
return -0.5*np.log(2*np.pi) -0.5*np.log(v) - 0.5*np.square(y_test - mu_star)/v
def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
if not isinstance(self.gp_link, link_functions.Identity):
return super(Gaussian, self).variational_expectations(Y=Y, m=m, v=v, gh_points=gh_points, Y_metadata=Y_metadata)
lik_var = float(self.variance)
F = -0.5*np.log(2*np.pi) -0.5*np.log(lik_var) - 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/lik_var
dF_dmu = (Y - m)/lik_var
dF_dv = np.ones_like(v)*(-0.5/lik_var)
dF_dtheta = -0.5/lik_var + 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/(lik_var**2)
return F, dF_dmu, dF_dv, dF_dtheta.reshape(1, Y.shape[0], Y.shape[1])
class HeteroscedasticGaussian(Gaussian):
    """
    Gaussian likelihood with an individual noise variance per data point.
    The variance entry for each datum is selected via ``Y_metadata['output_index']``.
    :param Y_metadata: dict holding an 'output_index' array mapping each datum to its variance entry
    :param gp_link: link function (defaults to identity)
    :param variance: initial variance value used for every entry
    :param name: name of the likelihood
    """
    def __init__(self, Y_metadata, gp_link=None, variance=1., name='het_Gauss'):
        if gp_link is None:
            gp_link = link_functions.Identity()
        # Fix: Gaussian.__init__ already prints the non-identity-link warning,
        # so the duplicated check/print that used to live here is removed.
        super(HeteroscedasticGaussian, self).__init__(gp_link, np.ones(Y_metadata['output_index'].shape)*variance, name)
    def exact_inference_gradients(self, dL_dKdiag, Y_metadata=None):
        """Per-point gradient of the log marginal w.r.t. each variance entry."""
        return dL_dKdiag[Y_metadata['output_index']]
    def gaussian_variance(self, Y_metadata=None):
        """Noise variance for each datum, indexed by Y_metadata['output_index']."""
        return self.variance[Y_metadata['output_index'].flatten()]
    def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
        """Add the per-point noise variance to the latent predictive (co)variance."""
        _s = self.variance[Y_metadata['output_index'].flatten()]
        if full_cov:
            if var.ndim == 2:
                var += np.eye(var.shape[0])*_s
            if var.ndim == 3:
                var += np.atleast_3d(np.eye(var.shape[0])*_s)
        else:
            var += _s
        return mu, var
    def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
        """Gaussian predictive quantiles (percent) including per-point noise."""
        _s = self.variance[Y_metadata['output_index'].flatten()]
        return [stats.norm.ppf(q/100.)*np.sqrt(var + _s) + mu for q in quantiles]
| |
"""
charger
=====
Simulates a charging system (e.g. EV-charging, battery charging)
Configurable parameters::
{
<TODO>
}
Device properties created::
{
pilot : A|B|C|F # Pilot signal indicates charging state (A=Available, B=Blocking, C=Charging, F=Fault)
energy: # kWh transferred so far this session (ramp which rises)
power: # instantaneous kW
uui : # Charging session token: a random number for each charging session
max_kW : # Max charging power
monthly_value: # Approx monthly value of charger
occupied: # True when there is a vehicle present (which may be ICE in which case no charge cycle will occur)
}
There are three things that can get in the way of charging:
1) Hogging - a car finishes charging (Pilot goes C->B) and just stays there
2) Fault - a charger goes into a fault state
3) ICEing - a car arrives in the spot (occupied==True) but doesn't start charging.
"""
import logging
import random
import isodate
import datetime
from .helpers import opening_times as opening_times
from .helpers import ev_mfrs as ev_mfrs
from common import utils
from .device import Device
# Time units, in seconds.
MINS = 60
HOURS = MINS * 60
DAYS = HOURS * 24
MAX_INTERVAL_BETWEEN_POTENTIAL_CHARGES_S = 10 * HOURS # Not precise, but smaller means more charging
DEFAULT_AVERAGE_BLOCKING_TIME = "PT60M"
CHANCE_OF_BLOCKING = 0.2 # If this is close to 1, implies that cars often charge to full.
CHANCE_OF_ZERO_ENERGY_CHARGE = 0.01
CHANCE_OF_SILENT_FAULT = 0.005 # A silent fault is one that isn't flagged with a fault code, but nevertheless prevents charging (e.g. external damage)
CHARGE_POLL_INTERVAL_S = 5 * MINS
MIN_GAP_BETWEEN_CHARGES_S = 10 * MINS
# Each row is [max kW rating, percent of chargers with that rating].
CHARGER_MAX_RATE_PERCENT = [ [7, 20],
                             [22, 40],
                             [50, 20],
                             [100, 10],
                             [150, 10] ]
# Each row is [kW requested by the vehicle, percent of charges at that rate].
CHARGE_RATES_KW_PERCENT = [ [3, 10],
                            [7, 30],
                            [22, 30],
                            [50, 20],
                            [100,5],
                            [150,5] ]
MAX_KWH_PER_CHARGE = 70
# Dwell time: how long a vehicle stays connected per session.
DWELL_TIME_MIN_S = 20 * MINS
DWELL_TIME_MAX_S = 8 * HOURS
DWELL_TIME_AV_S = 1 * HOURS
# Energy transferred per session (kWh).
KWH_PER_CHARGE_MIN = 4
KWH_PER_CHARGE_MAX = 70
KWH_PER_CHARGE_AV = 20
HEARTBEAT_PERIOD = 15 * MINS
POWER_TO_MONTHLY_VALUE = 8 # Ratio to turn charger's max kW into currency
FAULTS = [
    # Fault MTBF FRACTIONAL DECREASE BASED ON LOCATION
    ["Earth Relay", 20 * DAYS, 0.50],
    ["Mennekes Fault", 40 * DAYS, 0.30],
    ["Overcurrent", 15 * DAYS, 0.00],
    ["RCD trip", 20 * DAYS, 0.00],
    ["Relay Weld", 100 * DAYS, 0.00],
    ["Overtemperature", 100 * DAYS, 0.40]
]
FAULT_RECTIFICATION_TIME_AV = 2 * DAYS
ALT_FAULT_CODES = { # Some chargers emit different fault codes
    "Earth Relay" : 100,
    "Mennekes Fault" : 200,
    "Overcurrent" : 300,
    "RCD trip" : 400,
    "Relay Weld" : 500,
    "Overtemperature" : 600
}
VOLTAGE_FAULT = "Voltage excursion"
# Legal supply-voltage limits; excursions outside these trigger VOLTAGE_FAULT.
MAX_SAFE_VOLTAGE = 253
MIN_SAFE_VOLTAGE = 207
CHANCE_OF_ICEING = 0.02 # For any intended charge cycle, what is the chance that instead someone blocks the charger with a fossil-fuel car (i.e. "occupied" but no charge)
class Charger(Device):
    """A simulated charging point.

    Cycles the "pilot" property through A (available), B (blocking), C (charging)
    and F (fault), emitting energy/power telemetry as it goes. See the module
    docstring for the full property model.
    """
    myRandom = random.Random() # Use our own private random-number generator for repeatability
    myRandom.seed(5678)
    def expo_random(self, min_val, max_val, av_val):
        """Draw from an exponential distribution with mean av_val, clamped to [min_val, max_val]."""
        n = Charger.myRandom.expovariate(1/av_val)
        n = min(max_val, n)
        n = max(min_val, n)
        return n
    def __init__(self, instance_name, time, engine, update_callback, context, params):
        """Create the charger, derive location-dependent attributes and schedule the
        heartbeat and the first charge."""
        super(Charger,self).__init__(instance_name, time, engine, update_callback, context, params)
        self.loc_rand = utils.consistent_hash(self.get_property_or_None("address_postal_code")) # Allows us to vary behaviour based on our location
        (mfr,model,max_rate,datasheet) = ev_mfrs.pick_mfr_model_kW_datasheet(self.loc_rand)
        self.set_properties( {
            "manufacturer" : mfr,
            "model" : model,
            "max_kW" : max_rate,
            "datasheet" : datasheet,
            "monthly_value" : max_rate * POWER_TO_MONTHLY_VALUE * Charger.myRandom.random() * 2
            } )
        self.opening_time_pattern = opening_times.pick_pattern(self.loc_rand)
        self.set_property("opening_times", opening_times.specification(self.opening_time_pattern))
        self.set_property("device_type", "charger")
        # Synthesize contact details deterministically from the postcode hash.
        domain = self.get_property("address_postal_code").replace(" ","") + ".example.com"
        self.set_property("domain", domain)
        self.set_property("email", self.get_property("$id") + "@" + domain)
        sevendigits = "%07d" % int(self.loc_rand * 1E7)
        self.set_property("phone", "+1" + sevendigits[0:3] + "555" + sevendigits[3:7])
        self.set_property("occupied", False)
        self.average_blocking_time_s = isodate.parse_duration(params["charger"].get("average_blocking_time", DEFAULT_AVERAGE_BLOCKING_TIME)).total_seconds()
        self.last_charging_start_time = None
        self.last_charging_end_time = None
        self.set_properties( {
            "pilot" : "A",
            "energy" : 0,
            "energy_delta" : 0,
            "power" : 0,
            "fault" : None
            })
        self.silent_fault = False
        self.engine.register_event_in(HEARTBEAT_PERIOD, self.tick_heartbeat, self, self)
        self.engine.register_event_at(self.time_of_next_charge(), self.tick_start_charge, self, self)
    def comms_ok(self):
        return super(Charger,self).comms_ok()
    def external_event(self, event_name, arg):
        """Handle externally-injected events; currently only resetVoltageExcursion."""
        super(Charger,self).external_event(event_name, arg)
        logging.info("Handling external event for "+str(self.properties["$id"]))
        if event_name=="resetVoltageExcursion":
            logging.info("Resetting voltage excursion on device "+self.properties["$id"])
            self.set_properties({
                "pilot" : "A",
                "fault" : None
                })
            self.engine.register_event_at(self.time_of_next_charge(), self.tick_start_charge, self, self)
        else:
            logging.error("Ignoring unrecognised external event "+str(event_name))
    def close(self):
        super(Charger,self).close()
    def pick_a_fault(self, sampling_interval_s):
        """Randomly pick a fault (or None), with per-fault MTBF degraded by location."""
        for (fault, mtbf, var) in FAULTS:
            var *= self.loc_rand # 0..1 based on location
            mtbf = mtbf * (1-var) # Decrease MTBF by var (i.e. make it less reliable)
            chance = sampling_interval_s / mtbf # 50% point
            if Charger.myRandom.random() < chance * 0.5:
                if self.get_property("max_kW") == 50: # 50kW chargers report different error codes (example of a real-world bizarreness)
                    fault = ALT_FAULT_CODES[fault]
                return fault
        return None
    def tick_heartbeat(self, _):
        """Periodic heartbeat; also trips a fault if voltage is outside legal limits."""
        self.set_properties({
            "heartbeat" : True
        })
        # Go into fault state if voltage outside legal limits
        v = self.get_property("voltage")
        if v is not None:
            if v > MAX_SAFE_VOLTAGE or v < MIN_SAFE_VOLTAGE:
                self.enter_fault_state(VOLTAGE_FAULT)
        self.engine.register_event_in(HEARTBEAT_PERIOD, self.tick_heartbeat, self, self)
    def tick_start_charge(self, _):
        """Attempt to start a charging session (may instead ICE, fault or silently fail)."""
        # Maybe this is an ICEing, not a charge
        if Charger.myRandom.random() < CHANCE_OF_ICEING:
            self.set_property("occupied", True)
            self.engine.register_event_at(self.time_of_next_charge(), self.tick_end_iceing, self, self) # An iceing takes as long as a charge, let's say
            return
        # Faulty points can't charge
        if self.get_property("fault") != None:
            self.engine.register_event_at(self.time_of_next_charge(), self.tick_start_charge, self, self)
            return
        if Charger.myRandom.random() < CHANCE_OF_SILENT_FAULT: # Start a silent fault
            self.silent_fault = True
        if self.silent_fault: # For now, silent faults never end
            self.engine.register_event_at(self.time_of_next_charge(), self.tick_start_charge, self, self)
            return
        self.uui = Charger.myRandom.randrange(0, 99999999)
        rate = self.choose_percent(CHARGE_RATES_KW_PERCENT) # What rate would car like to charge?
        rate = min(rate, self.get_property("max_kW")) # Limit to charger capacity
        self.charging_rate_kW = rate
        if Charger.myRandom.random() < CHANCE_OF_ZERO_ENERGY_CHARGE:
            logging.info(self.get_property("$id")+": Starting zero energy charge")
            self.charging_rate_kW = 0
        self.energy_to_transfer = self.expo_random(KWH_PER_CHARGE_MIN, KWH_PER_CHARGE_MAX, KWH_PER_CHARGE_AV)
        self.max_charging_time_s = self.expo_random(DWELL_TIME_MIN_S, DWELL_TIME_MAX_S, DWELL_TIME_AV_S)
        self.energy_this_charge = 0
        self.last_charging_start_time = self.engine.get_now()
        self.set_properties({
            "pilot" : "C",
            "uui" : self.uui,
            "energy" : 0,
            "energy_delta" : 0,
            "power" : self.charging_rate_kW,
            "fault" : None,
            "occupied" : True
            })
        self.engine.register_event_in(CHARGE_POLL_INTERVAL_S, self.tick_check_charge, self, self)
    def enter_fault_state(self, fault):
        """Abort any charge in progress, publish the fault and schedule its rectification."""
        logging.info(str(self.get_property("$id")) + " " + str(fault) + " fault")
        self.set_properties({
            "pilot" : "F",
            "fault" : fault,
            "energy" : 0, # We might have been charging so stop charge
            "energy_delta" : 0,
            "power" : 0
            })
        self.engine.register_event_in(Charger.myRandom.random() * FAULT_RECTIFICATION_TIME_AV * 2, self.tick_rectify_fault, self, self)
    def tick_check_charge(self, _):
        """Advance a charging session by one poll interval; may finish, block or fault."""
        # (faults can be externally-injected)
        if self.get_property("fault") != None:
            self.engine.register_event_at(self.time_of_next_charge(), self.tick_start_charge, self, self)
            return
        energy_transferred = (CHARGE_POLL_INTERVAL_S / (60*60)) * self.charging_rate_kW
        self.energy_this_charge += energy_transferred
        self.energy_to_transfer -= energy_transferred
        fault = self.pick_a_fault(CHARGE_POLL_INTERVAL_S) # Faults only occur while charging
        if fault != None:
            self.enter_fault_state(fault)
        else:
            if (self.energy_to_transfer > 0) and (self.engine.get_now() - self.last_charging_start_time < self.max_charging_time_s): # STILL CHARGING
                self.set_properties({
                    # "pilot" : "C",
                    "energy" : int(self.energy_this_charge),
                    "energy_delta" : energy_transferred,
                    "power" : self.charging_rate_kW })
                self.engine.register_event_in(CHARGE_POLL_INTERVAL_S, self.tick_check_charge, self, self)
            else: # FINISHED CHARGING
                self.time_finished_charging = self.engine.get_now()
                if Charger.myRandom.random() < CHANCE_OF_BLOCKING:
                    self.will_block_for = Charger.myRandom.random() * self.average_blocking_time_s # BLOCKING
                    self.set_properties({
                        "pilot" : "B",
                        "energy" : int(self.energy_this_charge),
                        "energy_delta" : 0,
                        "power" : 0})
                    self.engine.register_event_in(CHARGE_POLL_INTERVAL_S, self.tick_check_blocking, self, self)
                else: # AVAILABLE
                    self.set_properties({
                        "pilot" : "A",
                        "occupied" : False,
                        "energy" : 0,
                        "energy_delta" : 0,
                        "power" : 0})
                    tonc = self.time_of_next_charge()
                    self.engine.register_event_at(tonc, self.tick_start_charge, self, self)
    def tick_check_blocking(self, _):
        """Poll a fully-charged-but-still-connected car until its blocking time elapses."""
        if self.engine.get_now() >= self.time_finished_charging + self.will_block_for:
            self.set_properties({
                "pilot" : "A",
                "energy" : 0,
                "occupied" : False}) # Disconnect
            self.engine.register_event_at(self.time_of_next_charge(), self.tick_start_charge, self, self)
        else:
            self.set_properties({
                "pilot" : "B",
                "energy" : int(self.energy_this_charge)
            })
            self.engine.register_event_in(CHARGE_POLL_INTERVAL_S, self.tick_check_blocking, self, self)
    def tick_end_iceing(self, _):
        """The ICE vehicle leaves; schedule the next potential charge."""
        self.set_property("occupied", False)
        self.engine.register_event_at(self.time_of_next_charge(), self.tick_start_charge, self, self)
    def tick_rectify_fault(self, _):
        """Self-heal a fault, except those (e.g. voltage excursion) that need an external reset."""
        if self.get_property("fault") in [VOLTAGE_FAULT]: # Some faults don't fix themselves
            return
        self.set_properties({
            "pilot" : "A",
            "fault" : None
            })
        self.engine.register_event_at(self.time_of_next_charge(), self.tick_start_charge, self, self)
    def should_charge_at(self, epoch):
        # Given a time, should we charge at it?
        chance = opening_times.chance_of_occupied(epoch, self.opening_time_pattern)
        yes = chance > Charger.myRandom.random()
        return yes
    def time_of_next_charge(self):
        """Pick the start time of the next charge, weighted by the site's opening times."""
        t0 = self.engine.get_now() + MIN_GAP_BETWEEN_CHARGES_S # This ASSUMES that we're asking this question at the end of a charge (sometimes at least)
        while True: # Keep picking plausible charging times, and use opening_times to tell us how likely each is, until we get lucky
            if self.should_charge_at(t0):
                return t0
            t0 += Charger.myRandom.random() * MAX_INTERVAL_BETWEEN_POTENTIAL_CHARGES_S
    def choose_percent(self, table):
        """Pick a value from a [[value, percent], ...] table according to its percentages.

        Fix: the old ``percent <= cum_likelihood`` test was off by one — with
        randrange(0, 100) producing 0..99 it gave the first bucket likelihood+1
        of the 100 outcomes and the last bucket likelihood-1. ``<`` makes each
        bucket exactly its stated percentage, and the fallback return guards
        against tables whose percentages sum to less than 100.
        """
        percent = Charger.myRandom.randrange(0, 100)
        cum_likelihood = 0
        for rate, likelihood in table:
            cum_likelihood += likelihood
            if percent < cum_likelihood:
                return rate
        return rate
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift import gettext_ as _
from swift.common.utils import search_tree, remove_file, write_file
from swift.common.exceptions import InvalidPidFileException
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
PROC_DIR = '/proc'
# auth-server has been removed from ALL_SERVERS, start it explicitly
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
               'container-replicator', 'container-reconciler',
               'container-server', 'container-sync',
               'container-updater', 'object-auditor', 'object-server',
               'object-expirer', 'object-replicator',
               'object-reconstructor', 'object-updater',
               'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
                'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
# aliases mapping
ALIASES = {'all': ALL_SERVERS, 'main': MAIN_SERVERS, 'rest': REST_SERVERS}
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['object-expirer', 'container-reconciler']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
# Resource limits requested by setup_env().
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
MAX_PROCS = 8192 # workers * disks, can get high
def setup_env():
    """Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
    """
    # Each entry: (rlimit, requested value, warning printed if it can't be set).
    # All three are attempted even if an earlier one fails.
    limit_requests = (
        (resource.RLIMIT_NOFILE, MAX_DESCRIPTORS,
         "WARNING: Unable to modify file descriptor limit. "
         "Running as non-root?"),
        (resource.RLIMIT_DATA, MAX_MEMORY,
         "WARNING: Unable to modify memory limit. "
         "Running as non-root?"),
        (resource.RLIMIT_NPROC, MAX_PROCS,
         "WARNING: Unable to modify max process limit. "
         "Running as non-root?"),
    )
    for res_limit, target, warning in limit_requests:
        try:
            resource.setrlimit(res_limit, (target, target))
        except ValueError:
            print(_(warning))
    # Set PYTHON_EGG_CACHE if it isn't already set
    os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp')
def command(func):
    """
    Decorator to declare which methods are accessible as commands, commands
    always return 1 or 0, where 0 should indicate success.
    :param func: function to make public
    """
    # mark the underlying function; functools.wraps copies the attribute
    # onto the wrapper as well, so get_command() can find it either way
    func.publicly_accessible = True

    @functools.wraps(func)
    def wrapped(*a, **kw):
        # collapse the command's return value to a 0/1 exit status
        return 1 if func(*a, **kw) else 0
    return wrapped
def watch_server_pids(server_pids, interval=1, **kwargs):
    """Watch a collection of server pids for up to ``interval`` seconds,
    yielding (server, pid) for each pid that stops running.
    :param server_pids: a dict, lists of pids [int,...] keyed on
                        Server objects
    """
    deadline = time.time() + interval
    watched = dict(server_pids)  # work on a copy; we rewrite the values below
    running = {}
    while True:
        for server, pids in watched.items():
            for pid in pids:
                try:
                    # reap the child if it has already exited
                    os.waitpid(pid, os.WNOHANG)
                except OSError as err:
                    # no such child / no such process is expected here
                    if err.errno not in (errno.ECHILD, errno.ESRCH):
                        raise
            alive = server.get_running_pids(**kwargs)
            running[server] = alive
            # any originally-watched pid no longer running is reported
            for pid in pids:
                if pid not in alive:
                    yield server, pid
            # only keep watching the pids that are still alive
            watched[server] = alive
        if not any(running.values()):
            # nothing left running anywhere
            break
        if time.time() > deadline:
            break
        else:
            time.sleep(0.1)
def safe_kill(pid, sig, name):
    """Send signal to process and check process name
    : param pid: process id
    : param sig: signal to send
    : param name: name to ensure target process
    """
    # Only vet the process name when "signalling" with SIG_DFL (i.e. 0),
    # which is used purely as an existence probe.
    if sig == signal.SIG_DFL:
        cmdline_file = '%s/%d/cmdline' % (PROC_DIR, pid)
        try:
            if os.path.exists(cmdline_file):
                with open(cmdline_file, 'r') as fd:
                    if name not in fd.read():
                        # unknown process is using the pid
                        raise InvalidPidFileException()
        except IOError:
            pass
    os.kill(pid, sig)
def kill_group(pid, sig):
    """Send ``sig`` to every process in the process group identified by ``pid``.
    : param pid: process id
    : param sig: signal to send
    """
    # kill(2) treats a negative pid as "the whole process group |pid|"
    os.kill(-pid, sig)
class UnknownCommandError(Exception):
    """Raised when a requested command name does not map to a @command method."""
    pass
class Manager(object):
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
self.server_names = set()
self._default_strict = True
for server in servers:
if server in ALIASES:
self.server_names.update(ALIASES[server])
self._default_strict = False
elif '*' in server:
# convert glob to regex
self.server_names.update([
s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
self._default_strict = False
else:
self.server_names.add(server)
self.servers = set()
for name in self.server_names:
self.servers.add(Server(name, run_dir))
def __iter__(self):
return iter(self.servers)
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
strict = kwargs.get('strict')
# if strict not set explicitly
if strict is None:
strict = self._default_strict
for server in self.servers:
status += 0 if server.launch(**kwargs) else 1
if not strict:
status = 0
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print(_('\nuser quit'))
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['daemon'] = False
return self.start(**kwargs)
@command
def once(self, **kwargs):
"""start server and run one pass on supporting daemons
"""
kwargs['once'] = True
return self.start(**kwargs)
@command
def stop(self, **kwargs):
"""stops a server
"""
server_pids = {}
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print(_('No %s running') % server)
else:
server_pids[server] = signaled_pids
# all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
signaled_pids = [p for server, pids in server_pids.items()
for p in pids]
# keep track of the pids yeiled back as killed for all servers
killed_pids = set()
kill_wait = kwargs.get('kill_wait', KILL_WAIT)
for server, killed_pid in watch_server_pids(server_pids,
interval=kill_wait,
**kwargs):
print(_("%(server)s (%(pid)s) appears to have stopped") %
{'server': server, 'pid': killed_pid})
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
# all processes have been stopped
return 0
# reached interval n watch_pids w/o killing all servers
kill_after_timeout = kwargs.get('kill_after_timeout', False)
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
if kill_after_timeout:
print(_('Waited %(kill_wait)s seconds for %(server)s '
'to die; killing') %
{'kill_wait': kill_wait, 'server': server})
# Send SIGKILL to all remaining pids
for pid in set(pids.keys()) - killed_pids:
print(_('Signal %(server)s pid: %(pid)s signal: '
'%(signal)s') % {'server': server,
'pid': pid,
'signal': signal.SIGKILL})
# Send SIGKILL to process group
try:
kill_group(pid, signal.SIGKILL)
except OSError as e:
# PID died before kill_group can take action?
if e.errno != errno.ESRCH:
raise
else:
print(_('Waited %(kill_wait)s seconds for %(server)s '
'to die; giving up') %
{'kill_wait': kill_wait, 'server': server})
return 1
@command
def kill(self, **kwargs):
"""stop a server (no error if not running)
"""
status = self.stop(**kwargs)
kwargs['quiet'] = True
if status and not self.status(**kwargs):
# only exit error if the server is still running
return status
return 0
@command
def shutdown(self, **kwargs):
"""allow current requests to finish on supporting servers
"""
kwargs['graceful'] = True
status = 0
status += self.stop(**kwargs)
return status
@command
def restart(self, **kwargs):
"""stops then restarts server
"""
status = 0
status += self.stop(**kwargs)
status += self.start(**kwargs)
return status
@command
def reload(self, **kwargs):
"""graceful shutdown then restart on supporting servers
"""
kwargs['graceful'] = True
status = 0
for server in self.server_names:
m = Manager([server])
status += m.stop(**kwargs)
status += m.start(**kwargs)
return status
    @command
    def force_reload(self, **kwargs):
        """alias for reload
        """
        # kept as a distinct command so "force-reload" style invocations
        # keep working; presumably mirrors init-script conventions
        return self.reload(**kwargs)
def get_command(self, cmd):
"""Find and return the decorated method named like cmd
:param cmd: the command to get, a string, if not found raises
UnknownCommandError
"""
cmd = cmd.lower().replace('-', '_')
f = getattr(self, cmd, None)
if f is None:
raise UnknownCommandError(cmd)
if not hasattr(f, 'publicly_accessible'):
raise UnknownCommandError(cmd)
return f
@classmethod
def list_commands(cls):
"""Get all publicly accessible commands
:returns: a list of string tuples (cmd, help), the method names who are
decorated as commands
"""
get_method = lambda cmd: getattr(cls, cmd)
return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip())
for x in dir(cls) if
getattr(get_method(x), 'publicly_accessible', False)])
def run_command(self, cmd, **kwargs):
"""Find the named command and run it
:param cmd: the command name to run
"""
f = self.get_command(cmd)
return f(**kwargs)
class Server(object):
    """Manage operations on a server or group of servers of similar type
    :param server: name of server
    """
    def __init__(self, server, run_dir=RUN_DIR):
        # Normalize the name: "proxy" becomes "proxy-server", and an optional
        # ".<conf>" suffix (e.g. "object-replicator.1") selects a named config.
        self.server = server.lower()
        if '.' in self.server:
            self.server, self.conf = self.server.rsplit('.', 1)
        else:
            self.conf = None
        if '-' not in self.server:
            self.server = '%s-server' % self.server
        self.type = self.server.rsplit('-', 1)[0]
        self.cmd = 'swift-%s' % self.server
        self.procs = []  # subprocess.Popen objects created by spawn()
        self.run_dir = run_dir
    def __str__(self):
        return self.server
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
    def __hash__(self):
        # hash/eq by server name so Server objects behave in sets and dicts
        return hash(str(self))
    def __eq__(self, other):
        try:
            return self.server == other.server
        except AttributeError:
            return False
    def __ne__(self, other):
        return not self.__eq__(other)
    def get_pid_file_name(self, conf_file):
        """Translate conf_file to a corresponding pid_file
        :param conf_file: a conf_file for this server, a string
        :returns: the pid_file for this conf_file
        """
        # Mirror the conf path under run_dir, swapping the generic
        # "<type>-server" component for this server's name, and the extension.
        return conf_file.replace(
            os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace(
                '%s-server' % self.type, self.server, 1).replace(
                    '.conf', '.pid', 1)
    def get_conf_file_name(self, pid_file):
        """Translate pid_file to a corresponding conf_file
        :param pid_file: a pid_file for this server, a string
        :returns: the conf_file for this pid_file
        """
        # Inverse of get_pid_file_name(); standalone servers don't follow the
        # "<type>-server" naming scheme, so no name component is swapped.
        if self.server in STANDALONE_SERVERS:
            return pid_file.replace(
                os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
                    '.pid', '.conf', 1)
        else:
            return pid_file.replace(
                os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
                    self.server, '%s-server' % self.type, 1).replace(
                        '.pid', '.conf', 1)
    def conf_files(self, **kwargs):
        """Get conf files for this server
        :param: number, if supplied will only lookup the nth server
        :returns: list of conf files
        """
        if self.server in STANDALONE_SERVERS:
            server_search = self.server
        else:
            server_search = "%s-server" % self.type
        if self.conf is not None:
            found_conf_files = search_tree(SWIFT_DIR, server_search,
                                           self.conf + '.conf',
                                           dir_ext=self.conf + '.conf.d')
        else:
            found_conf_files = search_tree(SWIFT_DIR, server_search + '*',
                                           '.conf', dir_ext='.conf.d')
        number = kwargs.get('number')
        if number:
            # "number" is 1-based; out of range yields an empty list
            try:
                conf_files = [found_conf_files[number - 1]]
            except IndexError:
                conf_files = []
        else:
            conf_files = found_conf_files
        if not conf_files:
            # maybe there's a config file(s) out there, but I couldn't find it!
            if not kwargs.get('quiet'):
                if number:
                    print(_('Unable to locate config number %(number)s for'
                            ' %(server)s') %
                          {'number': number, 'server': self.server})
                else:
                    print(_('Unable to locate config for %s') % self.server)
            if kwargs.get('verbose') and not kwargs.get('quiet'):
                if found_conf_files:
                    print(_('Found configs:'))
                for i, conf_file in enumerate(found_conf_files):
                    print(' %d) %s' % (i + 1, conf_file))
        return conf_files
    def pid_files(self, **kwargs):
        """Get pid files for this server
        :param: number, if supplied will only lookup the nth server
        :returns: list of pid files
        """
        if self.conf is not None:
            pid_files = search_tree(self.run_dir, '%s*' % self.server,
                                    exts=[self.conf + '.pid',
                                          self.conf + '.pid.d'])
        else:
            pid_files = search_tree(self.run_dir, '%s*' % self.server)
        if kwargs.get('number', 0):
            conf_files = self.conf_files(**kwargs)
            # filter pid_files to match the index of numbered conf_file
            pid_files = [pid_file for pid_file in pid_files if
                         self.get_conf_file_name(pid_file) in conf_files]
        return pid_files
    def iter_pid_files(self, **kwargs):
        """Generator, yields (pid_file, pids)
        """
        for pid_file in self.pid_files(**kwargs):
            # A garbage pid file yields pid=None, which signal_pids() treats
            # as invalid and removes.
            # NOTE(review): the file handle is never explicitly closed here;
            # this relies on CPython's refcounting to clean it up.
            try:
                pid = int(open(pid_file).read().strip())
            except ValueError:
                pid = None
            yield pid_file, pid
    def signal_pids(self, sig, **kwargs):
        """Send a signal to pids for this server
        :param sig: signal to send
        :returns: a dict mapping pids (ints) to pid_files (paths)
        """
        pids = {}
        for pid_file, pid in self.iter_pid_files(**kwargs):
            if not pid:  # Catches None and 0
                print(_('Removing pid file %s with invalid pid') % pid_file)
                remove_file(pid_file)
                continue
            try:
                if sig != signal.SIG_DFL:
                    print(_('Signal %(server)s pid: %(pid)s signal: '
                            '%(signal)s') %
                          {'server': self.server, 'pid': pid, 'signal': sig})
                safe_kill(pid, sig, 'swift-%s' % self.server)
            except InvalidPidFileException as e:
                # the pid file does not belong to this server; drop it
                # NOTE(review): the ',' below should probably be '%' -- as
                # written this prints an unformatted (message, dict) tuple.
                if kwargs.get('verbose'):
                    print(_('Removing pid file %(pid_file)s with wrong pid '
                            '%(pid)d'), {'pid_file': pid_file, 'pid': pid})
                remove_file(pid_file)
            except OSError as e:
                if e.errno == errno.ESRCH:
                    # pid does not exist
                    if kwargs.get('verbose'):
                        print(_("Removing stale pid file %s") % pid_file)
                    remove_file(pid_file)
                elif e.errno == errno.EPERM:
                    print(_("No permission to signal PID %d") % pid)
            else:
                # process exists
                pids[pid] = pid_file
        return pids
    def get_running_pids(self, **kwargs):
        """Get running pids
        :returns: a dict mapping pids (ints) to pid_files (paths)
        """
        return self.signal_pids(signal.SIG_DFL, **kwargs)  # send noop
    def kill_running_pids(self, **kwargs):
        """Kill running pids
        :param graceful: if True, attempt SIGHUP on supporting servers
        :returns: a dict mapping pids (ints) to pid_files (paths)
        """
        graceful = kwargs.get('graceful')
        if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
            sig = signal.SIGHUP
        else:
            sig = signal.SIGTERM
        return self.signal_pids(sig, **kwargs)
    def status(self, pids=None, **kwargs):
        """Display status of server
        :param: pids, if not supplied pids will be populated automatically
        :param: number, if supplied will only lookup the nth server
        :returns: 1 if server is not running, 0 otherwise
        """
        if pids is None:
            pids = self.get_running_pids(**kwargs)
        if not pids:
            number = kwargs.get('number', 0)
            if number:
                kwargs['quiet'] = True
                conf_files = self.conf_files(**kwargs)
                if conf_files:
                    print(_("%(server)s #%(number)d not running (%(conf)s)") %
                          {'server': self.server, 'number': number,
                           'conf': conf_files[0]})
            else:
                print(_("No %s running") % self.server)
            return 1
        for pid, pid_file in pids.items():
            conf_file = self.get_conf_file_name(pid_file)
            print(_("%(server)s running (%(pid)s - %(conf)s)") %
                  {'server': self.server, 'pid': pid, 'conf': conf_file})
        return 0
    def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs):
        """Launch a subprocess for this server.
        :param conf_file: path to conf_file to use as first arg
        :param once: boolean, add once argument to command
        :param wait: boolean, if true capture stdout with a pipe
        :param daemon: boolean, if false ask server to log to console
        :returns: the pid of the spawned process
        """
        args = [self.cmd, conf_file]
        if once:
            args.append('once')
        if not daemon:
            # ask the server to log to console
            args.append('verbose')
        # figure out what we're going to do with stdio
        if not daemon:
            # do nothing, this process is open until the spawns close anyway
            re_out = None
            re_err = None
        else:
            re_err = subprocess.STDOUT
            if wait:
                # we're going to need to block on this...
                re_out = subprocess.PIPE
            else:
                re_out = open(os.devnull, 'w+b')
        proc = subprocess.Popen(args, stdout=re_out, stderr=re_err)
        # record the child's pid so later stop/status invocations can find it
        pid_file = self.get_pid_file_name(conf_file)
        write_file(pid_file, proc.pid)
        self.procs.append(proc)
        return proc.pid
    def wait(self, **kwargs):
        """
        wait on spawned procs to start
        """
        status = 0
        for proc in self.procs:
            # wait for process to close its stdout
            output = proc.stdout.read()
            if kwargs.get('once', False):
                # if you don't want once to wait you can send it to the
                # background on the command line, I generally just run with
                # no-daemon anyway, but this is quieter
                proc.wait()
            if output:
                print(output)
                start = time.time()
                # wait for process to die (output may just be a warning)
                while time.time() - start < WARNING_WAIT:
                    time.sleep(0.1)
                    if proc.poll() is not None:
                        status += proc.returncode
                        break
        return status
    def interact(self, **kwargs):
        """
        wait on spawned procs to terminate
        """
        status = 0
        for proc in self.procs:
            # wait for process to terminate
            proc.communicate()
            # count each non-zero exit as one failure
            if proc.returncode:
                status += 1
        return status
    def launch(self, **kwargs):
        """
        Collect conf files and attempt to spawn the processes for this server
        """
        conf_files = self.conf_files(**kwargs)
        if not conf_files:
            return {}
        pids = self.get_running_pids(**kwargs)
        already_started = False
        for pid, pid_file in pids.items():
            conf_file = self.get_conf_file_name(pid_file)
            # for legacy compat you can't start other servers if one server is
            # already running (unless -n specifies which one you want), this
            # restriction could potentially be lifted, and launch could start
            # any unstarted instances
            if conf_file in conf_files:
                already_started = True
                print(_("%(server)s running (%(pid)s - %(conf)s)") %
                      {'server': self.server, 'pid': pid, 'conf': conf_file})
            elif not kwargs.get('number', 0):
                already_started = True
                print(_("%(server)s running (%(pid)s - %(pid_file)s)") %
                      {'server': self.server, 'pid': pid,
                       'pid_file': pid_file})
        if already_started:
            print(_("%s already started...") % self.server)
            return {}
        # only servers in START_ONCE_SERVERS support the "once" argument
        if self.server not in START_ONCE_SERVERS:
            kwargs['once'] = False
        pids = {}
        for conf_file in conf_files:
            if kwargs.get('once'):
                msg = _('Running %s once') % self.server
            else:
                msg = _('Starting %s') % self.server
            print('%s...(%s)' % (msg, conf_file))
            try:
                pid = self.spawn(conf_file, **kwargs)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # TODO(clayg): should I check if self.cmd exists earlier?
                    print(_("%s does not exist") % self.cmd)
                    break
                else:
                    raise
            pids[pid] = conf_file
        return pids
    def stop(self, **kwargs):
        """Send stop signals to pids for this server
        :returns: a dict mapping pids (ints) to pid_files (paths)
        """
        return self.kill_running_pids(**kwargs)
| |
# None of this is django-specific. Don't import from django.
import os
import sys
import contextlib
import subprocess
import getpass
import logging
import urllib
from os.path import commonprefix
from . import environment
from .utils import get_next_available_port
# Entry-point script name used to locate the SDK on PATH / sys.path.
_SCRIPT_NAME = 'dev_appserver.py'
# Set by _local(); holds the dev_appserver API server instance.
_API_SERVER = None
class Filter(object):
    """Logging filter that drops the SDK's noisy per-query debug records."""
    def filter(self, record):
        # Reject the two known-noisy sources; keep everything else.
        noisy = (
            (record.funcName == '__StarSchemaQueryPlan' and
             record.module == 'datastore_sqlite_stub') or
            (record.funcName == 'Run' and record.module == 'datastore')
        )
        return 0 if noisy else 1
def _disable_sqlite_stub_logging():
    """Silence the SDK's per-query debug logging on the root logger.

    Google logs every local datastore query at debug level; attach a Filter
    to the root logger to drop those records (remove the filter to get them
    back).
    """
    root_logger = logging.getLogger()
    root_logger.addFilter(Filter())
def _find_sdk_from_python_path():
    """Locate the App Engine SDK root via the importable SDK package."""
    import google.appengine
    # Resolve the specific 'google' package directory that actually contains
    # 'appengine' -- several 'google' namespace dirs may exist on sys.path.
    appengine_dir = google.appengine.__path__[0]
    google_dir = os.path.dirname(appengine_dir)
    return os.path.abspath(os.path.dirname(google_dir))
def _find_sdk_from_path():
    """Locate the App Engine SDK by resolving `dev_appserver.py` on PATH.

    Handles both the Cloud SDK layout (bin/ next to a `bootstrapping` dir)
    and the standalone App Engine SDK.
    :raises RuntimeError: Cloud SDK found but app engine platform dir missing
    """
    # SDK installers put `_SCRIPT_NAME` on PATH
    locator = 'where' if sys.platform == "win32" else 'which'
    script_path = subprocess.check_output([locator, _SCRIPT_NAME]).strip()
    sdk_dir = os.path.dirname(os.path.realpath(script_path))
    if not os.path.exists(os.path.join(sdk_dir, 'bootstrapping')):
        # Regular App Engine SDK: the script lives in the SDK root itself
        return sdk_dir
    # Cloud SDK: the real SDK lives under platform/google_appengine
    sdk_dir = os.path.abspath(os.path.join(sdk_dir, '..', 'platform', 'google_appengine'))
    if not os.path.exists(sdk_dir):
        raise RuntimeError(
            'The Cloud SDK is on the path, but the app engine SDK dir could not be found'
        )
    return sdk_dir
def _create_dispatcher(configuration, options):
    """Build (once) and return the devappserver2 Dispatcher.

    The instance is memoized on the function object itself, so repeated
    calls return the same Dispatcher.
    """
    # Imported lazily: the SDK is only on sys.path once activate() has run.
    from google.appengine.tools.devappserver2 import dispatcher
    from google.appengine.tools.devappserver2.devappserver2 import (
        DevelopmentServer, _LOG_LEVEL_TO_RUNTIME_CONSTANT
    )
    from google.appengine.tools.sdk_update_checker import GetVersionObject, \
        _VersionList
    if hasattr(_create_dispatcher, "singleton"):
        return _create_dispatcher.singleton
    # Positional arguments must match dispatcher.Dispatcher's signature for
    # the SDK version in use -- order matters, see the version checks below.
    dispatcher_args = [
        configuration,
        options.host,
        options.port,
        options.auth_domain,
        _LOG_LEVEL_TO_RUNTIME_CONSTANT[options.log_level],
        DevelopmentServer._create_php_config(options),
        DevelopmentServer._create_python_config(options),
        DevelopmentServer._create_java_config(options),
        DevelopmentServer._create_cloud_sql_config(options),
        DevelopmentServer._create_vm_config(options),
        DevelopmentServer._create_module_to_setting(options.max_module_instances,
                                                    configuration, '--max_module_instances'),
        options.use_mtime_file_watcher,
        options.automatic_restart,
        options.allow_skipped_files,
        DevelopmentServer._create_module_to_setting(options.threadsafe_override,
                                                    configuration, '--threadsafe_override')
    ]
    # External port is a new flag introduced in 1.9.19
    current_version = _VersionList(GetVersionObject()['release'])
    if current_version >= _VersionList('1.9.19'):
        dispatcher_args.append(options.external_port)
    if current_version >= _VersionList('1.9.22'):
        dispatcher_args.insert(8, None)  # Custom config setting added in 1.9.22
    _create_dispatcher.singleton = dispatcher.Dispatcher(*dispatcher_args)
    return _create_dispatcher.singleton
@contextlib.contextmanager
def _local(devappserver2=None, configuration=None, options=None, wsgi_request_info=None, **kwargs):
    """Sandbox that initializes local dev_appserver service stubs.

    Sets HTTP_HOST/SERVER_NAME/SERVER_PORT, creates the API server and
    starts the blobstore service; the environment is restored and the
    blobstore service stopped on exit.
    """
    # If we use `_LocalRequestInfo`, deferred tasks don't seem to work,
    # but with the default `WSGIRequestInfo`, building the request url for
    # blobstore uploads fails. So we inherit from `WSGIRequestInfo` and copy
    # the `get_request_url` from `_LocalRequestInfo`
    class CustomWSGIRequestInfo(wsgi_request_info.WSGIRequestInfo):
        def get_request_url(self, request_id):
            """Returns the URL the request e.g. 'http://localhost:8080/foo?bar=baz'.
            Args:
              request_id: The string id of the request making the API call.
            Returns:
              The URL of the request as a string.
            """
            # Prefer HTTP_HOST; fall back to SERVER_NAME[:SERVER_PORT].
            try:
                host = os.environ['HTTP_HOST']
            except KeyError:
                host = os.environ['SERVER_NAME']
                port = os.environ['SERVER_PORT']
                if port != '80':
                    host += ':' + port
            url = 'http://' + host
            url += urllib.quote(os.environ.get('PATH_INFO', '/'))
            if os.environ.get('QUERY_STRING'):
                url += '?' + os.environ['QUERY_STRING']
            return url
    global _API_SERVER
    _disable_sqlite_stub_logging()
    # Snapshot the environment so everything we set below can be undone.
    original_environ = os.environ.copy()
    # Silence warnings about this being unset, localhost:8080 is the dev_appserver default
    url = "localhost"
    port = get_next_available_port(url, 8080)
    os.environ.setdefault("HTTP_HOST", "{}:{}".format(url, port))
    os.environ['SERVER_NAME'] = url
    os.environ['SERVER_PORT'] = str(port)
    os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (os.environ['SERVER_NAME'], os.environ['SERVER_PORT'])
    devappserver2._setup_environ(configuration.app_id)
    storage_path = devappserver2._get_storage_path(options.storage_path, configuration.app_id)
    dispatcher = _create_dispatcher(configuration, options)
    request_data = CustomWSGIRequestInfo(dispatcher)
    # Remember the wsgi request info object so it can be reused to avoid duplication.
    dispatcher._request_data = request_data
    _API_SERVER = devappserver2.DevelopmentServer._create_api_server(
        request_data, storage_path, options, configuration)
    from .blobstore_service import start_blobstore_service, stop_blobstore_service
    start_blobstore_service()
    try:
        yield
    finally:
        # Undo the env mutations and stop the helper service on exit.
        os.environ = original_environ
        stop_blobstore_service()
@contextlib.contextmanager
def _remote(configuration=None, remote_api_stub=None, apiproxy_stub_map=None, **kwargs):
    """Sandbox that wires the service stubs to a deployed app via remote_api.

    Tries OAuth2 first; falls back to username/password prompts if the
    OAuth2 helpers can't be imported.  Restores the original apiproxy and
    sys.ps1 on exit.
    """
    def auth_func():
        # Python 2 `raw_input`; used only on the username/password fallback.
        return raw_input('Google Account Login: '), getpass.getpass('Password: ')
    original_apiproxy = apiproxy_stub_map.apiproxy
    # Strip the dev_appserver "dev~" prefix to get the real app id.
    if configuration.app_id.startswith('dev~'):
        app_id = configuration.app_id[4:]
    else:
        app_id = configuration.app_id
    os.environ['HTTP_HOST'] = '{0}.appspot.com'.format(app_id)
    os.environ['DEFAULT_VERSION_HOSTNAME'] = os.environ['HTTP_HOST']
    try:
        from google.appengine.tools.appcfg import APPCFG_CLIENT_ID, APPCFG_CLIENT_NOTSOSECRET
        from google.appengine.tools import appengine_rpc_httplib2
        params = appengine_rpc_httplib2.HttpRpcServerOAuth2.OAuth2Parameters(
            access_token=None,
            client_id=APPCFG_CLIENT_ID,
            client_secret=APPCFG_CLIENT_NOTSOSECRET,
            scope=remote_api_stub._OAUTH_SCOPES,
            refresh_token=None,
            credential_file=os.path.expanduser("~/.djangae_oauth2_tokens"),
            token_uri=None
        )
        def factory(*args, **kwargs):
            # Cap the number of interactive auth attempts.
            kwargs["auth_tries"] = 3
            return appengine_rpc_httplib2.HttpRpcServerOAuth2(*args, **kwargs)
        remote_api_stub.ConfigureRemoteApi(
            app_id=None,
            path='/_ah/remote_api',
            auth_func=params,
            servername='{0}.appspot.com'.format(app_id),
            secure=True,
            save_cookies=True,
            rpc_server_factory=factory
        )
    except ImportError:
        logging.exception("Unable to use oauth2 falling back to username/password")
        remote_api_stub.ConfigureRemoteApi(
            None,
            '/_ah/remote_api',
            auth_func,
            servername='{0}.appspot.com'.format(app_id),
            secure=True,
        )
    # Color the interactive prompt red so it's obvious you're on production.
    ps1 = getattr(sys, 'ps1', None)
    red = "\033[0;31m"
    native = "\033[m"
    sys.ps1 = red + '(remote) ' + app_id + native + ' >>> '
    try:
        yield
    finally:
        apiproxy_stub_map.apiproxy = original_apiproxy
        sys.ps1 = ps1
@contextlib.contextmanager
def _test(**kwargs):
    """No-op sandbox: sets up no service stubs (pair with
    google.appengine.ext.testbed, see activate()'s docstring)."""
    yield
# Sandbox name constants accepted by activate().
LOCAL = 'local'
REMOTE = 'remote'
TEST = 'test'
# Maps sandbox name -> context manager implementing it.
SANDBOXES = {
    LOCAL: _local,
    REMOTE: _remote,
    TEST: _test,
}
# Populated by activate(): the parsed dev_appserver options and the
# application configuration, kept for later access.
_OPTIONS = None
_CONFIG = None
@contextlib.contextmanager
def activate(sandbox_name, add_sdk_to_path=False, new_env_vars=None, **overrides):
    """Context manager for command-line scripts started outside of dev_appserver.
    :param sandbox_name: str, one of 'local', 'remote' or 'test'
    :param add_sdk_to_path: bool, optionally adds the App Engine SDK to sys.path
    :param new_env_vars: dict of extra environment variables merged into the
                         module's app.yaml env_variables
    :param overrides: option values applied onto the parsed dev_appserver
                      options before the sandbox starts
    Available sandboxes:
    local: Adds libraries specified in app.yaml to the path and initializes local service stubs as though
           dev_appserver were running.
    remote: Adds libraries specified in app.yaml to the path and initializes remote service stubs.
    test: Adds libraries specified in app.yaml to the path and sets up no service stubs. Use this
          with `google.appengine.ext.testbed` to provide isolation for tests.
    Example usage:
        import djangae.sandbox as sandbox
        with sandbox.activate('local'):
            from django.core.management import execute_from_command_line
            execute_from_command_line(sys.argv)
    """
    if sandbox_name not in SANDBOXES:
        raise RuntimeError('Unknown sandbox "{}"'.format(sandbox_name))
    project_root = environment.get_application_root()
    # Store our original sys.path before we do anything, this must be tacked
    # onto the end of the other paths so we can access globally installed things (e.g. ipdb etc.)
    original_path = sys.path[:]
    # Setup paths as though we were running dev_appserver. This is similar to
    # what the App Engine script wrappers do.
    if add_sdk_to_path:
        try:
            import wrapper_util  # Already on sys.path
        except ImportError:
            sys.path[0:0] = [_find_sdk_from_path()]
            import wrapper_util
    else:
        try:
            import wrapper_util
        except ImportError:
            raise RuntimeError("Couldn't find a recent enough Google App Engine SDK, make sure you are using at least 1.9.6")
    sdk_path = _find_sdk_from_python_path()
    _PATHS = wrapper_util.Paths(sdk_path)
    project_paths = []  # Paths under the application root
    system_paths = []  # All other paths
    app_root = environment.get_application_root()
    # We need to look at the original path, and make sure that any paths
    # which are under the project root are first, then any other paths
    # are added after the SDK ones
    for path in _PATHS.scrub_path(_SCRIPT_NAME, original_path):
        if commonprefix([app_root, path]) == app_root:
            project_paths.append(path)
        else:
            system_paths.append(path)
    # We build a list of SDK paths, and add any additional ones required for
    # the oauth client
    appengine_paths = _PATHS.script_paths(_SCRIPT_NAME)
    for path in _PATHS.oauth_client_extra_paths:
        if path not in appengine_paths:
            appengine_paths.append(path)
    # Now, we make sure that paths within the project take precedence, followed
    # by the SDK, then finally any paths from the system Python (for stuff like
    # ipdb etc.)
    sys.path = (
        project_paths +
        appengine_paths +
        system_paths
    )
    # Gotta set the runtime properly otherwise it changes appengine imports, like webapp
    # when you are not running dev_appserver
    import yaml
    # NOTE(review): yaml.load() without an explicit Loader can construct
    # arbitrary objects; app.yaml is developer-controlled, but consider
    # yaml.safe_load here.
    with open(os.path.join(project_root, 'app.yaml'), 'r') as app_yaml:
        app_yaml = yaml.load(app_yaml)
    os.environ['APPENGINE_RUNTIME'] = app_yaml.get('runtime', '')
    # Initialize as though `dev_appserver.py` is about to run our app, using all the
    # configuration provided in app.yaml.
    import google.appengine.tools.devappserver2.application_configuration as application_configuration
    import google.appengine.tools.devappserver2.python.sandbox as sandbox
    import google.appengine.tools.devappserver2.devappserver2 as devappserver2
    import google.appengine.tools.devappserver2.wsgi_request_info as wsgi_request_info
    import google.appengine.ext.remote_api.remote_api_stub as remote_api_stub
    import google.appengine.api.apiproxy_stub_map as apiproxy_stub_map
    # The argparser is the easiest way to get the default options.
    options = devappserver2.PARSER.parse_args([project_root])
    options.enable_task_running = False  # Disable task running by default, it won't work without a running server
    options.skip_sdk_update_check = True
    for option in overrides:
        if not hasattr(options, option):
            raise ValueError("Unrecognized sandbox option: {}".format(option))
        setattr(options, option, overrides[option])
    configuration = application_configuration.ApplicationConfiguration(options.config_paths)
    # Enable built-in libraries from app.yaml without enabling the full sandbox.
    module = configuration.modules[0]
    for l in sandbox._enable_libraries(module.normalized_libraries):
        sys.path.insert(1, l)
    # Propagate provided environment variables to the sandbox.
    # This is required for the runserver management command settings flag,
    # which sets an environment variable needed by Django.
    from google.appengine.api.appinfo import EnvironmentVariables
    old_env_vars = module.env_variables if module.env_variables else {}
    if new_env_vars is None:
        new_env_vars = {}
    module._app_info_external.env_variables = EnvironmentVariables.Merge(
        old_env_vars,
        new_env_vars,
    )
    try:
        global _OPTIONS
        global _CONFIG
        _CONFIG = configuration
        _OPTIONS = options  # Store the options globally so they can be accessed later
        kwargs = dict(
            devappserver2=devappserver2,
            configuration=configuration,
            options=options,
            wsgi_request_info=wsgi_request_info,
            remote_api_stub=remote_api_stub,
            apiproxy_stub_map=apiproxy_stub_map,
        )
        with SANDBOXES[sandbox_name](**kwargs):
            yield
    finally:
        # Always restore the interpreter's original import path.
        sys.path = original_path
@contextlib.contextmanager
def allow_mode_write():
    """Temporarily let the dev_appserver file stubs open files for writing,
    restoring the original allowed modes/dirs on exit."""
    from google.appengine.tools.devappserver2.python import stubs
    saved_modes = stubs.FakeFile.ALLOWED_MODES
    saved_dirs = stubs.FakeFile._allowed_dirs
    writable_modes = set(saved_modes)
    writable_modes.update(('w', 'wb'))
    allowed_dirs = set(saved_dirs or [])
    # for some reason when we call gettempdir in some scenarios
    # (we experience that in ajax call when we tried to render template
    # with assets) we might end up with thread.error when Python tries
    # to release the lock. Since we mess with the tempfile in allow_modules
    # we could - instead of calling gettempdir - simply add default temp
    # directories.
    allowed_dirs.update(['/tmp', '/var/tmp', '/usr/tmp'])
    stubs.FakeFile.ALLOWED_MODES = frozenset(writable_modes)
    stubs.FakeFile._allowed_dirs = frozenset(allowed_dirs)
    try:
        yield
    finally:
        stubs.FakeFile.ALLOWED_MODES = saved_modes
        stubs.FakeFile._allowed_dirs = saved_dirs
class allow_modules_context():
    """Context manager that temporarily undoes the SDK's import patching.

    Clears sys.meta_path and reloads os/tempfile/select/subprocess so their
    real implementations are available inside the block; __exit__ restores
    meta_path and re-reloads the modules, putting the patched versions back.
    """
    def __enter__(self):
        import sys
        import subprocess
        import os
        import tempfile
        import select
        # Clear the meta_path so google does not screw our imports, make a copy
        # of the old one
        self.old_meta_path = sys.meta_path
        sys.meta_path = []
        self.patch_modules = [os, tempfile, select, subprocess]
        import copy
        self.environ = copy.copy(os.environ)
        # NOTE(review): `reload` is the Python 2 builtin -- this code is not
        # Python 3 compatible as written (use importlib.reload there).
        for mod in self.patch_modules:
            _system = reload(mod)
            mod.__dict__.update(_system.__dict__)
        # We have to maintain the environment, or bad things happen
        os.environ = self.environ  # This gets monkey patched by GAE
    def __exit__(self, *exc):
        # Restore the original path
        sys.meta_path = self.old_meta_path
        # Reload the original modules
        for mod in self.patch_modules:
            _system = reload(mod)
            mod.__dict__.update(_system.__dict__)
        # Put the original os back, again
        os.environ = self.environ
def allow_modules(func, *args):
    """
    dev_appserver likes to kill your imports with meta_path madness so you
    use the internal ones instead of system ones, this wrapper reloads the
    modules and patches the google internal ones with the __dict__ from the
    system modules, this seems to be the cleanest way to do this even though
    it is a bit hacky

    :param func: the callable to run inside allow_modules_context()
    :returns: a wrapper that enters allow_modules_context() per call
    """
    import functools

    # functools.wraps preserves func's __name__/__doc__ etc. on the wrapper,
    # which the original version lost.
    @functools.wraps(func)
    def _wrapped(*args, **kwargs):
        with allow_modules_context():
            return func(*args, **kwargs)
    return _wrapped
| |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers.experimental import preprocessing
import autokeras as ak
from autokeras import keras_layers
from autokeras import test_utils
from autokeras.engine import tuner as tuner_module
from autokeras.tuners import greedy
def called_with_early_stopping(func):
    """Return True if the first recorded call to the mocked `func` included
    an EarlyStopping instance in its `callbacks` keyword argument.
    """
    # call_args_list[0][1] is the kwargs dict of the first recorded call.
    callbacks = func.call_args_list[0][1]["callbacks"]
    # Pass a generator (not a list) so any() can short-circuit on the first
    # match instead of materializing every isinstance check.
    return any(
        isinstance(callback, keras.callbacks.EarlyStopping)
        for callback in callbacks
    )
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_final_fit_with_specified_epochs(_, final_fit, super_search, tmp_path):
    # mock.patch decorators inject mocks bottom-up: _prepare_model_build -> _,
    # final_fit -> final_fit, BaseTuner.search -> super_search.
    tuner = greedy.Greedy(hypermodel=test_utils.build_graph(), directory=tmp_path)
    # give final_fit a 3-tuple of mocks (the shape search() presumably unpacks)
    final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
    tuner.search(x=None, epochs=10, validation_data=None)
    # an explicitly supplied `epochs` must be forwarded verbatim to final_fit
    assert final_fit.call_args_list[0][1]["epochs"] == 10
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_tuner_call_super_with_early_stopping(_, final_fit, super_search, tmp_path):
    # search() should pass an EarlyStopping callback down to the base
    # tuner's search (checked via the super_search mock's recorded kwargs).
    tuner = greedy.Greedy(hypermodel=test_utils.build_graph(), directory=tmp_path)
    final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
    tuner.search(x=None, epochs=10, validation_data=None)
    assert called_with_early_stopping(super_search)
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch(
    "autokeras.engine.tuner.AutoTuner.get_best_models", return_value=[mock.Mock()]
)
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
@mock.patch("autokeras.pipeline.load_pipeline")
@mock.patch("keras_tuner.Oracle.get_best_trials", return_value=[mock.Mock()])
def test_no_final_fit_without_epochs_and_fov(
    _, _1, _2, get_best_models, final_fit, super_search, tmp_path
):
    # With epochs=None and no validation data, search() must not retrain:
    # final_fit stays uncalled (best models/trials are mocked so the
    # no-retrain path has something to return).
    tuner = greedy.Greedy(hypermodel=test_utils.build_graph(), directory=tmp_path)
    tuner.search(x=None, epochs=None, validation_data=None)
    final_fit.assert_not_called()
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch(
    "autokeras.engine.tuner.AutoTuner._get_best_trial_epochs", return_value=2
)
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_final_fit_best_epochs_if_epoch_unspecified(
    _, best_epochs, final_fit, super_search, tmp_path
):
    # With epochs=None, final_fit should fall back to the best trial's epoch
    # count (_get_best_trial_epochs is patched to return 2).
    tuner = greedy.Greedy(hypermodel=test_utils.build_graph(), directory=tmp_path)
    final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
    tuner.search(
        x=mock.Mock(), epochs=None, validation_split=0.2, validation_data=mock.Mock()
    )
    assert final_fit.call_args_list[0][1]["epochs"] == 2
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch(
    "autokeras.engine.tuner.AutoTuner._get_best_trial_epochs", return_value=2
)
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_super_with_1k_epochs_if_epoch_unspecified(
    _, best_epochs, final_fit, super_search, tmp_path
):
    # With epochs=None the base search should be launched with a 1000-epoch
    # cap plus an EarlyStopping callback to terminate it sooner.
    tuner = greedy.Greedy(hypermodel=test_utils.build_graph(), directory=tmp_path)
    final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
    tuner.search(
        x=mock.Mock(), epochs=None, validation_split=0.2, validation_data=mock.Mock()
    )
    assert super_search.call_args_list[0][1]["epochs"] == 1000
    assert called_with_early_stopping(super_search)
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_tuner_not_call_super_search_with_overwrite(
    _, final_fit, super_search, tmp_path
):
    # A second tuner pointed at the same directory should pick up the saved
    # state and skip the underlying search entirely.
    tuner = greedy.Greedy(hypermodel=test_utils.build_graph(), directory=tmp_path)
    final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
    tuner.search(x=None, epochs=10, validation_data=None)
    tuner.save()
    super_search.reset_mock()
    tuner = greedy.Greedy(hypermodel=test_utils.build_graph(), directory=tmp_path)
    tuner.search(x=None, epochs=10, validation_data=None)
    super_search.assert_not_called()
def test_tuner_does_not_crash_with_distribution_strategy(tmp_path):
    # Building the hypermodel under a MirroredStrategy must not raise.
    tuner = greedy.Greedy(
        hypermodel=test_utils.build_graph(),
        directory=tmp_path,
        distribution_strategy=tf.distribute.MirroredStrategy(),
    )
    tuner.hypermodel.build(tuner.oracle.hyperparameters)
def test_preprocessing_adapt_with_cat_to_int_and_norm():
    """adapt() should run on a model mixing category encoding and normalization."""
    # np.str_ replaces the identical `np.unicode` alias, which was deprecated
    # in NumPy 1.20 and removed in NumPy 2.0.
    x = np.array([["a", 5], ["b", 6]]).astype(np.str_)
    y = np.array([[1, 2], [3, 4]]).astype(np.str_)
    dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(32)
    model = keras.models.Sequential()
    model.add(keras.Input(shape=(2,), dtype=tf.string))
    model.add(keras_layers.MultiCategoryEncoding(["int", "none"]))
    model.add(preprocessing.Normalization(axis=-1))
    tuner_module.AutoTuner.adapt(model, dataset)
def test_preprocessing_adapt_with_text_vec():
    # The subclass records whether adapt() was actually invoked on the layer.
    class MockLayer(preprocessing.TextVectorization):
        def adapt(self, *args, **kwargs):
            super().adapt(*args, **kwargs)
            self.is_called = True
    x_train = test_utils.generate_text_data()
    y_train = np.random.randint(0, 2, (100,))
    dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
    layer1 = MockLayer(max_tokens=5000, output_mode="int", output_sequence_length=40)
    model = keras.models.Sequential()
    model.add(keras.Input(shape=(1,), dtype=tf.string))
    model.add(layer1)
    model.add(keras.layers.Embedding(50001, 10))
    model.add(keras.layers.Dense(1))
    tuner_module.AutoTuner.adapt(model, dataset)
    # AutoTuner.adapt must have called adapt() on the preprocessing layer
    assert layer1.is_called
def test_adapt_with_model_with_preprocessing_layer_only():
    """adapt() should work for a model that is only a preprocessing layer."""
    inp = keras.Input(shape=(10,))
    out = keras.layers.experimental.preprocessing.Normalization()(inp)
    model = keras.Model(inp, out)
    dataset = tf.data.Dataset.from_tensor_slices(
        (np.random.rand(100, 10), np.random.rand(100, 10))
    ).batch(32)
    greedy.Greedy.adapt(model, dataset)
def test_build_block_in_blocks_with_same_name(tmp_path):
    """Nested blocks that share a hyperparameter scope should yield the
    same hyperparameter set in every trial.
    """

    class Block1(ak.Block):
        def build(self, hp, inputs):
            hp.Boolean("a")
            return keras.layers.Dense(3)(tf.nest.flatten(inputs)[0])

    class Block2(ak.Block):
        def build(self, hp, inputs):
            hp.Boolean("b")
            # Builds an inner block directly, creating nested hp scopes
            # with potentially colliding names.
            return Block1().build(hp, inputs)

    inputs = ak.Input()
    outputs = Block2()(inputs)
    outputs = ak.RegressionHead()(outputs)
    auto_model = ak.AutoModel(inputs, outputs, max_trials=5, directory=tmp_path)
    auto_model.fit(np.random.rand(100, 5), np.random.rand(100, 1), epochs=1)
    trials = list(auto_model.tuner.oracle.trials.values())
    # Every trial must expose the same number of hyperparameters,
    # regardless of the nested-block name collision.  (Removed a leftover
    # debug print of each trial's hyperparameter values.)
    for trial in trials:
        assert len(trial.hyperparameters.values) == len(
            trials[0].hyperparameters.values
        )
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NULocation(NURESTObject):
    """ Represents a Location in the VSD
        Notes:
            Gateway location details.
    """
    # NOTE(review): this class follows the bambou generated-model pattern
    # (expose_attribute + fetchers); it is presumably auto-generated from the
    # VSD API spec -- confirm before hand-editing.
    __rest_name__ = "location"
    __resource_name__ = "locations"
    ## Constants
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    def __init__(self, **kwargs):
        """ Initializes a Location instance
            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary
            Examples:
                >>> location = NULocation(id=u'xxxx-xxx-xxx-xxx', name=u'Location')
                >>> location = NULocation(data=my_dict)
        """
        super(NULocation, self).__init__()
        # Read/Write Attributes
        self._last_updated_by = None
        self._last_updated_date = None
        self._latitude = None
        self._address = None
        self._ignore_geocode = None
        self._time_zone_id = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._locality = None
        self._longitude = None
        self._country = None
        self._creation_date = None
        self._associated_entity_name = None
        self._associated_entity_type = None
        self._state = None
        self._owner = None
        self._external_id = None
        # Map each local attribute onto its camelCase VSD API field name.
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="latitude", remote_name="latitude", attribute_type=float, is_required=False, is_unique=False)
        self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="ignore_geocode", remote_name="ignoreGeocode", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="time_zone_id", remote_name="timeZoneID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="locality", remote_name="locality", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="longitude", remote_name="longitude", attribute_type=float, is_required=False, is_unique=False)
        self.expose_attribute(local_name="country", remote_name="country", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_entity_name", remote_name="associatedEntityName", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="state", remote_name="state", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        # Fetchers
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self._compute_args(**kwargs)
    # Properties
    @property
    def last_updated_by(self):
        """ Get last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by
    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value
    @property
    def last_updated_date(self):
        """ Get last_updated_date value.
            Notes:
                Time stamp when this object was last updated.
                This attribute is named `lastUpdatedDate` in VSD API.
        """
        return self._last_updated_date
    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.
            Notes:
                Time stamp when this object was last updated.
                This attribute is named `lastUpdatedDate` in VSD API.
        """
        self._last_updated_date = value
    @property
    def latitude(self):
        """ Get latitude value.
            Notes:
                Latitude in decimal format.
        """
        return self._latitude
    @latitude.setter
    def latitude(self, value):
        """ Set latitude value.
            Notes:
                Latitude in decimal format.
        """
        self._latitude = value
    @property
    def address(self):
        """ Get address value.
            Notes:
                Formatted address including property number, street name, suite or office number, ...
        """
        return self._address
    @address.setter
    def address(self, value):
        """ Set address value.
            Notes:
                Formatted address including property number, street name, suite or office number, ...
        """
        self._address = value
    @property
    def ignore_geocode(self):
        """ Get ignore_geocode value.
            Notes:
                Request BSS to perform a geocode on the address - If no value passed, requestGeocode will be set to true
                This attribute is named `ignoreGeocode` in VSD API.
        """
        return self._ignore_geocode
    @ignore_geocode.setter
    def ignore_geocode(self, value):
        """ Set ignore_geocode value.
            Notes:
                Request BSS to perform a geocode on the address - If no value passed, requestGeocode will be set to true
                This attribute is named `ignoreGeocode` in VSD API.
        """
        self._ignore_geocode = value
    @property
    def time_zone_id(self):
        """ Get time_zone_id value.
            Notes:
                Time zone in which the Gateway is located. This can be in the form of a UTC/GMT offset, continent/city location, or country/region. The available time zones can be found in /usr/share/zoneinfo on a Linux machine or retrieved with TimeZone.getAvailableIDs() in Java. Refer to the IANA (Internet Assigned Numbers Authority) for a list of time zones. URL : http://www.iana.org/time-zones Default value is UTC (translating to Etc/Zulu)
                This attribute is named `timeZoneID` in VSD API.
        """
        return self._time_zone_id
    @time_zone_id.setter
    def time_zone_id(self, value):
        """ Set time_zone_id value.
            Notes:
                Time zone in which the Gateway is located. This can be in the form of a UTC/GMT offset, continent/city location, or country/region. The available time zones can be found in /usr/share/zoneinfo on a Linux machine or retrieved with TimeZone.getAvailableIDs() in Java. Refer to the IANA (Internet Assigned Numbers Authority) for a list of time zones. URL : http://www.iana.org/time-zones Default value is UTC (translating to Etc/Zulu)
                This attribute is named `timeZoneID` in VSD API.
        """
        self._time_zone_id = value
    @property
    def embedded_metadata(self):
        """ Get embedded_metadata value.
            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
                This attribute is named `embeddedMetadata` in VSD API.
        """
        return self._embedded_metadata
    @embedded_metadata.setter
    def embedded_metadata(self, value):
        """ Set embedded_metadata value.
            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
                This attribute is named `embeddedMetadata` in VSD API.
        """
        self._embedded_metadata = value
    @property
    def entity_scope(self):
        """ Get entity_scope value.
            Notes:
                Specify if scope of entity is Data center or Enterprise level
                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope
    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.
            Notes:
                Specify if scope of entity is Data center or Enterprise level
                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value
    @property
    def locality(self):
        """ Get locality value.
            Notes:
                Locality/City/County
        """
        return self._locality
    @locality.setter
    def locality(self, value):
        """ Set locality value.
            Notes:
                Locality/City/County
        """
        self._locality = value
    @property
    def longitude(self):
        """ Get longitude value.
            Notes:
                Longitude in decimal format.
        """
        return self._longitude
    @longitude.setter
    def longitude(self, value):
        """ Set longitude value.
            Notes:
                Longitude in decimal format.
        """
        self._longitude = value
    @property
    def country(self):
        """ Get country value.
            Notes:
                Country
        """
        return self._country
    @country.setter
    def country(self, value):
        """ Set country value.
            Notes:
                Country
        """
        self._country = value
    @property
    def creation_date(self):
        """ Get creation_date value.
            Notes:
                Time stamp when this object was created.
                This attribute is named `creationDate` in VSD API.
        """
        return self._creation_date
    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.
            Notes:
                Time stamp when this object was created.
                This attribute is named `creationDate` in VSD API.
        """
        self._creation_date = value
    @property
    def associated_entity_name(self):
        """ Get associated_entity_name value.
            Notes:
                Name of the associated entity.
                This attribute is named `associatedEntityName` in VSD API.
        """
        return self._associated_entity_name
    @associated_entity_name.setter
    def associated_entity_name(self, value):
        """ Set associated_entity_name value.
            Notes:
                Name of the associated entity.
                This attribute is named `associatedEntityName` in VSD API.
        """
        self._associated_entity_name = value
    @property
    def associated_entity_type(self):
        """ Get associated_entity_type value.
            Notes:
                Object type of the associated entity.
                This attribute is named `associatedEntityType` in VSD API.
        """
        return self._associated_entity_type
    @associated_entity_type.setter
    def associated_entity_type(self, value):
        """ Set associated_entity_type value.
            Notes:
                Object type of the associated entity.
                This attribute is named `associatedEntityType` in VSD API.
        """
        self._associated_entity_type = value
    @property
    def state(self):
        """ Get state value.
            Notes:
                State/Province/Region
        """
        return self._state
    @state.setter
    def state(self, value):
        """ Set state value.
            Notes:
                State/Province/Region
        """
        self._state = value
    @property
    def owner(self):
        """ Get owner value.
            Notes:
                Identifies the user that has created this object.
        """
        return self._owner
    @owner.setter
    def owner(self, value):
        """ Set owner value.
            Notes:
                Identifies the user that has created this object.
        """
        self._owner = value
    @property
    def external_id(self):
        """ Get external_id value.
            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        return self._external_id
    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.
            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
| |
from datetime import datetime
from collections import OrderedDict
from django.conf import settings
from django.http import Http404
from django.utils import timezone as tz
from django.contrib.auth.models import User
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import permissions
from .simpleviews import SimpleView, SimpleGetMixin
from utils import solr
from . import serializers
from . import filters
from .uris import APIUris
import logging
# set up logger, for debugging
logger = logging.getLogger('sierra.custom')
@api_view(('GET',))
def api_root(request):
    """API entry point: links to every list resource plus server time info."""
    offset_delta = tz.get_default_timezone().utcoffset(datetime.now())
    utc_offset = offset_delta.total_seconds() / 3600
    links = {
        'self': {
            'href': APIUris.get_uri('api-root', req=request, absolute=True)
        },
    }
    # Each list endpoint follows the '<resource>-list' URI naming pattern.
    list_resources = ('apiusers', 'bibs', 'marc', 'items', 'eresources',
                      'locations', 'itemtypes', 'itemstatuses',
                      'callnumbermatches', 'firstitemperlocation')
    for resource in list_resources:
        links[resource] = {
            'href': APIUris.get_uri('{}-list'.format(resource), req=request,
                                    absolute=True)
        }
    ret_val = OrderedDict()
    ret_val['catalogApi'] = OrderedDict()
    ret_val['catalogApi']['version'] = '1'
    ret_val['catalogApi']['_links'] = OrderedDict(sorted(links.items()))
    ret_val['serverTime'] = {
        'currentTime': tz.now(),
        'timezone': tz.get_default_timezone_name(),
        'utcOffset': utc_offset
    }
    return Response(ret_val)
class APIUserList(SimpleGetMixin, SimpleView):
    """
    Paginated list of API Users permissions. Requires authorization to
    view.
    """
    # Only Users that have an associated apiuser row are exposed.
    queryset = User.objects.exclude(apiuser__exact=None)
    serializer_class = serializers.APIUserSerializer
    resource_name = 'apiusers'
    permission_classes = (permissions.IsAuthenticated,)
class APIUserDetail(SimpleGetMixin, SimpleView):
    """
    View one API User. Requires authorization to view.
    """
    queryset = User.objects.exclude(apiuser__exact=None)
    serializer_class = serializers.APIUserSerializer
    resource_name = 'apiusers'
    multi = False
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        """Return the User matching the `id` URL kwarg, or raise Http404."""
        matches = self.get_queryset().filter(username=self.kwargs['id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class ItemList(SimpleGetMixin, SimpleView):
    """
    Paginated list of items. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    # Items are served from the Solr index, not the relational DB.
    queryset = solr.Queryset().filter(type='Item')
    serializer_class = serializers.ItemSerializer
    # Fields clients may sort on via the 'order_by' query parameter.
    ordering = ['call_number', 'barcode', 'id', 'record_number',
                'parent_bib_id', 'parent_bib_record_number', 'volume',
                'copy_number', 'checkout_date']
    # Fields clients may filter on via query parameters.
    filter_fields = ['record_number', 'call_number', 'volume', 'volume_sort',
        'copy_number', 'barcode', 'long_messages', 'internal_notes',
        'public_notes', 'local_code1', 'number_of_renewals', 'item_type_code',
        'price', 'internal_use_count', 'iuse3_count', 'total_checkout_count',
        'total_renewal_count', 'year_to_date_checkout_count',
        'last_year_to_date_checkout_count', 'location_code', 'status_code',
        'due_date', 'checkout_date', 'last_checkin_date', 'overdue_date',
        'recall_date', 'record_creation_date', 'record_last_updated_date',
        'record_revision_number', 'suppressed', 'parent_bib_record_number',
        'parent_bib_title', 'parent_bib_main_author',
        'parent_bib_publication_year', 'call_number_type']
    resource_name = 'items'
class ItemDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one item.
    """
    queryset = solr.Queryset().filter(type='Item')
    serializer_class = serializers.ItemSerializer
    resource_name = 'items'
    multi = False

    def get_object(self):
        """Return the item matching the `id` URL kwarg, or raise Http404."""
        matches = self.get_queryset().filter(id=self.kwargs['id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class BibList(SimpleGetMixin, SimpleView):
    """
    Paginated list of bibs. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    # Bibs live in their own Haystack/Solr connection.
    queryset = solr.Queryset(using=
        settings.REST_VIEWS_HAYSTACK_CONNECTIONS['Bibs'])
    serializer_class = serializers.BibSerializer
    # Fields clients may sort on via the 'order_by' query parameter.
    ordering = ['call_number', 'id', 'record_number', 'material_type',
                'timestamp', 'main_call_number_sort']
    # Fields clients may filter on via query parameters.
    filter_fields = ['record_number', 'call_number', 'id', 'suppressed',
                     'material_type', 'issn_numbers', 'timestamp',
                     'full_title', 'main_title', 'subtitle',
                     'statement_of_responsibility', 'uniform_title',
                     'alternate_titles', 'related_titles', 'series', 'creator',
                     'contributors', 'series_creators', 'people',
                     'corporations', 'meetings', 'imprints',
                     'publication_country', 'publication_places', 'publishers',
                     'publication_dates', 'full_subjects', 'general_terms',
                     'topic_terms', 'genre_terms', 'era_terms', 'form_terms',
                     'other_terms', 'physical_characteristics', 'toc_notes',
                     'context_notes', 'summary_notes', 'main_call_number',
                     'loc_call_numbers', 'dewey_call_numbers',
                     'other_call_numbers', 'sudoc_numbers', 'isbn_numbers',
                     'lccn_numbers', 'oclc_numbers']
    resource_name = 'bibs'
class BibDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one bib.
    """
    queryset = solr.Queryset(using=
        settings.REST_VIEWS_HAYSTACK_CONNECTIONS['Bibs'])
    serializer_class = serializers.BibSerializer
    resource_name = 'bibs'
    multi = False

    def get_object(self):
        """Return the bib matching the `id` URL kwarg, or raise Http404."""
        matches = self.get_queryset().filter(id=self.kwargs['id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class MarcList(SimpleGetMixin, SimpleView):
    """
    Paginated list of MARC records. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    # MARC records live in their own Haystack/Solr connection.
    queryset = solr.Queryset(using=
        settings.REST_VIEWS_HAYSTACK_CONNECTIONS['Marc'])
    serializer_class = serializers.MarcSerializer
    resource_name = 'marc'
    # Regex entries allow filtering on raw MARC field/subfield parameters
    # (e.g. mf_245 or sf_245a); MarcFilter interprets them.
    filter_fields = ['record_number', '/^(mf_)?\\d{3}$/',
                     '/^(sf_)?\\d{3}[a-z0-9]$/']
    filter_class = filters.MarcFilter
class MarcDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one MARC record.
    """
    queryset = solr.Queryset(using=
        settings.REST_VIEWS_HAYSTACK_CONNECTIONS['Marc'])
    serializer_class = serializers.MarcSerializer
    resource_name = 'marc'
    multi = False

    def get_object(self):
        """Return the MARC record matching the `id` URL kwarg, or 404."""
        matches = self.get_queryset().filter(id=self.kwargs['id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class EResourceList(SimpleGetMixin, SimpleView):
    """
    Paginated list of eresources. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    queryset = solr.Queryset().filter(type='eResource')
    serializer_class = serializers.EResourceSerializer
    # Fields clients may sort on via the 'order_by' query parameter.
    ordering = ['record_number', 'parent_bib_record_number', 'eresource_type',
                'publisher', 'title', 'alert']
    # Fields clients may filter on via query parameters.
    filter_fields = ['record_number', 'parent_bib_record_number',
                     'eresource_type', 'publisher', 'title',
                     'alternate_titles', 'subjects', 'summary',
                     'internal_notes', 'public_notes', 'alert', 'holdings',
                     'suppressed']
    resource_name = 'eresources'
class EResourceDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one eresource.
    """
    queryset = solr.Queryset().filter(type='eResource')
    serializer_class = serializers.EResourceSerializer
    resource_name = 'eresources'
    multi = False

    def get_object(self):
        """Return the eresource matching the `id` URL kwarg, or 404."""
        matches = self.get_queryset().filter(id=self.kwargs['id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class LocationList(SimpleGetMixin, SimpleView):
    """
    Paginated list of locations. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    queryset = solr.Queryset().filter(type='Location')
    serializer_class = serializers.LocationSerializer
    resource_name = 'locations'
    ordering = ['code', 'label']
    filter_fields = ['code', 'label']
class LocationDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one Location.
    """
    queryset = solr.Queryset().filter(type='Location')
    serializer_class = serializers.LocationSerializer
    resource_name = 'locations'
    multi = False

    def get_object(self):
        """Return the location matching the `code` URL kwarg, or 404."""
        matches = self.get_queryset().filter(code=self.kwargs['code'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class ItemTypesList(SimpleGetMixin, SimpleView):
    """
    Paginated list of item types. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    queryset = solr.Queryset().filter(type='Itype')
    serializer_class = serializers.ItemTypeSerializer
    resource_name = 'itemtypes'
    ordering = ['code', 'label']
    filter_fields = ['code', 'label']
class ItemTypesDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one item type.
    """
    queryset = solr.Queryset().filter(type='Itype')
    serializer_class = serializers.ItemTypeSerializer
    resource_name = 'itemtypes'
    multi = False

    def get_object(self):
        """Return the item type matching the `code` URL kwarg, or 404."""
        matches = self.get_queryset().filter(code=self.kwargs['code'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class ItemStatusesList(SimpleGetMixin, SimpleView):
    """
    Paginated list of item statuses. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    queryset = solr.Queryset().filter(type='ItemStatus')
    serializer_class = serializers.ItemStatusSerializer
    resource_name = 'itemstatuses'
    ordering = ['code', 'label']
    filter_fields = ['code', 'label']
class ItemStatusesDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one Item Status.
    """
    queryset = solr.Queryset().filter(type='ItemStatus')
    serializer_class = serializers.ItemStatusSerializer
    resource_name = 'itemstatuses'
    multi = False

    def get_object(self):
        """Return the item status matching the `code` URL kwarg, or 404."""
        matches = self.get_queryset().filter(code=self.kwargs['code'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class CallnumbermatchesList(SimpleGetMixin, SimpleView):
    """
    Returns the first X matching call numbers, where X is the supplied
    limit. Pagination (offset) is not supported.

    You can filter using the following fields: callNumber, locationCode,
    and callNumberType.
    """
    queryset = solr.Queryset().filter(type='Item').only(
        'call_number').order_by('call_number_sort')
    serializer_class = serializers.ItemSerializer
    resource_name = 'callnumber_matches'
    filter_fields = ['call_number', 'location_code', 'call_number_type']

    def get_page_data(self, queryset, request):
        """Collect up to `limit` distinct call numbers in sort order."""
        # Only the 'limit' paging parameter is honored; offset is ignored.
        rest_conf = settings.REST_FRAMEWORK
        limit_param = rest_conf.get('PAGINATE_BY_PARAM', 'limit')
        max_limit = rest_conf.get('MAX_PAGINATE_BY', 500)
        default_limit = rest_conf.get('PAGINATE_BY', 10)
        requested = int(request.query_params.get(limit_param, default_limit))
        limit = min(requested, max_limit)
        matches = []
        total = queryset.count()
        index = 0
        while len(matches) < limit and index < total:
            call_number = queryset[index].get('call_number', None)
            if call_number is not None and call_number not in matches:
                matches.append(call_number)
            index += 1
        return matches
class FirstItemPerLocationList(SimpleGetMixin, SimpleView):
    """
    Returns the first item (by call number) for each location within a
    filtered result set.
    """
    facet_field = 'location_code'
    queryset = solr.Queryset().filter(type='Item').search('*:*',
        params={'facet': 'true', 'facet.field': facet_field,
                'facet.sort': 'index', 'facet.mincount': 1})
    serializer_class = serializers.ItemSerializer
    resource_name = 'firstitemperlocation'
    filter_fields = ['call_number', 'call_number_type', 'barcode']

    def get_page_data(self, queryset, request):
        """Build one summary record per location facet value.

        For each location code returned by the facet query, runs a
        secondary query (carrying over the caller's filters) to fetch the
        first item by call number sort, and returns an OrderedDict with
        'totalCount', '_links', and '_embedded' keys.
        """
        ff = self.facet_field
        # Solr returns facet_fields as a flat [value, count, value, count, ...]
        # list, so stepping by 2 yields the facet values.
        facets = queryset.full_response.facets['facet_fields'][ff]
        fields = ['id', 'parent_bib_title', 'parent_bib_record_number',
                  'call_number', 'barcode', 'record_number', 'call_number_type']
        # Use floor division: plain '/' produces a float under Python 3,
        # which would surface as e.g. 3.0 in the JSON 'totalCount'.
        total_count = len(facets) // 2
        items = []
        for key in facets[0:len(facets):2]:
            facet_qs = solr.Queryset()
            facet_qs._search_params['fq'] = queryset._search_params['fq']
            facet_qs = facet_qs.filter(**{ff: key})
            facet_qs = facet_qs.order_by('call_number_sort').only(*fields)
            # Fetch the first result once instead of re-indexing the lazy
            # queryset for every field (each [0] can re-hit Solr).
            first = facet_qs[0]
            item_uri = APIUris.get_uri('items-detail', id=first['id'],
                                       req=request, absolute=True)
            items.append({
                '_links': {'self': {'href': item_uri}},
                'id': first.get('id', None),
                'parentBibRecordNumber':
                    first.get('parent_bib_record_number', None),
                'parentBibTitle': first.get('parent_bib_title', None),
                'recordNumber': first.get('record_number', None),
                'callNumber': first.get('call_number', None),
                'callNumberType': first.get('call_number_type', None),
                'barcode': first.get('barcode', None),
                'locationCode': key,
            })
        data = OrderedDict()
        data['totalCount'] = total_count
        data['_links'] = {'self': request.build_absolute_uri()}
        data['_embedded'] = {'items': items}
        return data
| |
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os.path
from .. import coredata
from .. import mlog
from ..mesonlib import MesonException, version_compare
from .c import CCompiler, VisualStudioCCompiler, ClangClCCompiler
from .compilers import (
CompilerType,
gnu_winlibs,
msvc_winlibs,
ClangCompiler,
GnuCompiler,
ElbrusCompiler,
IntelCompiler,
ArmCompiler,
ArmclangCompiler,
CcrxCompiler,
)
from .c_function_attributes import CXX_FUNC_ATTRIBUTES
class CPPCompiler(CCompiler):
    """Base class for C++ compilers.

    Extends CCompiler with C++-specific behaviour: C++ sanity checks,
    -nostdinc++, class/template symbol probing, and -std= fallback logic.
    """
    @classmethod
    def attribute_check_func(cls, name):
        # Prefer the C++-specific attribute table, falling back to the
        # C-level table provided by the parent class.
        return CXX_FUNC_ATTRIBUTES.get(name, super().attribute_check_func(name))
    def __init__(self, exelist, version, is_cross, exe_wrap, **kwargs):
        # If a child ObjCPP class has already set it, don't set it ourselves
        if not hasattr(self, 'language'):
            self.language = 'cpp'
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
    def get_display_language(self):
        # Human-readable language name used in log output.
        return 'C++'
    def get_no_stdinc_args(self):
        # Argument that disables the default C++ standard include paths.
        return ['-nostdinc++']
    def sanity_check(self, work_dir, environment):
        # Minimal program using a C++-only construct (a class declaration)
        # so a plain C compiler would fail the check.
        code = 'class breakCCompiler;int main(int argc, char **argv) { return 0; }\n'
        return self.sanity_check_impl(work_dir, environment, 'sanitycheckcpp.cc', code)
    def get_compiler_check_args(self):
        # -fpermissive allows non-conforming code to compile which is necessary
        # for many C++ checks. Particularly, the has_header_symbol check is
        # too strict without this and always fails.
        return super().get_compiler_check_args() + ['-fpermissive']
    def has_header_symbol(self, hname, symbol, prefix, env, extra_args=None, dependencies=None):
        # Check if it's a C-like symbol
        if super().has_header_symbol(hname, symbol, prefix, env, extra_args, dependencies):
            return True
        # Check if it's a class or a template
        if extra_args is None:
            extra_args = []
        fargs = {'prefix': prefix, 'header': hname, 'symbol': symbol}
        # 'using <symbol>;' compiles only if the name is declared, which
        # also covers classes and templates that a C-style check misses.
        t = '''{prefix}
        #include <{header}>
        using {symbol};
        int main () {{ return 0; }}'''
        return self.compiles(t.format(**fargs), env, extra_args, dependencies)
    def _test_cpp_std_arg(self, cpp_std_value):
        # Test whether the compiler understands a -std=XY argument
        assert(cpp_std_value.startswith('-std='))
        # This test does not use has_multi_arguments() for two reasons:
        # 1. has_multi_arguments() requires an env argument, which the compiler
        #    object does not have at this point.
        # 2. even if it did have an env object, that might contain another more
        #    recent -std= argument, which might lead to a cascaded failure.
        CPP_TEST = 'int i = static_cast<int>(0);'
        with self.compile(code=CPP_TEST, extra_args=[cpp_std_value], mode='compile') as p:
            if p.returncode == 0:
                mlog.debug('Compiler accepts {}:'.format(cpp_std_value), 'YES')
                return True
            else:
                mlog.debug('Compiler accepts {}:'.format(cpp_std_value), 'NO')
                return False
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # compiler instances alive for the cache's lifetime; acceptable here only
    # because compiler objects live for the whole build -- confirm.
    @functools.lru_cache()
    def _find_best_cpp_std(self, cpp_std):
        # The initial version mapping approach to make falling back
        # from '-std=c++14' to '-std=c++1y' was too brittle. For instance,
        # Apple's Clang uses a different versioning scheme to upstream LLVM,
        # making the whole detection logic awfully brittle. Instead, let's
        # just see if feeding GCC or Clang our '-std=' setting works, and
        # if not, try the fallback argument.
        CPP_FALLBACKS = {
            'c++11': 'c++0x',
            'gnu++11': 'gnu++0x',
            'c++14': 'c++1y',
            'gnu++14': 'gnu++1y',
            'c++17': 'c++1z',
            'gnu++17': 'gnu++1z'
        }
        # Currently, remapping is only supported for Clang and GCC
        assert(self.id in frozenset(['clang', 'gcc']))
        if cpp_std not in CPP_FALLBACKS:
            # 'c++03' and 'c++98' don't have fallback types
            return '-std=' + cpp_std
        for i in (cpp_std, CPP_FALLBACKS[cpp_std]):
            cpp_std_value = '-std=' + i
            if self._test_cpp_std_arg(cpp_std_value):
                return cpp_std_value
        raise MesonException('C++ Compiler does not support -std={}'.format(cpp_std))
class ClangCPPCompiler(ClangCompiler, CPPCompiler):
    """Clang acting as a C++ compiler."""
    def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
        ClangCompiler.__init__(self, compiler_type)
        base_warnings = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
        self.warn_args = {
            '1': base_warnings,
            '2': base_warnings + ['-Wextra'],
            '3': base_warnings + ['-Wextra', '-Wpedantic'],
        }

    def get_options(self):
        opts = CPPCompiler.get_options(self)
        std_choices = ['none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'c++1z', 'c++2a',
                       'gnu++11', 'gnu++14', 'gnu++17', 'gnu++1z', 'gnu++2a']
        opts.update({'cpp_std': coredata.UserComboOption(
            'cpp_std', 'C++ language standard to use', std_choices, 'none')})
        return opts

    def get_option_compile_args(self, options):
        std = options['cpp_std']
        if std.value == 'none':
            return []
        # Probe for a working -std= spelling (e.g. c++14 -> c++1y fallback).
        return [self._find_best_cpp_std(std.value)]

    def get_option_link_args(self, options):
        return []

    def language_stdlib_only_link_flags(self):
        return ['-lstdc++']
class ArmclangCPPCompiler(ArmclangCompiler, CPPCompiler):
    """C++ driver for Arm's Clang-based armclang toolchain."""

    def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, **kwargs):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
        ArmclangCompiler.__init__(self, compiler_type)
        base_warnings = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
        self.warn_args = {
            '1': base_warnings,
            '2': base_warnings + ['-Wextra'],
            '3': base_warnings + ['-Wextra', '-Wpedantic'],
        }

    def get_options(self):
        """Extend the common C++ options with armclang's 'cpp_std' choices."""
        opts = CPPCompiler.get_options(self)
        std_choices = ['none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17',
                       'gnu++98', 'gnu++03', 'gnu++11', 'gnu++14', 'gnu++17']
        opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
                                                         std_choices,
                                                         'none')})
        return opts

    def get_option_compile_args(self, options):
        # armclang accepts the standard spelling directly via -std=.
        chosen = options['cpp_std'].value
        return [] if chosen == 'none' else ['-std=' + chosen]

    def get_option_link_args(self, options):
        return []
class GnuCPPCompiler(GnuCompiler, CPPCompiler):
    """C++ driver for GCC (and GCC-like compilers such as MinGW)."""

    def __init__(self, exelist, version, compiler_type, is_cross, exe_wrap, defines, **kwargs):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
        GnuCompiler.__init__(self, compiler_type, defines)
        base_warnings = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
        self.warn_args = {
            '1': base_warnings,
            '2': base_warnings + ['-Wextra'],
            '3': base_warnings + ['-Wextra', '-Wpedantic'],
        }

    def get_options(self):
        """Common C++ options plus GCC's std choices, STL debug toggle and,
        on Windows targets, the winlibs list."""
        opts = CPPCompiler.get_options(self)
        std_choices = ['none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'c++1z', 'c++2a',
                       'gnu++03', 'gnu++11', 'gnu++14', 'gnu++17', 'gnu++1z', 'gnu++2a']
        opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
                                                         std_choices,
                                                         'none'),
                     'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',
                                                                'STL debug mode',
                                                                False)})
        if self.compiler_type.is_windows_compiler:
            opts.update({
                'cpp_winlibs': coredata.UserArrayOption('cpp_winlibs', 'Standard Win libraries to link against',
                                                        gnu_winlibs), })
        return opts

    def get_option_compile_args(self, options):
        compile_args = []
        chosen = options['cpp_std'].value
        if chosen != 'none':
            # Probe for a working -std= spelling rather than hard-coding it.
            compile_args.append(self._find_best_cpp_std(chosen))
        if options['cpp_debugstl'].value:
            # Enable libstdc++'s checked/debug containers.
            compile_args.append('-D_GLIBCXX_DEBUG=1')
        return compile_args

    def get_option_link_args(self, options):
        if self.compiler_type.is_windows_compiler:
            # Copy so callers cannot mutate the stored option value.
            return options['cpp_winlibs'].value[:]
        return []

    def get_pch_use_args(self, pch_dir, header):
        return ['-fpch-preprocess', '-include', os.path.basename(header)]

    def language_stdlib_only_link_flags(self):
        # Linker flag needed when linking C++ objects with a non-C++ linker.
        return ['-lstdc++']
class ElbrusCPPCompiler(GnuCPPCompiler, ElbrusCompiler):
    """C++ driver for the (largely GCC-compatible) Elbrus lcc compiler."""

    def __init__(self, exelist, version, compiler_type, is_cross, exe_wrapper=None, defines=None, **kwargs):
        GnuCPPCompiler.__init__(self, exelist, version, compiler_type, is_cross, exe_wrapper, defines, **kwargs)
        ElbrusCompiler.__init__(self, compiler_type, defines)

    # It does not support c++/gnu++ 17 and 1z, but still does support 0x, 1y, and gnu++98.
    def get_options(self):
        opts = CPPCompiler.get_options(self)
        opts['cpp_std'] = coredata.UserComboOption('cpp_std', 'C++ language standard to use',
                                                   ['none', 'c++98', 'c++03', 'c++0x', 'c++11', 'c++14', 'c++1y',
                                                    'gnu++98', 'gnu++03', 'gnu++0x', 'gnu++11', 'gnu++14', 'gnu++1y'],
                                                   'none')
        return opts

    # Elbrus C++ compiler does not have lchmod, but there is only linker warning, not compiler error.
    # So we should explicitly fail at this case.
    def has_function(self, funcname, prefix, env, extra_args=None, dependencies=None):
        if funcname != 'lchmod':
            return super().has_function(funcname, prefix, env, extra_args, dependencies)
        return False
class IntelCPPCompiler(IntelCompiler, CPPCompiler):
    """C++ driver for ICC (Intel's compiler) on Unix-like platforms."""

    def __init__(self, exelist, version, compiler_type, is_cross, exe_wrap, **kwargs):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
        IntelCompiler.__init__(self, compiler_type)
        self.lang_header = 'c++-header'
        base_warnings = ['-Wall', '-w3', '-diag-disable:remark',
                         '-Wpch-messages', '-Wnon-virtual-dtor']
        self.warn_args = {
            '1': base_warnings,
            '2': base_warnings + ['-Wextra'],
            '3': base_warnings + ['-Wextra'],
        }

    def get_options(self):
        """Build the std choices from the detected ICC version."""
        opts = CPPCompiler.get_options(self)
        # Every Unix compiler under the sun seems to accept -std=c++03,
        # with the exception of ICC. Instead of preventing the user from
        # globally requesting C++03, we transparently remap it to C++98
        c_stds = ['c++98', 'c++03']
        g_stds = ['gnu++98', 'gnu++03']
        if version_compare(self.version, '>=15.0.0'):
            c_stds.extend(['c++11', 'c++14'])
            g_stds.append('gnu++11')
        if version_compare(self.version, '>=16.0.0'):
            c_stds.append('c++17')
        if version_compare(self.version, '>=17.0.0'):
            g_stds.append('gnu++14')
        opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
                                                         ['none'] + c_stds + g_stds,
                                                         'none'),
                     'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',
                                                                'STL debug mode',
                                                                False)})
        return opts

    def get_option_compile_args(self, options):
        compile_args = []
        chosen = options['cpp_std'].value
        if chosen != 'none':
            # See get_options(): silently translate c++03 to the '98 flag ICC accepts.
            remap_cpp03 = {
                'c++03': 'c++98',
                'gnu++03': 'gnu++98'
            }
            compile_args.append('-std=' + remap_cpp03.get(chosen, chosen))
        if options['cpp_debugstl'].value:
            # Enable libstdc++'s checked/debug containers.
            compile_args.append('-D_GLIBCXX_DEBUG=1')
        return compile_args

    def get_option_link_args(self, options):
        return []
class VisualStudioCPPCompiler(VisualStudioCCompiler, CPPCompiler):
    # C++ driver for MSVC (cl.exe); also the base class for clang-cl below,
    # which distinguishes itself via self.id == 'clang-cl'.
    def __init__(self, exelist, version, is_cross, exe_wrap, is_64):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
        VisualStudioCCompiler.__init__(self, exelist, version, is_cross, exe_wrap, is_64)
        self.base_options = ['b_pch', 'b_vscrt'] # FIXME add lto, pgo and the like
    def get_options(self):
        # Build the list of selectable standards.  'vc++NN' variants request
        # MSVC's permissive mode; plain 'c++NN' requests strict conformance
        # (see the /permissive- handling in get_option_compile_args below).
        cpp_stds = ['none', 'c++11', 'vc++11']
        if self.id == 'clang-cl':
            # clang-cl supports all of these regardless of the MSVC version
            # it is impersonating.
            cpp_stds.extend(['c++14', 'vc++14', 'c++17', 'vc++17', 'c++latest'])
        else:
            # Visual Studio 2015 and later
            if version_compare(self.version, '>=19'):
                cpp_stds.extend(['c++14', 'vc++14', 'c++latest', 'vc++latest'])
            # Visual Studio 2017 and later
            if version_compare(self.version, '>=19.11'):
                cpp_stds.extend(['c++17', 'vc++17'])
        opts = CPPCompiler.get_options(self)
        opts.update({'cpp_eh': coredata.UserComboOption('cpp_eh',
                                                        'C++ exception handling type.',
                                                        ['none', 'a', 's', 'sc'],
                                                        'sc'),
                     'cpp_std': coredata.UserComboOption('cpp_std',
                                                         'C++ language standard to use',
                                                         cpp_stds,
                                                         'none'),
                     'cpp_winlibs': coredata.UserArrayOption('cpp_winlibs',
                                                             'Windows libs to link against.',
                                                             msvc_winlibs)})
        return opts
    def get_option_compile_args(self, options):
        args = []
        eh = options['cpp_eh']
        if eh.value != 'none':
            # Exception-handling model, e.g. /EHsc.
            args.append('/EH' + eh.value)
        # Map each cpp_std value to (permissive-mode?, standard number).
        vc_version_map = {
            'none': (True, None),
            'vc++11': (True, 11),
            'vc++14': (True, 14),
            'vc++17': (True, 17),
            'c++11': (False, 11),
            'c++14': (False, 14),
            'c++17': (False, 17)}
        permissive, ver = vc_version_map[options['cpp_std'].value]
        if ver is None:
            pass
        elif ver == 11:
            # Note: there is no explicit flag for supporting C++11; we attempt to do the best we can
            # which means setting the C++ standard version to C++14, in compilers that support it
            # (i.e., after VS2015U3)
            # if one is using anything before that point, one cannot set the standard.
            if self.id == 'clang-cl' or version_compare(self.version, '>=19.00.24210'):
                mlog.warning('MSVC does not support C++11; '
                             'attempting best effort; setting the standard to C++14')
                args.append('/std:c++14')
            else:
                mlog.warning('This version of MSVC does not support cpp_std arguments')
        else:
            args.append('/std:c++{}'.format(ver))
        # /std: only implies strict conformance when /permissive- is also
        # given, and that flag first appeared in VS2017 (cl 19.11).
        if not permissive and version_compare(self.version, '>=19.11'):
            args.append('/permissive-')
        return args
    def get_option_link_args(self, options):
        # Copy so callers cannot mutate the stored option value.
        return options['cpp_winlibs'].value[:]
    def get_compiler_check_args(self):
        # Visual Studio C++ compiler doesn't support -fpermissive,
        # so just use the plain C args.
        return VisualStudioCCompiler.get_compiler_check_args(self)
class ClangClCPPCompiler(VisualStudioCPPCompiler, ClangClCCompiler):
    # clang-cl: Clang with an MSVC-compatible command line.  It reuses all
    # of the VisualStudioCPPCompiler logic; only the compiler id differs,
    # which the base class checks to enable clang-cl-specific behaviour.
    def __init__(self, exelist, version, is_cross, exe_wrap, is_64):
        VisualStudioCPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, is_64)
        self.id = 'clang-cl'
class ArmCPPCompiler(ArmCompiler, CPPCompiler):
    """C++ driver for the classic ARM Compiler (armcc)."""

    def __init__(self, exelist, version, compiler_type, is_cross, exe_wrap=None, **kwargs):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
        ArmCompiler.__init__(self, compiler_type)

    def get_options(self):
        """armcc only offers C++03 and C++11."""
        opts = CPPCompiler.get_options(self)
        opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
                                                         ['none', 'c++03', 'c++11'],
                                                         'none')})
        return opts

    def get_option_compile_args(self, options):
        # armcc has no -std=; it uses bespoke --cpp/--cpp11 switches instead.
        flag_by_std = {'c++11': '--cpp11', 'c++03': '--cpp'}
        chosen = options['cpp_std'].value
        return [flag_by_std[chosen]] if chosen in flag_by_std else []

    def get_option_link_args(self, options):
        return []

    def get_compiler_check_args(self):
        return []
class CcrxCPPCompiler(CcrxCompiler, CPPCompiler):
    """C++ driver for the Renesas CC-RX toolchain."""

    def __init__(self, exelist, version, compiler_type, is_cross, exe_wrap=None, **kwargs):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
        CcrxCompiler.__init__(self, compiler_type)

    # Override CCompiler.get_always_args
    def get_always_args(self):
        # Always compile as C++ and suppress the banner.
        return ['-nologo', '-lang=cpp']

    def get_option_compile_args(self, options):
        # CC-RX exposes no user-selectable C++ options.
        return []

    def get_compile_only_args(self):
        return []

    def get_output_args(self, target):
        return ['-output=obj={}'.format(target)]

    def get_linker_output_args(self, outputname):
        return ['-output={}'.format(outputname)]

    def get_option_link_args(self, options):
        return []

    def get_compiler_check_args(self):
        return []
| |
# Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # noqa
from future.utils import iterkeys
from future import standard_library
standard_library.install_aliases()
import logging
import os
import requests
import threading
import traceback
from subprocess import PIPE
from ycmd import utils, responses
from ycmd.completers.completer import Completer
from ycmd.completers.completer_utils import GetFileContents
# Module-level logger for this completer.
_logger = logging.getLogger( __name__ )
# Launcher script of the bundled Tern runtime.  It only exists after
# 'npm install' has been run in the tern_runtime submodule (see
# ShouldEnableTernCompleter below).
PATH_TO_TERN_BINARY = os.path.abspath(
  os.path.join(
    os.path.dirname( __file__ ),
    '..',
    '..',
    '..',
    'third_party',
    'tern_runtime',
    'node_modules',
    'tern',
    'bin',
    'tern' ) )
# First 'node' executable found on the PATH, or None when none exists.
PATH_TO_NODE = utils.PathToFirstExistingExecutable( [ 'node' ] )
# host name/address on which the tern server should listen
# note: we use 127.0.0.1 rather than localhost because on some platforms
# localhost might not be correctly configured as an alias for the loopback
# address. (ahem: Windows)
SERVER_HOST = '127.0.0.1'
def ShouldEnableTernCompleter():
  """Returns whether or not the tern completer is 'installed'. That is whether
  or not the tern submodule has a 'node_modules' directory. This is pretty much
  the only way we can know if the user added '--tern-completer' on
  install or manually ran 'npm install' in the tern submodule directory."""
  if not PATH_TO_NODE:
    _logger.warning( 'Not using Tern completer: unable to find node' )
    return False

  _logger.info( 'Using node binary from: ' + PATH_TO_NODE )

  # Guard-clause style: usable only when the tern launcher actually exists.
  if os.path.exists( PATH_TO_TERN_BINARY ):
    return True

  _logger.info( 'Not using Tern completer: not installed at ' +
                PATH_TO_TERN_BINARY )
  return False
def GlobalConfigExists( tern_config ):
  """Return True when the global config file at |tern_config| exists.

  Kept as a separate function primarily so that tests can stub it out;
  it is a plain existence check."""
  return os.path.exists( tern_config )
def FindTernProjectFile( starting_directory ):
  """Walk upwards from |starting_directory| looking for a '.tern-project'
  file, falling back to the user's global '~/.tern-config'.  Returns the
  path found, or None when neither exists."""
  for folder in utils.PathsToAllParentFolders( starting_directory ):
    candidate = os.path.join( folder, '.tern-project' )
    if os.path.exists( candidate ):
      return candidate

  # As described here: http://ternjs.net/doc/manual.html#server a global
  # .tern-config file is also supported for the Tern server. This can provide
  # meaningful defaults (for libs, and possibly also for require paths), so
  # don't warn if we find one. The point is that if the user has a .tern-config
  # set up, then she has deliberately done so and a ycmd warning is unlikely
  # to be anything other than annoying.
  tern_config = os.path.expanduser( '~/.tern-config' )
  return tern_config if GlobalConfigExists( tern_config ) else None
class TernCompleter( Completer ):
  """Completer for JavaScript using tern.js: http://ternjs.net.
  The protocol is defined here: http://ternjs.net/doc/manual.html#protocol

  One Tern server process is started lazily and shared by all requests;
  its start/stop state is guarded by |_server_state_mutex|."""

  def __init__( self, user_options ):
    super( TernCompleter, self ).__init__( user_options )

    self._server_keep_logfiles = user_options[ 'server_keep_logfiles' ]

    # Used to ensure that starting/stopping of the server is synchronised
    self._server_state_mutex = threading.RLock()
    self._do_tern_project_check = False

    with self._server_state_mutex:
      self._server_stdout = None
      self._server_stderr = None
      self._Reset()
      self._StartServer()


  def _WarnIfMissingTernProject( self ):
    # The Tern server will operate without a .tern-project file. However, it
    # does not operate optimally, and will likely lead to issues reported that
    # JavaScript completion is not working properly. So we raise a warning if we
    # aren't able to detect some semblance of manual Tern configuration.

    # We do this check after the server has started because the server does
    # have nonzero use without a project file, however limited. We only do this
    # check once, though because the server can only handle one project at a
    # time. This doesn't catch opening a file which is not part of the project
    # or any of those things, but we can only do so much. We'd like to enhance
    # ycmd to handle this better, but that is a FIXME for now.
    if self._ServerIsRunning() and self._do_tern_project_check:
      self._do_tern_project_check = False

      tern_project = FindTernProjectFile( os.getcwd() )
      if not tern_project:
        _logger.warning( 'No .tern-project file detected: ' + os.getcwd() )
        raise RuntimeError( 'Warning: Unable to detect a .tern-project file '
                            'in the hierarchy before ' + os.getcwd() +
                            ' and no global .tern-config file was found. '
                            'This is required for accurate JavaScript '
                            'completion. Please see the User Guide for '
                            'details.' )
      else:
        _logger.info( 'Detected .tern-project file at: ' + tern_project )


  def _GetServerAddress( self ):
    """Return the base URL (http://host:port) of the Tern server."""
    return 'http://' + SERVER_HOST + ':' + str( self._server_port )


  def ComputeCandidatesInner( self, request_data ):
    """Return ycmd completion data for the identifier at the completion
    start column."""
    query = {
      'type': 'completions',
      'types': True,
      'docs': True,
      'filter': False,
      'caseInsensitive': True,
      'guess': False,
      'sort': False,
      'includeKeywords': False,
      'expandWordForward': False,
      'omitObjectPrototype': False
    }

    completions = self._GetResponse( query,
                                     request_data[ 'start_codepoint' ],
                                     request_data ).get( 'completions', [] )

    def BuildDoc( completion ):
      # Combine the type (always present when known) and the doc string
      # (only sometimes present) into one detailed-info blob.
      doc = completion.get( 'type', 'Unknown type' )
      if 'doc' in completion:
        doc = doc + '\n' + completion[ 'doc' ]
      return doc

    return [ responses.BuildCompletionData( completion[ 'name' ],
                                            completion.get( 'type', '?' ),
                                            BuildDoc( completion ) )
             for completion in completions ]


  def OnFileReadyToParse( self, request_data ):
    self._WarnIfMissingTernProject()

    # Keep tern server up to date with the file data. We do this by sending an
    # empty request just containing the file data
    try:
      self._PostRequest( {}, request_data )
    except Exception:
      # The server might not be ready yet or the server might not be running.
      # in any case, just ignore this we'll hopefully get another parse request
      # soon.
      # NOTE: deliberately best-effort, but we catch Exception rather than
      # using a bare 'except:' so KeyboardInterrupt/SystemExit still propagate.
      pass


  def GetSubcommandsMap( self ):
    return {
      'RestartServer':  ( lambda self, request_data, args:
                                         self._RestartServer() ),
      'StopServer':     ( lambda self, request_data, args:
                                         self._StopServer() ),
      'GoToDefinition': ( lambda self, request_data, args:
                                         self._GoToDefinition( request_data ) ),
      'GoTo':           ( lambda self, request_data, args:
                                         self._GoToDefinition( request_data ) ),
      'GoToReferences': ( lambda self, request_data, args:
                                         self._GoToReferences( request_data ) ),
      'GetType':        ( lambda self, request_data, args:
                                         self._GetType( request_data ) ),
      'GetDoc':         ( lambda self, request_data, args:
                                         self._GetDoc( request_data ) ),
      'RefactorRename': ( lambda self, request_data, args:
                                         self._Rename( request_data, args ) ),
    }


  def SupportedFiletypes( self ):
    return [ 'javascript' ]


  def DebugInfo( self, request_data ):
    """Human-readable server status for the :YcmDebugInfo command."""
    with self._server_state_mutex:
      if self._ServerIsRunning():
        return ( 'JavaScript completer debug information:\n'
                 '  Tern running at: {0}\n'
                 '  Tern process ID: {1}\n'
                 '  Tern executable: {2}\n'
                 '  Tern logfiles:\n'
                 '    {3}\n'
                 '    {4}'.format( self._GetServerAddress(),
                                   self._server_handle.pid,
                                   PATH_TO_TERN_BINARY,
                                   self._server_stdout,
                                   self._server_stderr ) )

      if self._server_stdout and self._server_stderr:
        # Server died, but the logfiles were kept.
        return ( 'JavaScript completer debug information:\n'
                 '  Tern no longer running\n'
                 '  Tern executable: {0}\n'
                 '  Tern logfiles:\n'
                 '    {1}\n'
                 '    {2}\n'.format( PATH_TO_TERN_BINARY,
                                     self._server_stdout,
                                     self._server_stderr ) )

      return ( 'JavaScript completer debug information:\n'
               '  Tern is not running\n'
               '  Tern executable: {0}'.format( PATH_TO_TERN_BINARY ) )


  def Shutdown( self ):
    _logger.debug( "Shutting down Tern server" )
    self._StopServer()


  def ServerIsHealthy( self, request_data = None ):
    # |request_data| is unused; the default used to be a mutable dict ({}),
    # which would be shared across calls in Python, so use None instead.
    if not self._ServerIsRunning():
      return False

    try:
      target = self._GetServerAddress() + '/ping'
      response = requests.get( target )
      return response.status_code == requests.codes.ok
    except requests.ConnectionError:
      return False


  def _Reset( self ):
    """Clear all server-related state, deleting the logfiles unless the
    user asked to keep them.  Safe to call whether or not the server is
    running."""
    with self._server_state_mutex:
      if not self._server_keep_logfiles:
        if self._server_stdout:
          utils.RemoveIfExists( self._server_stdout )
          self._server_stdout = None
        if self._server_stderr:
          utils.RemoveIfExists( self._server_stderr )
          self._server_stderr = None

      self._server_handle = None
      self._server_port = 0


  def _PostRequest( self, request, request_data ):
    """Send a raw request with the supplied request block, and
    return the server's response. If the server is not running, it is started.

    This method is useful where the query block is not supplied, i.e. where just
    the files are being updated.

    The request block should contain the optional query block only. The file
    data are added automatically."""

    if not self._ServerIsRunning():
      raise ValueError( 'Not connected to server' )

    def MakeIncompleteFile( name, file_data ):
      return {
        'type': 'full',
        'name': name,
        'text': file_data[ 'contents' ],
      }

    file_data = request_data.get( 'file_data', {} )

    # Send the full contents of every open JavaScript buffer along with the
    # request so that the server always sees fresh data.
    full_request = {
      'files': [ MakeIncompleteFile( x, file_data[ x ] )
                 for x in iterkeys( file_data )
                 if 'javascript' in file_data[ x ][ 'filetypes' ] ],
    }
    full_request.update( request )

    response = requests.post( self._GetServerAddress(),
                              json = full_request )

    if response.status_code != requests.codes.ok:
      raise RuntimeError( response.text )

    return response.json()


  def _GetResponse( self, query, codepoint, request_data ):
    """Send a standard file/line request with the supplied query block, and
    return the server's response. If the server is not running, it is started.

    This method should be used for almost all requests. The exception is when
    just updating file data in which case _PostRequest should be used directly.

    The query block should contain the type and any parameters. The files,
    position, etc. are added automatically.

    NOTE: the |codepoint| parameter is usually the current cursor position,
    though it should be the "completion start column" codepoint for completion
    requests."""

    def MakeTernLocation( request_data ):
      # Tern uses 0-based lines and codepoint offsets.
      return {
        'line': request_data[ 'line_num' ] - 1,
        'ch': codepoint - 1
      }

    full_query = {
      'file': request_data[ 'filepath' ],
      'end': MakeTernLocation( request_data ),
      'lineCharPositions': True,
    }
    full_query.update( query )

    return self._PostRequest( { 'query': full_query }, request_data )


  # TODO: this function is way too long. Consider refactoring it.
  def _StartServer( self ):
    with self._server_state_mutex:
      if self._ServerIsRunning():
        return

      _logger.info( 'Starting Tern server...' )

      self._server_port = utils.GetUnusedLocalhostPort()

      if _logger.isEnabledFor( logging.DEBUG ):
        extra_args = [ '--verbose' ]
      else:
        extra_args = []

      command = [ PATH_TO_NODE,
                  PATH_TO_TERN_BINARY,
                  '--port',
                  str( self._server_port ),
                  '--host',
                  SERVER_HOST,
                  '--persistent',
                  '--no-port-file' ] + extra_args

      _logger.debug( 'Starting tern with the following command: '
                     + ' '.join( command ) )

      try:
        logfile_format = os.path.join( utils.PathToCreatedTempDir(),
                                       u'tern_{port}_{std}.log' )

        self._server_stdout = logfile_format.format(
            port = self._server_port,
            std = 'stdout' )

        self._server_stderr = logfile_format.format(
            port = self._server_port,
            std = 'stderr' )

        # We need to open a pipe to stdin or the Tern server is killed.
        # See https://github.com/ternjs/tern/issues/740#issuecomment-203979749
        # For unknown reasons, this is only needed on Windows and for Python
        # 3.4+ on other platforms.
        with utils.OpenForStdHandle( self._server_stdout ) as stdout:
          with utils.OpenForStdHandle( self._server_stderr ) as stderr:
            self._server_handle = utils.SafePopen( command,
                                                   stdin = PIPE,
                                                   stdout = stdout,
                                                   stderr = stderr )
      except Exception:
        _logger.warning( 'Unable to start Tern server: '
                         + traceback.format_exc() )
        self._Reset()

      if self._server_port > 0 and self._ServerIsRunning():
        _logger.info( 'Tern Server started with pid: ' +
                      str( self._server_handle.pid ) +
                      ' listening on port ' +
                      str( self._server_port ) )
        _logger.info( 'Tern Server log files are: ' +
                      self._server_stdout +
                      ' and ' +
                      self._server_stderr )

        # Defer the .tern-project check until the first parse request; see
        # _WarnIfMissingTernProject.
        self._do_tern_project_check = True
      else:
        _logger.warning( 'Tern server did not start successfully' )


  def _RestartServer( self ):
    with self._server_state_mutex:
      self._StopServer()
      self._StartServer()


  def _StopServer( self ):
    """Terminate the Tern server (if running) and clean up its state."""
    with self._server_state_mutex:
      if self._ServerIsRunning():
        _logger.info( 'Stopping Tern server with PID {0}'.format(
                          self._server_handle.pid ) )
        self._server_handle.terminate()
        try:
          utils.WaitUntilProcessIsTerminated( self._server_handle,
                                              timeout = 5 )
          _logger.info( 'Tern server stopped' )
        except RuntimeError:
          _logger.exception( 'Error while stopping Tern server' )

      self._Reset()


  def _ServerIsRunning( self ):
    return utils.ProcessIsRunning( self._server_handle )


  def _GetType( self, request_data ):
    """Implement the GetType subcommand via Tern's 'type' query."""
    query = {
      'type': 'type',
    }

    response = self._GetResponse( query,
                                  request_data[ 'column_codepoint' ],
                                  request_data )

    return responses.BuildDisplayMessageResponse( response[ 'type' ] )


  def _GetDoc( self, request_data ):
    # Note: we use the 'type' request because this is the best
    # way to get the name, type and doc string. The 'documentation' request
    # doesn't return the 'name' (strangely), whereas the 'type' request returns
    # the same docs with extra info.
    query = {
      'type': 'type',
      'docFormat': 'full',
      'types': True
    }

    response = self._GetResponse( query,
                                  request_data[ 'column_codepoint' ],
                                  request_data )

    doc_string = 'Name: {name}\nType: {type}\n\n{doc}'.format(
        name = response.get( 'name', 'Unknown' ),
        type = response.get( 'type', 'Unknown' ),
        doc = response.get( 'doc', 'No documentation available' ) )

    return responses.BuildDetailedInfoResponse( doc_string )


  def _GoToDefinition( self, request_data ):
    """Implement GoTo/GoToDefinition via Tern's 'definition' query."""
    query = {
      'type': 'definition',
    }

    response = self._GetResponse( query,
                                  request_data[ 'column_codepoint' ],
                                  request_data )

    return responses.BuildGoToResponseFromLocation(
      _BuildLocation( utils.SplitLines( GetFileContents( request_data,
                                                         response[ 'file' ] ) ),
                      response[ 'file' ],
                      response[ 'start' ][ 'line' ],
                      response[ 'start' ][ 'ch' ] ) )


  def _GoToReferences( self, request_data ):
    """Implement GoToReferences via Tern's 'refs' query."""
    query = {
      'type': 'refs',
    }

    response = self._GetResponse( query,
                                  request_data[ 'column_codepoint' ],
                                  request_data )

    return [
      responses.BuildGoToResponseFromLocation(
        _BuildLocation( utils.SplitLines( GetFileContents( request_data,
                                                           ref[ 'file' ] ) ),
                        ref[ 'file' ],
                        ref[ 'start' ][ 'line' ],
                        ref[ 'start' ][ 'ch' ] ) )
      for ref in response[ 'refs' ]
    ]


  def _Rename( self, request_data, args ):
    """Implement RefactorRename via Tern's 'rename' query; the single
    argument is the new name."""
    if len( args ) != 1:
      raise ValueError( 'Please specify a new name to rename it to.\n'
                        'Usage: RefactorRename <new name>' )

    query = {
      'type': 'rename',
      'newName': args[ 0 ],
    }

    response = self._GetResponse( query,
                                  request_data[ 'column_codepoint' ],
                                  request_data )

    # Tern response format:
    # 'changes': [
    #     {
    #         'file'
    #         'start' {
    #             'line'
    #             'ch' (codepoint offset)
    #         }
    #         'end' {
    #             'line'
    #             'ch' (codepoint offset)
    #         }
    #         'text'
    #     }
    # ]

    # ycmd response format:
    #
    # {
    #     'fixits': [
    #         'chunks': (list<Chunk>) [
    #             {
    #                  'replacement_text',
    #                  'range' (Range) {
    #                      'start_' (Location): {
    #                          'line_number_',
    #                          'column_number_', (byte offset)
    #                          'filename_'
    #                      },
    #                      'end_' (Location): {
    #                          'line_number_',
    #                          'column_number_', (byte offset)
    #                          'filename_'
    #                      }
    #                  }
    #             }
    #         ],
    #         'location' (Location) {
    #              'line_number_',
    #              'column_number_',
    #              'filename_'
    #         }
    #
    #     ]
    # }

    def BuildRange( file_contents, filename, start, end ):
      return responses.Range(
        _BuildLocation( file_contents,
                        filename,
                        start[ 'line' ],
                        start[ 'ch' ] ),
        _BuildLocation( file_contents,
                        filename,
                        end[ 'line' ],
                        end[ 'ch' ] ) )


    def BuildFixItChunk( change ):
      filename = os.path.abspath( change[ 'file' ] )
      file_contents = utils.SplitLines( GetFileContents( request_data,
                                                         filename ) )
      return responses.FixItChunk(
        change[ 'text' ],
        BuildRange( file_contents,
                    filename,
                    change[ 'start' ],
                    change[ 'end' ] ) )


    # From an API perspective, Refactor and FixIt are the same thing - it just
    # applies a set of changes to a set of files. So we re-use all of the
    # existing FixIt infrastructure.
    return responses.BuildFixItResponse( [
      responses.FixIt(
        responses.Location( request_data[ 'line_num' ],
                            request_data[ 'column_num' ],
                            request_data[ 'filepath' ] ),
        [ BuildFixItChunk( x ) for x in response[ 'changes' ] ] ) ] )
def _BuildLocation( file_contents, filename, line, ch ):
  """Convert a Tern position (0-based |line|, codepoint offset |ch|) into a
  ycmd Location (1-based line, byte-offset column)."""
  # tern returns codepoint offsets, but we need byte offsets, so we must
  # convert using the contents of the relevant line.
  byte_column = utils.CodepointOffsetToByteOffset( file_contents[ line ],
                                                   ch + 1 )
  return responses.Location( line = line + 1,
                             column = byte_column,
                             filename = os.path.realpath( filename ) )
| |
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
import sys
import pwd
import errno
from os.path import join, dirname, normpath, exists, isdir
from optparse import OptionParser
try:
from ConfigParser import ConfigParser
# ConfigParser is renamed to configparser in py3
except ImportError:
from configparser import ConfigParser
from carbon import log, state
from carbon.database import TimeSeriesDatabase
from carbon.routers import DatapointRouter
from carbon.exceptions import CarbonConfigException
from twisted.python import usage
# Default value for every recognised carbon setting; Settings.readFrom also
# uses the *types* of these values to coerce strings read from carbon.conf.
defaults = dict(
  USER="",
  # Cache sizing and write throttling.
  MAX_CACHE_SIZE=float('inf'),
  MAX_UPDATES_PER_SECOND=500,
  MAX_CREATES_PER_MINUTE=float('inf'),
  MIN_TIMESTAMP_RESOLUTION=0,
  MIN_TIMESTAMP_LAG=0,
  # Listener interfaces/ports (line, UDP, pickle, cache-query protocols).
  LINE_RECEIVER_INTERFACE='0.0.0.0',
  LINE_RECEIVER_PORT=2003,
  ENABLE_UDP_LISTENER=False,
  UDP_RECEIVER_INTERFACE='0.0.0.0',
  UDP_RECEIVER_PORT=2003,
  PICKLE_RECEIVER_INTERFACE='0.0.0.0',
  PICKLE_RECEIVER_PORT=2004,
  MAX_RECEIVER_CONNECTIONS=float('inf'),
  CACHE_QUERY_INTERFACE='0.0.0.0',
  CACHE_QUERY_PORT=7002,
  # Logging toggles.
  LOG_UPDATES=True,
  LOG_CREATES=True,
  LOG_CACHE_HITS=True,
  LOG_CACHE_QUEUE_SORTS=True,
  # Storage backend (whisper or ceres) options.
  DATABASE='whisper',
  WHISPER_AUTOFLUSH=False,
  WHISPER_SPARSE_CREATE=False,
  WHISPER_FALLOCATE_CREATE=False,
  WHISPER_LOCK_WRITES=False,
  WHISPER_FADVISE_RANDOM=False,
  CERES_MAX_SLICE_GAP=80,
  CERES_NODE_CACHING_BEHAVIOR='all',
  CERES_SLICE_CACHING_BEHAVIOR='latest',
  CERES_LOCK_WRITES=False,
  MAX_DATAPOINTS_PER_MESSAGE=500,
  MAX_AGGREGATION_INTERVALS=5,
  FORWARD_ALL=True,
  MAX_QUEUE_SIZE=1000,
  QUEUE_LOW_WATERMARK_PCT=0.8,
  TIME_TO_DEFER_SENDING=0.0001,
  # AMQP ingestion.
  ENABLE_AMQP=False,
  AMQP_METRIC_NAME_IN_BODY=False,
  AMQP_VERBOSE=False,
  AMQP_SPEC=None,
  BIND_PATTERNS=['#'],
  GRAPHITE_URL='http://127.0.0.1:80',
  # Tag database integration.
  ENABLE_TAGS=True,
  TAG_UPDATE_INTERVAL=100,
  TAG_BATCH_SIZE=100,
  TAG_QUEUE_SIZE=10000,
  TAG_HASH_FILENAMES=True,
  TAG_RELAY_NORMALIZED=False,
  # Manhole (debug shell) access.
  ENABLE_MANHOLE=False,
  MANHOLE_INTERFACE='127.0.0.1',
  MANHOLE_PORT=7222,
  MANHOLE_USER="",
  MANHOLE_PUBLIC_KEY="",
  # Relay / replication behaviour.
  RELAY_METHOD='rules',
  DYNAMIC_ROUTER=False,
  DYNAMIC_ROUTER_MAX_RETRIES=5,
  ROUTER_HASH_TYPE=None,
  REPLICATION_FACTOR=1,
  DIVERSE_REPLICAS=True,
  DESTINATIONS=[],
  DESTINATION_PROTOCOL="pickle",
  DESTINATION_TRANSPORT="none",
  DESTINATION_SSL_CA=None,
  DESTINATION_POOL_REPLICAS=False,
  USE_FLOW_CONTROL=True,
  USE_INSECURE_UNPICKLER=False,
  USE_WHITELIST=False,
  # Self-instrumentation metrics.
  CARBON_METRIC_PREFIX='carbon',
  CARBON_METRIC_INTERVAL=60,
  CACHE_WRITE_STRATEGY='sorted',
  WRITE_BACK_FREQUENCY=None,
  MIN_RESET_STAT_FLOW=1000,
  MIN_RESET_RATIO=0.9,
  MIN_RESET_INTERVAL=121,
  # TCP keepalive tuning for client connections.
  TCP_KEEPALIVE=True,
  TCP_KEEPIDLE=10,
  TCP_KEEPINTVL=30,
  TCP_KEEPCNT=2,
  USE_RATIO_RESET=False,
  LOG_LISTENER_CONN_SUCCESS=True,
  LOG_AGGREGATOR_MISSES=True,
  # Rule-file locations (relative to CONF_DIR).
  AGGREGATION_RULES='aggregation-rules.conf',
  REWRITE_RULES='rewrite-rules.conf',
  RELAY_RULES='relay-rules.conf',
  ENABLE_LOGROTATION=True,
  METRIC_CLIENT_IDLE_TIMEOUT=None,
  CACHE_METRIC_NAMES_MAX=0,
  CACHE_METRIC_NAMES_TTL=0,
  RAVEN_DSN=None,
  PICKLE_RECEIVER_MAX_LENGTH=2**20,
  )
def _process_alive(pid):
if exists("/proc"):
return exists("/proc/%d" % pid)
else:
try:
os.kill(int(pid), 0)
return True
except OSError as err:
return err.errno == errno.EPERM
class OrderedConfigParser(ConfigParser):
  """Hacky workaround to ensure sections are always returned in the order
  they are defined in. Note that this does *not* make any guarantees about
  the order of options within a section or the order in which sections get
  written back to disk on write()."""
  # NOTE(review): class-level mutable default.  read() rebinds an *instance*
  # attribute, so instances that have called read() do not share state; only
  # a parser that was never read() sees this shared empty list.
  _ordered_sections = []
  def read(self, path):
    # Verifies a file exists *and* is readable
    if not os.access(path, os.R_OK):
      raise CarbonConfigException("Error: Missing config file or wrong perms on %s" % path)
    # Let ConfigParser do the real parsing first...
    result = ConfigParser.read(self, path)
    # ...then re-scan the raw file to record the order in which section
    # headers appear, since (older) ConfigParser does not preserve it.
    sections = []
    with open(path) as f:
      for line in f:
        line = line.strip()
        if line.startswith('[') and line.endswith(']'):
          sections.append(line[1:-1])
    self._ordered_sections = sections
    return result
  def sections(self):
    return list(self._ordered_sections)  # return a copy for safety
class Settings(dict):
  """dict subclass holding carbon's configuration; values are reachable both
  as items (settings['FOO']) and as attributes (settings.FOO)."""
  # Attribute access falls through to item lookup (raises KeyError, as the
  # existing callers expect).
  __getattr__ = dict.__getitem__

  def __init__(self):
    dict.__init__(self)
    self.update(defaults)

  def readFrom(self, path, section):
    """Merge the options from |section| of the ini file at |path| into self,
    coercing each value according to the type of its entry in |defaults|."""
    parser = ConfigParser()
    if not parser.read(path):
      raise CarbonConfigException("Failed to read config file %s" % path)

    if not parser.has_section(section):
      return

    for key, value in parser.items(section):
      key = key.upper()

      # Infer the expected type from the defaults table; unknown keys are
      # treated as strings.
      expected_type = type(defaults[key]) if key in defaults else str

      if expected_type is list:
        value = [v.strip() for v in value.split(',')]
      elif expected_type is bool:
        value = parser.getboolean(section, key)
      else:
        # Attempt to figure out numeric types automatically
        for converter in (int, float):
          try:
            value = converter(value)
            break
          except ValueError:
            continue

      self[key] = value
# Module-level singleton: the effective configuration shared across carbon.
settings = Settings()
settings.update(defaults)
class CarbonCacheOptions(usage.Options):
    """twistd sub-command options for carbon-cache; also the base class for
    the aggregator and relay option parsers."""

    optFlags = [
        ["debug", "", "Run in debug mode."],
    ]

    optParameters = [
        ["config", "c", None, "Use the given config file."],
        ["instance", "", "a", "Manage a specific carbon instance."],
        ["logdir", "", None, "Write logs to the given directory."],
        ["whitelist", "", None, "List of metric patterns to allow."],
        ["blacklist", "", None, "List of metric patterns to disallow."],
    ]

    def postOptions(self):
        """Validate options, load carbon.conf, set up logging/db/pidfile state."""
        global settings

        program = self.parent.subCommand

        # Use provided pidfile (if any) as default for configuration. If it's
        # set to 'twistd.pid', that means no value was provided and the default
        # was used.
        pidfile = self.parent["pidfile"]
        if pidfile.endswith("twistd.pid"):
            pidfile = None
        self["pidfile"] = pidfile

        # Enforce a default umask of '022' if none was set.
        if "umask" not in self.parent or self.parent["umask"] is None:
            self.parent["umask"] = 0o022

        # Read extra settings from the configuration file.
        program_settings = read_config(program, self)
        settings.update(program_settings)
        settings["program"] = program

        # Normalize and expand paths
        def cleanpath(path):
            return os.path.normpath(os.path.expanduser(path))
        settings["STORAGE_DIR"] = cleanpath(settings["STORAGE_DIR"])
        settings["LOCAL_DATA_DIR"] = cleanpath(settings["LOCAL_DATA_DIR"])
        settings["WHITELISTS_DIR"] = cleanpath(settings["WHITELISTS_DIR"])
        settings["PID_DIR"] = cleanpath(settings["PID_DIR"])
        settings["LOG_DIR"] = cleanpath(settings["LOG_DIR"])
        settings["pidfile"] = cleanpath(settings["pidfile"])

        # Set process uid/gid by changing the parent config, if a user was
        # provided in the configuration file.
        if settings.USER:
            self.parent["uid"], self.parent["gid"] = (
                pwd.getpwnam(settings.USER)[2:4])

        # Set the pidfile in parent config to the value that was computed by
        # C{read_config}.
        self.parent["pidfile"] = settings["pidfile"]

        storage_schemas = join(settings["CONF_DIR"], "storage-schemas.conf")
        if not exists(storage_schemas):
            print("Error: missing required config %s" % storage_schemas)
            sys.exit(1)

        if settings.CACHE_WRITE_STRATEGY not in ('timesorted', 'sorted', 'max', 'naive'):
            log.err("%s is not a valid value for CACHE_WRITE_STRATEGY, defaulting to %s" %
                    (settings.CACHE_WRITE_STRATEGY, defaults['CACHE_WRITE_STRATEGY']))
            # BUG FIX: the message above promised a fallback but the invalid
            # value used to be left in place; actually apply the default.
            settings.CACHE_WRITE_STRATEGY = defaults['CACHE_WRITE_STRATEGY']
        else:
            log.msg("Using %s write strategy for cache" % settings.CACHE_WRITE_STRATEGY)

        # Database-specific settings
        database = settings.DATABASE
        if database not in TimeSeriesDatabase.plugins:
            print("No database plugin implemented for '%s'" % database)
            raise SystemExit(1)

        database_class = TimeSeriesDatabase.plugins[database]
        state.database = database_class(settings)

        settings.CACHE_SIZE_LOW_WATERMARK = settings.MAX_CACHE_SIZE * 0.95

        if "action" not in self:
            self["action"] = "start"
        self.handleAction()

        # If we are not running in debug mode or non-daemon mode, then log to a
        # directory, otherwise log output will go to stdout. If parent options
        # are set to log to syslog, then use that instead.
        if not self["debug"]:
            if self.parent.get("syslog", None):
                prefix = "%s-%s[%d]" % (program, self["instance"], os.getpid())
                log.logToSyslog(prefix)
            elif not self.parent["nodaemon"]:
                logdir = settings.LOG_DIR
                if not isdir(logdir):
                    os.makedirs(logdir)
                    if settings.USER:
                        # We have not yet switched to the specified user,
                        # but that user must be able to create files in this
                        # directory.
                        os.chown(logdir, self.parent["uid"], self.parent["gid"])
                log.logToDir(logdir)

        if self["whitelist"] is None:
            self["whitelist"] = join(settings["CONF_DIR"], "whitelist.conf")
        settings["whitelist"] = self["whitelist"]

        if self["blacklist"] is None:
            self["blacklist"] = join(settings["CONF_DIR"], "blacklist.conf")
        settings["blacklist"] = self["blacklist"]

    def parseArgs(self, *action):
        """If an action was provided, store it for further processing."""
        if len(action) == 1:
            self["action"] = action[0]

    def handleAction(self):
        """Handle extra argument for backwards-compatibility.

        * C{start} will simply do minimal pid checking and otherwise let twistd
              take over.
        * C{stop} will kill an existing running process if it matches the
              C{pidfile} contents.
        * C{status} will simply report if the process is up or not.
        """
        action = self["action"]
        pidfile = self.parent["pidfile"]
        program = settings["program"]
        instance = self["instance"]

        # NOTE: os.unlink failures raise OSError (IOError is a different type
        # on Python 2), so the cleanup handlers below catch both.
        if action == "stop":
            if not exists(pidfile):
                print("Pidfile %s does not exist" % pidfile)
                raise SystemExit(0)
            pf = open(pidfile, 'r')
            try:
                pid = int(pf.read().strip())
                pf.close()
            except ValueError:
                print("Failed to parse pid from pidfile %s" % pidfile)
                pf.close()
                try:
                    print("removing corrupted pidfile %s" % pidfile)
                    os.unlink(pidfile)
                except (IOError, OSError):
                    print("Could not remove pidfile %s" % pidfile)
                raise SystemExit(1)
            except IOError:
                print("Could not read pidfile %s" % pidfile)
                raise SystemExit(1)
            print("Sending kill signal to pid %d" % pid)
            try:
                os.kill(pid, 15)
            except OSError as e:
                if e.errno == errno.ESRCH:
                    print("No process with pid %d running" % pid)
                else:
                    raise
            raise SystemExit(0)

        elif action == "status":
            if not exists(pidfile):
                print("%s (instance %s) is not running" % (program, instance))
                raise SystemExit(1)
            pf = open(pidfile, "r")
            try:
                pid = int(pf.read().strip())
                pf.close()
            except ValueError:
                print("Failed to parse pid from pidfile %s" % pidfile)
                pf.close()
                try:
                    print("removing corrupted pidfile %s" % pidfile)
                    os.unlink(pidfile)
                except (IOError, OSError):
                    print("Could not remove pidfile %s" % pidfile)
                raise SystemExit(1)
            except IOError:
                print("Failed to read pid from %s" % pidfile)
                raise SystemExit(1)
            if _process_alive(pid):
                print("%s (instance %s) is running with pid %d" %
                      (program, instance, pid))
                raise SystemExit(0)
            else:
                print("%s (instance %s) is not running" % (program, instance))
                raise SystemExit(1)

        elif action == "start":
            if exists(pidfile):
                pf = open(pidfile, 'r')
                try:
                    pid = int(pf.read().strip())
                    pf.close()
                except ValueError:
                    print("Failed to parse pid from pidfile %s" % pidfile)
                    pf.close()
                    try:
                        print("removing corrupted pidfile %s" % pidfile)
                        os.unlink(pidfile)
                    except (IOError, OSError):
                        print("Could not remove pidfile %s" % pidfile)
                    raise SystemExit(1)
                except IOError:
                    print("Could not read pidfile %s" % pidfile)
                    raise SystemExit(1)
                if _process_alive(pid):
                    print("%s (instance %s) is already running with pid %d" %
                          (program, instance, pid))
                    raise SystemExit(1)
                else:
                    print("Removing stale pidfile %s" % pidfile)
                    try:
                        os.unlink(pidfile)
                    except (IOError, OSError):
                        print("Could not remove pidfile %s" % pidfile)
            # Try to create the PID directory
            else:
                if not os.path.exists(settings["PID_DIR"]):
                    try:
                        os.makedirs(settings["PID_DIR"])
                    except OSError as exc:  # Python >2.5
                        if exc.errno == errno.EEXIST and os.path.isdir(settings["PID_DIR"]):
                            pass
                        else:
                            raise
            print("Starting %s (instance %s)" % (program, instance))

        else:
            print("Invalid action '%s'" % action)
            print("Valid actions: start stop status")
            raise SystemExit(1)
class CarbonAggregatorOptions(CarbonCacheOptions):
    """carbon-aggregator options: cache options plus rules-file parameters."""

    optParameters = [
        ["rules", "", None, "Use the given aggregation rules file."],
        ["rewrite-rules", "", None, "Use the given rewrite rules file."],
    ] + CarbonCacheOptions.optParameters

    def postOptions(self):
        CarbonCacheOptions.postOptions(self)
        # Default each rules file from CONF_DIR, then publish it in settings.
        rule_files = (
            ("rules", 'AGGREGATION_RULES', "aggregation-rules"),
            ("rewrite-rules", 'REWRITE_RULES', "rewrite-rules"),
        )
        for option_name, conf_key, settings_key in rule_files:
            if self[option_name] is None:
                self[option_name] = join(settings["CONF_DIR"], settings[conf_key])
            settings[settings_key] = self[option_name]
class CarbonRelayOptions(CarbonCacheOptions):
    """carbon-relay options: cache options plus relay/aggregation rules."""

    optParameters = [
        ["rules", "", None, "Use the given relay rules file."],
        ["aggregation-rules", "", None, "Use the given aggregation rules file."],
    ] + CarbonCacheOptions.optParameters

    def postOptions(self):
        CarbonCacheOptions.postOptions(self)
        # Default each rules file from CONF_DIR, then publish it in settings.
        rule_files = (
            ("rules", 'RELAY_RULES', "relay-rules"),
            ("aggregation-rules", 'AGGREGATION_RULES', "aggregation-rules"),
        )
        for option_name, conf_key, settings_key in rule_files:
            if self[option_name] is None:
                self[option_name] = join(settings["CONF_DIR"], settings[conf_key])
            settings[settings_key] = self[option_name]

        # The relay method must name a registered router plugin.
        router = settings["RELAY_METHOD"]
        if router not in DatapointRouter.plugins:
            print("In carbon.conf, RELAY_METHOD must be one of %s. "
                  "Invalid value: '%s'" % (', '.join(DatapointRouter.plugins), router))
            raise SystemExit(1)
def get_default_parser(usage="%prog [options] <start|stop|status>"):
    """Create a parser for command line options."""
    parser = OptionParser(usage=usage)

    # Boolean flags first.
    flag_options = (
        ("--debug", "Run in the foreground, log to stdout"),
        ("--syslog", "Write logs to syslog"),
        ("--nodaemon", "Run in the foreground"),
    )
    for flag, help_text in flag_options:
        parser.add_option(flag, action="store_true", help=help_text)

    parser.add_option(
        "--profile",
        help="Record performance profile data to the given file")
    parser.add_option(
        "--profiler",
        help="Specify the profiler to use")

    # Value-taking options; only --instance has a non-None default.
    value_options = (
        ("--pidfile", None, "Write pid to the given file"),
        ("--umask", None, "Use the given umask when creating files"),
        ("--config", None, "Use the given config file"),
        ("--whitelist", None, "Use the given whitelist file"),
        ("--blacklist", None, "Use the given blacklist file"),
        ("--logdir", None, "Write logs in the given directory"),
        ("--instance", 'a', "Manage a specific carbon instance"),
        ("--logfile", None, "Log to a specified file, - for stdout"),
        ("--logger", None,
         "A fully-qualified name to a log observer factory to use for the initial log "
         "observer. Takes precedence over --logfile and --syslog (when available)."),
    )
    for flag, default, help_text in value_options:
        parser.add_option(flag, default=default, help=help_text)

    return parser
def get_parser(name):
    """Return the option parser for `name`, adding program-specific options."""
    parser = get_default_parser()
    extra_options = ()
    if "carbon-aggregator" in name:
        extra_options = (
            ("--rules", "Use the given aggregation rules file."),
            ("--rewrite-rules", "Use the given rewrite rules file."),
        )
    elif name == "carbon-relay":
        extra_options = (
            ("--rules", "Use the given relay rules file."),
        )
    for flag, help_text in extra_options:
        parser.add_option(flag, default=None, help=help_text)
    return parser
def parse_options(parser, args):
    """
    Parse command line options and print usage message if no arguments were
    provided for the command.
    """
    options, remaining = parser.parse_args(args)
    # The first positional argument must be a recognized action.
    if not remaining or remaining[0] not in ("start", "stop", "status"):
        parser.print_usage()
        raise SystemExit(1)
    return options, remaining
def read_config(program, options, **kwargs):
  """
  Read settings for 'program' from configuration file specified by
  'options["config"]', with missing values provided by 'defaults'.

  Returns a fresh Settings instance (the local name deliberately shadows
  the module-level singleton; the caller merges the result into it).
  Raises CarbonConfigException when no root directory can be determined
  or the config file is missing.
  """
  settings = Settings()
  settings.update(defaults)

  # Initialize default values if not set yet.
  for name, value in kwargs.items():
    settings.setdefault(name, value)

  # Root directory: explicit kwarg wins, then the GRAPHITE_ROOT env var.
  graphite_root = kwargs.get("ROOT_DIR")
  if graphite_root is None:
    graphite_root = os.environ.get('GRAPHITE_ROOT')
  if graphite_root is None:
    raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT "
                                "needs to be provided.")

  # Default config directory to root-relative, unless overriden by the
  # 'GRAPHITE_CONF_DIR' environment variable.
  settings.setdefault("CONF_DIR",
                      os.environ.get("GRAPHITE_CONF_DIR",
                                     join(graphite_root, "conf")))
  if options["config"] is None:
    options["config"] = join(settings["CONF_DIR"], "carbon.conf")
  else:
    # Set 'CONF_DIR' to the parent directory of the 'carbon.conf' config
    # file.
    settings["CONF_DIR"] = dirname(normpath(options["config"]))

  # Storage directory can be overriden by the 'GRAPHITE_STORAGE_DIR'
  # environment variable. It defaults to a path relative to GRAPHITE_ROOT
  # for backwards compatibility though.
  settings.setdefault("STORAGE_DIR",
                      os.environ.get("GRAPHITE_STORAGE_DIR",
                                     join(graphite_root, "storage")))

  def update_STORAGE_DIR_deps():
    # By default, everything is written to subdirectories of the storage dir.
    settings.setdefault(
        "PID_DIR", settings["STORAGE_DIR"])
    settings.setdefault(
        "LOG_DIR", join(settings["STORAGE_DIR"], "log", program))
    settings.setdefault(
        "LOCAL_DATA_DIR", join(settings["STORAGE_DIR"], "whisper"))
    settings.setdefault(
        "WHITELISTS_DIR", join(settings["STORAGE_DIR"], "lists"))

  # Read configuration options from program-specific section.
  section = program[len("carbon-"):]
  config = options["config"]

  if not exists(config):
    raise CarbonConfigException("Error: missing required config %r" % config)

  settings.readFrom(config, section)
  settings.setdefault("instance", options["instance"])
  # Fill in storage-relative defaults now that the section may have set
  # STORAGE_DIR (setdefault means later calls never overwrite).
  update_STORAGE_DIR_deps()

  # If a specific instance of the program is specified, augment the settings
  # with the instance-specific settings and provide sane defaults for
  # optional settings.
  if options["instance"]:
    settings.readFrom(config,
                      "%s:%s" % (section, options["instance"]))
    settings["pidfile"] = (
        options["pidfile"] or
        join(settings["PID_DIR"], "%s-%s.pid" % (program, options["instance"])))
    settings["LOG_DIR"] = (
        options["logdir"] or
        join(settings["LOG_DIR"], "%s-%s" % (program, options["instance"])))
  else:
    settings["pidfile"] = (
        options["pidfile"] or join(settings["PID_DIR"], '%s.pid' % program))
    settings["LOG_DIR"] = (options["logdir"] or settings["LOG_DIR"])

  # Second pass is a safety net in case the instance section changed
  # STORAGE_DIR; setdefault keeps anything already resolved above.
  update_STORAGE_DIR_deps()
  return settings
| |
from datetime import datetime
import re
from itertools import *
from django.shortcuts import render_to_response, get_object_or_404
from models import Article, Version
import models
import json
from django.http import HttpResponse, Http404
import django.db
from django.db.models import Count
from twitter import *
from django.template import Context, RequestContext, loader
from django.views.decorators.cache import cache_page
from BeautifulSoup import BeautifulSoup
import urllib2
import cookielib
import re
import socket
import time
# strftime pattern for displayed timestamps; NOTE(review): %l (blank-padded
# 12-hour) and %P (lowercase am/pm) are GNU libc extensions and not portable
# across platforms -- confirm the deployment target supports them.
OUT_FORMAT = '%B %d, %Y at %l:%M%P EDT'

# Substrings matched against the HTTP referrer to detect visits that came
# from a search engine (see came_from_search_engine below).
SEARCH_ENGINES = """
http://www.ask.com
http://www.google
https://www.google
search.yahoo.com
http://www.bing.com
""".split()

# News categories ("Ressorts") offered by the search and archive filters.
RESSORTS = """
Allgemein
Politik
Wirtschaft
Regional
Technik
Wissenschaft
Gesellschaft
""".split()

# Tracked news sources.
SOURCES = '''
Zeit
Bild
Focus
Spiegel
Stern
Welt
FAZ
n-tv
RP-ONLINE
Sueddeutsche
TAZ
'''.split()

# Supported search modes: keyword, author, URL.
SEARCH_TYPES = '''
Stichwort
Autor
URL
'''.split()
def came_from_search_engine(request):
    """True when the request's referrer matches a known search engine."""
    referrer = request.META.get('HTTP_REFERER', '')
    return any(engine in referrer for engine in SEARCH_ENGINES)
def Http400():
    """Render the 404 template but with a 400 (bad request) status code."""
    template = loader.get_template('404.html')
    body = template.render(Context())
    return HttpResponse(body, status=400)
def get_first_update(source):
    """Return the oldest real update time for `source` ('' or None = all).

    Updates before 1990 are treated as sentinel values and excluded; when
    nothing matches, "now" is returned.
    """
    if source is None:
        source = ''
    updates = models.Article.objects.order_by('last_update').filter(
        last_update__gt=datetime(1990, 1, 1, 0, 0),
        url__icontains=source)
    try:
        return updates[0].last_update
    except IndexError:
        # BUG FIX: this module imports `from datetime import datetime`, so the
        # previous `datetime.datetime.now()` raised AttributeError.
        return datetime.now()
def get_last_update(source):
    """Return the most recent update time for `source` ('' or None = all).

    Updates before 1990 are treated as sentinel values and excluded; when
    nothing matches, "now" is returned.
    """
    if source is None:
        source = ''
    # BUG FIX: this module imports `from datetime import datetime` (the
    # class), so `datetime.datetime(...)` / `datetime.datetime.now()` raised
    # AttributeError; call the class directly, matching get_first_update.
    updates = models.Article.objects.order_by('-last_update').filter(
        last_update__gt=datetime(1990, 1, 1, 0, 0), url__icontains=source)
    try:
        return updates[0].last_update
    except IndexError:
        return datetime.now()
def search(request):
    """Handle the search form: dispatch on search type (keyword/author/URL),
    paginate, and render the result page.

    Renders a prompt message when no search term was given.
    """
    search_type = request.REQUEST.get('search_type')
    # BUG FIX: fetch first, then normalize -- .get() returns None for a
    # missing parameter and None.lower() raised AttributeError before the
    # old (dead) `if searchterm is None` check was ever reached.
    searchterm = request.REQUEST.get('searchterm')
    if searchterm is None:
        searchterm = ''
    searchterm = searchterm.lower()
    sort = request.REQUEST.get('sort')
    source = request.REQUEST.get('source')
    date = request.REQUEST.get('date')
    ressort = request.REQUEST.get('ressort')
    pagestr = request.REQUEST.get('page', '1')
    results_displayed = 10  # number of results for each page
    if date is None:
        date = ''
    try:
        page = int(pagestr)
    except ValueError:
        page = 1
    # range of results (1-based for display; converted to 0-based below)
    begin_at = 1
    end_at = results_displayed
    if page > 1:
        begin_at = ((page - 1) * results_displayed) + 1
        end_at = begin_at + (results_displayed - 1)
    if len(searchterm) > 0:
        # Unknown search types fall back to keyword; URL-looking terms are
        # forced into URL mode regardless of the requested type.
        if search_type not in SEARCH_TYPES:
            search_type = u'Stichwort'
        if searchterm[:4] == 'http' or searchterm[:4] == 'www.':
            search_type = u'URL'
        if search_type == u'Stichwort':
            articles = get_articles_by_keyword(searchterm, source, ressort, date, begin_at - 1, end_at)
        elif search_type == u'Autor':
            articles = get_articles_by_author(searchterm, source, ressort, date, begin_at - 1, end_at)
        elif search_type == u'URL':
            articles = get_articles_by_url(searchterm)
        return render_to_response('suchergebnisse.html', {
            'articles': articles,
            'articles_count' : len(articles),
            'searchterm': searchterm,
            'archive_date' : date,
            'search_type': search_type,
            'source' : source,
            'sort' : sort,
            'ressort' : ressort,
            'all_sources' : SOURCES,
            'all_ressorts' : RESSORTS,
            'page':page,
            'begin_at' : begin_at,
            'end_at' : begin_at + len(articles) -1,
            'template' : 'suchergebnisse'
        })
    else:
        return render_to_response('suchergebnisse.html', {'message': 'Bitte geben Sie ein Suchbegriff ein.'})
def get_archive(date, ressort, search_source, begin_at, end_at):
    """Return {article_id: summary dict} for articles updated on `date`.

    `date` is a 'dd.mm.yyyy' string and is sliced positionally below;
    `ressort` / `search_source` filter only when they are known values;
    begin_at/end_at bound the result slice (end exclusive).
    """
    articles = {}
    # get all articles which were updated on s specific date
    article_ids = Version.objects.filter(date__year=date[6:10],
                                         date__month=date[3:5],
                                         date__day=date[0:2]).exclude(diff_json__isnull = True).distinct().values_list('article_id')
    if len(article_ids) > 0:
        article_objects = Article.objects.filter(id__in=article_ids)
        if search_source in SOURCES:
            article_objects = article_objects.filter(source__icontains = search_source)
        if ressort in RESSORTS:
            article_objects = article_objects.filter(category__icontains = ressort)
        all_articles = article_objects.order_by('-last_update')[begin_at : end_at] # range of results
        for a in all_articles:
            # boring=0: only count versions with substantive changes
            versions = Version.objects.filter(article_id = a.id, boring = 0)
            version_count = versions.count()
            # Link diffing the very first against the very latest version.
            all_diffs = '/diffview/?vid1='+str(a.first_version().id)+'&vid2='+str(a.latest_version().id)
            article_title = versions.order_by('-date')[0].title
            articles[a.id] = {
                'id': a.id,
                'title': article_title,
                'url': a.url,
                'source': a.source,
                'ressort' : a.category,
                'date': a.initial_date,
                'versioncount': version_count,
                'all_diffs' : all_diffs
            }
    return articles
def grab_url(url, max_depth=5, opener=None):
    """Fetch `url` and return the response body.

    Retries (up to `max_depth` times, sleeping one second between attempts,
    reusing one cookie-aware opener) on socket timeouts and on the NYT
    advertisement interstitial page.
    """
    if opener is None:
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    while True:
        retry = False
        try:
            text = opener.open(url, timeout=5).read()
            if '<title>NY Times Advertisement</title>' in text:
                retry = True
        except socket.timeout:
            retry = True
        if not retry:
            return text
        if max_depth == 0:
            raise Exception('Too many attempts to download %s' % url)
        max_depth -= 1
        time.sleep(1)
def get_articles_by_url(url):
    """Look up tracked articles by URL, resolving the canonical og:url first.

    Downloads the page; when the exact URL is not already tracked, replaces
    it with the page's og:url meta tag (if present) before querying.
    Returns at most ONE (article_id, summary dict) pair (see islice below)
    as a list -- presumably one URL maps to one article; TODO confirm.
    """
    html = grab_url(url)
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, fromEncoding='utf-8')
    articles = {}
    alreadyOriginal = Article.objects.filter(url=url)
    if not alreadyOriginal.count():
        # Canonicalize: prefer the og:url property, fall back to name=og:url.
        if soup.find('meta', {'property': 'og:url'}):
            url = soup.find('meta', {'property': 'og:url'})['content']
        elif soup.find('meta', {'name': 'og:url'}):
            url = soup.find('meta', {'name': 'og:url'})['content']
    all_articles = Article.objects.filter(url = url).exclude(source='')
    for a in all_articles:
        # boring=0: only versions with substantive changes
        versions = Version.objects.filter(article_id = a.id, boring = 0)
        version_count = versions.count()
        if version_count > 1: # get all articles with changes
            all_diffs = '/diffview/?vid1='+str(a.first_version().id)+'&vid2='+str(a.latest_version().id)
            article_title = versions.order_by('-date')[0].title
            articles[a.id] = {
                'id': a.id,
                'title': article_title,
                'url': a.url,
                'source': a.source,
                'date': a.initial_date,
                'versioncount': version_count,
                'ressort' : a.category,
                'all_diffs' : all_diffs
            }
    return list(islice(articles.iteritems(),0,1))
def get_articles_by_author(searchterm, search_source, ressort, date, begin_at, end_at):
    """Search tracked articles by author byline.

    Optional filters: `date` as a 'dd.mm.yyyy' string, `search_source` and
    `ressort` when they are known values. Returns the [begin_at:end_at]
    slice of (article_id, summary dict) pairs for articles that have more
    than one non-boring version.
    """
    articles = {}
    all_articles = []
    article_ids = Version.objects.filter(byline__icontains = searchterm).values_list('article_id')
    if len(article_ids) > 0:
        article_objects = Article.objects.filter(id__in=article_ids)
        # BUG FIX: was `len(date) is 10` -- identity comparison on an int is
        # a CPython small-int implementation detail, not a value check.
        if len(date) == 10:
            article_objects = article_objects.filter(last_update__year=date[6:10],
                                                     last_update__month=date[3:5],
                                                     last_update__day=date[0:2])
        if search_source in SOURCES:
            article_objects = article_objects.filter(source__icontains = search_source)
        if ressort in RESSORTS :
            article_objects = article_objects.filter(category = ressort)
        all_articles = article_objects.order_by('-last_update')
    for a in all_articles:
        versions = Version.objects.filter(article_id = a.id, boring = 0)
        version_count = len(versions)
        if version_count > 1: # get all articles with changes
            all_diffs = '/diffview/?vid1='+str(a.first_version().id)+'&vid2='+str(a.latest_version().id)
            article_title = versions.order_by('-date')[0].title
            articles[a.id] = {
                'id': a.id,
                'title': article_title,
                'url': a.url,
                'source': a.source,
                'date': a.initial_date,
                'ressort': a.category,
                'versioncount': version_count,
                'all_diffs' : all_diffs
            }
    return list(islice(articles.iteritems(),begin_at, end_at))
def get_articles_by_keyword(searchterm, search_source, ressort, date, begin_at, end_at):
    """Search tracked articles by keyword.

    Optional filters: `date` as a 'dd.mm.yyyy' string, `search_source` and
    `ressort` when they are known values. Returns the [begin_at:end_at]
    slice of (article_id, summary dict) pairs for articles that have more
    than one non-boring version.
    """
    articles = {}
    all_articles = Article.objects.filter(keywords__icontains = searchterm)
    # BUG FIX: was `len(date) is 10` -- identity comparison on an int is a
    # CPython small-int implementation detail, not a value check.
    if len(date) == 10:
        all_articles = all_articles.filter(last_update__year=date[6:10],
                                           last_update__month=date[3:5],
                                           last_update__day=date[0:2])
    if search_source in SOURCES:
        all_articles = all_articles.filter(source__icontains = search_source)
    if ressort in RESSORTS:
        all_articles = all_articles.filter(category__icontains = ressort)
    all_articles = all_articles.order_by('-initial_date')
    for a in all_articles:
        versions = Version.objects.filter(article_id = a.id, boring = 0)
        version_count = len(versions)
        if version_count > 1: # get all articles with changes
            article_title = versions.order_by('-date')[0].title
            all_diffs = '/diffview/?vid1='+str(a.first_version().id)+'&vid2='+str(a.latest_version().id)
            articles[a.id] = {
                'id': a.id,
                'title': article_title,
                'url': a.url,
                'source': a.source,
                'date': a.initial_date,
                'versioncount': version_count,
                'ressort' : a.category,
                'all_diffs' : all_diffs
            }
    return list(islice(articles.iteritems(),begin_at, end_at))
def get_articles(source=None, distance=0):
articles = []
rx = re.compile(r'^https?://(?:[^/]*\.)%s/' % source if source else '')
pagelength = datetime.timedelta(days=1)
end_date = datetime.datetime.now() - distance * pagelength
start_date = end_date - pagelength
print 'Asking query'
version_query = '''SELECT
version.id, version.article_id, version.v, version.title,
version.byline, version.date, version.boring, version.diff_json,
T.age as age,
Articles.url as a_url, Articles.initial_date as a_initial_date,
Articles.last_update as a_last_update, Articles.last_check as a_last_check
FROM version,
(SELECT Articles.id as article_id, MAX(T3.date) AS age, COUNT(T3.id) AS num_vs
FROM Articles LEFT OUTER JOIN version T3 ON (Articles.id = T3.article_id)
WHERE (T3.boring=0) GROUP BY Articles.id
HAVING (age > %s AND age < %s AND num_vs > 1 )) T, Articles
WHERE (version.article_id = Articles.id) and
(version.article_id = T.article_id) and
NOT version.boring
ORDER BY date'''
all_versions = models.Version.objects.raw(version_query,
(start_date, end_date))
article_dict = {}
for v in all_versions:
a=models.Article(id=v.article_id,
url=v.a_url, initial_date=v.a_initial_date,
last_update=v.a_last_update, last_check=v.a_last_check)
v.article = a
article_dict.setdefault(v.article, []).append(v)
for article, versions in article_dict.items():
url = article.url
if not rx.match(url):
print 'REJECTING', url
continue
if 'blogs.nytimes.com' in url: #XXX temporary
continue
if len(versions) < 2:
continue
rowinfo = get_rowinfo(article, versions)
articles.append((article, versions[-1], rowinfo))
print 'Queries:', len(django.db.connection.queries), django.db.connection.queries
articles.sort(key = lambda x: x[-1][0][1].date, reverse=True)
return articles
def is_valid_domain(domain):
    """Cheap method to tell whether a domain is being tracked."""
    for tracked in SOURCES:
        if domain.endswith(tracked):
            return True
    return False
def browse(request):
    """Render the archive page for a given date, with paging and filters."""
    archive_date = request.REQUEST.get('date')
    ressort = request.REQUEST.get('ressort')
    source = request.REQUEST.get('source')
    pagestr = request.REQUEST.get('page', '1')
    sort = request.REQUEST.get('sort')
    results_displayed = 10  # number of results for each page
    try:
        page = int(pagestr)
    except ValueError:
        page = 1
    # range of results (1-based for display; converted to 0-based below)
    begin_at = 1
    end_at = results_displayed
    if page > 1:
        begin_at = ((page - 1) * results_displayed) + 1
        end_at = begin_at + (results_displayed - 1)
    # BUG FIX: was `archive_date is u''` -- identity comparison on a string
    # depends on interning and is not a reliable equality check.
    if archive_date is None or archive_date == u'':
        archive_date = datetime.today().strftime('%d.%m.%Y')
    articles = get_archive(archive_date, ressort, source, begin_at - 1, end_at)
    return render_to_response('archiv.html', {
        'articles': articles,
        'articles_count' : len(articles),
        'archive_date': archive_date,
        'all_sources': SOURCES,
        'source' : source,
        'ressort' : ressort,
        'all_ressorts' : RESSORTS,
        'page':page,
        'sort' : sort,
        'begin_at' : begin_at,
        'end_at' : begin_at + len(articles) -1,
        'template' : 'archive'
    })
def feed(request, source=''):
    """Render the Atom feed for one source ('' = all tracked sources).

    Raises Http404 for an unknown source.
    """
    if source not in SOURCES + ['']:
        raise Http404
    pagestr = request.REQUEST.get('page', '1')
    try:
        page = int(pagestr)
    except ValueError:
        page = 1
    first_update = get_first_update(source)
    last_update = get_last_update(source)
    # One feed page per day since the first recorded update.
    # BUG FIX: `datetime` is the imported class here, so the previous
    # `datetime.datetime.now()` raised AttributeError.
    num_pages = (datetime.now() - first_update).days + 1
    page_list = range(1, 1 + num_pages)
    articles = get_articles(source=source, distance=page - 1)
    return render_to_response('feed.xml', {
        'source': source, 'articles': articles,
        'page':page,
        'request':request,
        'page_list': page_list,
        'last_update': last_update,
        'sources': SOURCES
    },
        context_instance=RequestContext(request),
        mimetype='application/atom+xml')
def diffview(request, vid1='', vid2=''):
    """Render a side-by-side diff of two versions of the same article.

    The version ids come from the query string (the URL-path defaults are
    overwritten immediately below). Returns 404 for unknown ids or versions
    of different articles, and a 400 page when either version is missing
    from the article's (non-boring) version list or has no text.
    """
    vid1=request.REQUEST.get('vid1')
    vid2=request.REQUEST.get('vid2')
    try:
        v1 = Version.objects.get(id=int(vid1))
        v2 = Version.objects.get(id=int(vid2))
    except Version.DoesNotExist:
        raise Http404
    article = v1.article
    if v1.article != v2.article:
        raise Http404
    title = article.latest_version().title
    # Position index -> version, used to find each version's neighbors.
    versions = dict(enumerate(article.versions()))
    adjacent_versions = []
    dates = []
    texts = []
    for v in (v1, v2):
        texts.append(v.text())
        dates.append(v.date.strftime('%d.%m.%Y - %H:%M Uhr'))
        indices = [i for i, x in versions.items() if x == v]
        if not indices:
            #One of these versions doesn't exist / is boring
            return Http400()
        index = indices[0]
        # [previous, next] neighbors of this version (None at the ends).
        adjacent_versions.append([versions.get(index+offset)
                                  for offset in (-1, 1)])
    if any(x is None for x in texts):
        return Http400()
    # Build prev/next navigation links; only when BOTH sides have a
    # neighbor in that direction, otherwise an empty link.
    links = []
    for i in range(2):
        if all(x[i] for x in adjacent_versions):
            diffl = '/diffview/?vid1='+str(adjacent_versions[0][i].id)+'&vid2='+str(adjacent_versions[1][i].id)
            links.append(diffl)
        else:
            links.append('')
    # Link diffing the very first against the very latest version.
    all_diffs = '/diffview/?vid1='+str(article.first_version().id)+'&vid2='+str(article.latest_version().id)
    return render_to_response('diffview.html', {
        'title': title,
        'date1':dates[0], 'date2':dates[1],
        'text1':texts[0], 'text2':texts[1],
        'prev':links[0], 'next':links[1],
        'article' : article,
        'article_shorturl': article.filename(),
        'article_id' : article.id,
        'article_url': article.url, 'v1': v1, 'v2': v2,
        'all_diffs' : all_diffs,
    }, context_instance=RequestContext(request))
def get_rowinfo(article, version_lst=None):
    """Build (diff_link, version) rows for an article's history, newest first.

    Each row links the version to its predecessor (the oldest version gets
    an empty link). NOTE: mutates every version in place, replacing its
    datetime `date` with a formatted display string.
    """
    versions = article.versions() if version_lst is None else version_lst
    rows = []
    previous = None
    for version in versions:
        version.date = version.date.strftime('%d.%m.%Y - %H:%M Uhr')
        if previous is None:
            link = ''
        else:
            link = '/diffview/?vid1=%s&vid2=%s' % (previous.id, version.id)
        rows.append((link, version))
        previous = version
    return rows[::-1]
def prepend_http(url):
    """Return a version of the url that starts with the proper scheme.

    url may look like
        www.nytimes.com
        https:/www.nytimes.com   <- because double slashes get stripped
        http://www.nytimes.com

    BUG FIX: a scheme followed by a single slash and no path (the second
    example above) previously got 'http://' prepended instead of having
    the stripped slash restored.
    """
    components = url.split('/', 2)
    if components[0].endswith(':'):
        # A scheme is present; restore the second slash if it was stripped.
        if len(components) > 1 and components[1]:
            components[1:1] = ['']
    else:
        # No scheme at all: default to http.
        components = ['http:', ''] + components
    return '/'.join(components)
def article_history(request):
id = request.REQUEST.get('id')
url = request.REQUEST.get('url')
if url :
article = Article.objects.get(url=url)
else:
try:
article = Article.objects.get(id=id)
except Article.DoesNotExist:
try:
return render_to_response('article_history_missing.html', {'id': id})
except (TypeError, ValueError):
# bug in django + mod_rewrite can cause this. =/
return HttpResponse('Bug!')
created_at = article.initial_date.strftime('%d.%m.%Y - %H:%M Uhr')
versions = get_rowinfo(article)
all_diffs = '/diffview/?vid1='+str(article.first_version().id)+'&vid2='+str(article.latest_version().id)
return render_to_response('article_history.html', {'article':article,
'versions':versions,
'display_search_banner': came_from_search_engine(request),
'created_at': created_at,
'source' : article.source,
'all_diffs' : all_diffs
})
def article_history_feed(request):
    """Render the Atom feed of one article's version history (by url or id)."""
    id = request.REQUEST.get('id')
    url = request.REQUEST.get('url')
    if url:
        article = Article.objects.get(url=url)
    else:
        article = get_object_or_404(Article, id=id)
    context = {
        'article': article,
        'versions': get_rowinfo(article),
        'request': request,
    }
    return render_to_response('article_history.xml', context,
                              context_instance=RequestContext(request),
                              mimetype='application/atom+xml')
def json_view(request, vid):
    """Return one version as JSON: title, byline, ISO date, and full text."""
    version = get_object_or_404(Version, id=int(vid))
    payload = {
        'title': version.title,
        'byline': version.byline,
        'date': version.date.isoformat(),
        'text': version.text(),
    }
    return HttpResponse(json.dumps(payload), mimetype="application/json")
# Trivial views that render a template with little or no context.

def about(request):
    # Static "about" page.
    return render_to_response('about.html', {})

def history(request):
    # Renders the article-history template with an EMPTY context
    # (the populated variant is article_history above).
    return render_to_response('article_history.html', {})

def artikel(request):
    # Renders the diff template with an EMPTY context
    # (the populated variant is diffview above).
    return render_to_response('diffview.html', {})

def highlights(request):
    # Static highlights page.
    return render_to_response('highlights.html', {})

def plugin(request):
    # Static browser-plugin page.
    return render_to_response('plugin.html', {})

def kontakt(request):
    # Static contact page.
    return render_to_response('kontakt.html', {})

def impressum(request):
    # Static legal-notice page.
    return render_to_response('impressum.html', {})

def index(request):
    # Landing page listing all tracked sources.
    return render_to_response('index.html', {'sources': SOURCES})
@cache_page(60 * 60) #60 minute cache
def entdecken(request):
    """Render the "discover" page with the current top-10 Twitter trends.

    NOTE(review): executes /var/www/dev/config.py to obtain the OAuth
    credentials -- execfile is Python-2-only and runs arbitrary code from
    that hard-coded path; consider a safer config format.
    """
    config = {}
    execfile("/var/www/dev/config.py", config)
    twitter = Twitter(auth = OAuth(config["access_key"], config["access_secret"], config["consumer_key"], config["consumer_secret"]))
    # _id is a WOEID; presumably 23424829 selects Germany -- TODO confirm.
    alltrends = twitter.trends.place(_id = 23424829)
    results = []
    for location in alltrends:
        for trend in location["trends"]:
            result = trend["name"].encode("utf-8")
            if result.startswith('#'):
                result = result.replace("#", "")
            results.append(result)
    # BUG FIX: indexing results[0]..results[9] raised IndexError whenever
    # the API returned fewer than ten trends; pad with empty strings.
    context = {}
    for i in range(10):
        context['trend%d' % (i + 1)] = results[i] if i < len(results) else ''
    return render_to_response('entdecken.html', context)
| |
# coding: utf-8
# Jupyter-notebook export: trains a RandomForest on the UCI student-mat
# survey data to predict an alcohol-consumption label derived from the
# Dalc/Walc columns.  Original cell markers (In[...]) are preserved.

# In[178]:
import pandas as pd
import numpy as np
import json
import urllib2  # NOTE(review): unused and Python-2 only; kept from the export
import requests
import sys

request = 'myrequest'  # NOTE(review): placeholder, not a real URL -- TODO replace
data = requests.get(request)
# BUG FIX: the export bound this response to the name `json`, shadowing the
# json module imported above; renamed to `payload`.
payload = data.json()
df = pd.DataFrame(payload)
df = df.values

# Feature vector passed on the command line (1-D array of strings,
# including argv[0]); intended mapping documented below.
data_ser = np.array(sys.argv)
#data['sex'] = sys.argv[1]
#data['address'] = sys.argv[2]
#data['familysize'] = sys.argv[3]
#data['Pstatus'] = sys.argv[4]
#data['Medu'] = sys.argv[5]
#data['Fedu'] = sys.argv[6]
#data['Mjob'] = sys.argv[7]
#data['Fjob'] = sys.argv[8]
#data['traveltime'] = sys.argv[9]
#data['studytime'] = sys.argv[10]
#data['failures'] = sys.argv[11]
#data['paid'] = sys.argv[12]
#data['activities'] = sys.argv[13]
#data['famrel'] = sys.argv[14]
#data['goout'] = sys.argv[15]
#data['percentage'] = sys.argv[16]

# In[179]:
data = pd.read_csv("student-mat.csv")

# In[180]:
data.head()

# In[181]:
# Targets: workday (Dalc) and weekend (Walc) alcohol consumption.
y = np.array(data[["Dalc", "Walc"]])

# In[182]:
# Collapse the two targets into one label (weekday weighted double).
labels = 2*y[:,0] + y[:,1]

# In[183]:
data.drop(["Dalc", "Walc"], inplace=True, axis=1)

# In[184]:
# Drop columns not used as features.
data.drop(["school", "age", "reason", "guardian", "schoolsup", "famsup", "nursery", "higher", "internet", "romantic", "freetime", "health", "absences"], inplace=True, axis=1)

# In[185]:
data

# In[186]:
# Combine the three grade columns into a single percentage.
grades = np.array(data[["G1", "G2", "G3"]])

# In[187]:
per = grades[:,0] + grades[:,1] + grades[:, 2]

# In[188]:
per = per*5/3

# In[189]:
per.shape

# In[190]:
data.drop(["G1", "G2", "G3"], inplace=True, axis=1)

# In[191]:
data.head(10)

# In[192]:
data['address'].value_counts()

# In[193]:
data['sex'].value_counts()

# Encode the remaining categorical columns as small integers.
# In[194]:
di = {'M' : 0, 'F' : 1}
data.replace({'sex':di}, inplace=True)

# In[195]:
di = { 'U' : 0, 'R' : 1}
data.replace({'address':di}, inplace=True)

# In[196]:
data['famsize'].value_counts()

# In[197]:
di = {'LE3' : 0,'GT3' : 1}
data.replace({'famsize':di}, inplace=True)

# In[198]:
data.head(6)

# In[199]:
di = { 'A' : 0, 'T' : 1}
data.replace({'Pstatus':di}, inplace=True)

# In[200]:
data.head(6)

# In[201]:
data['Mjob'].value_counts()

# In[202]:
di = { 'teacher' : 0, 'health' : 1, 'services' : 2, 'at_home' : 3, 'other' : 4}
data.replace({'Mjob':di}, inplace=True)

# In[203]:
data.head(6)

# In[204]:
data['Fjob'].value_counts()

# In[205]:
di = { 'teacher' : 0, 'health' : 1, 'services' : 2, 'at_home' : 3, 'other' : 4}
data.replace({'Fjob':di}, inplace=True)

# In[206]:
data.head(6)

# In[207]:
data['paid'].value_counts()

# In[208]:
di = { 'no' : 0, 'yes' : 1}
data.replace({'paid':di}, inplace=True)

# In[209]:
di = { 'no' : 0, 'yes' : 1}
data.replace({'activities':di}, inplace=True)

# In[210]:
data.shape

# In[211]:
test = np.array(data)

# In[212]:
test

# In[213]:
test.shape

# In[214]:
# Training matrix: the 15 encoded features plus the grade percentage.
train = np.zeros((395, 16))

# In[215]:
train[:,:15] = test[:,:]

# In[216]:
train[:,15] = per

# In[217]:
train.shape

# In[218]:
# Combined label is in 3..15, so //10 yields a binary heavy-drinker flag.
labels = labels // 10

# In[219]:
labels.shape

# In[220]:
from sklearn.ensemble import RandomForestClassifier

# In[221]:
clf = RandomForestClassifier(n_estimators=60)

# In[222]:
# BUG FIX: this cell was corrupted in the export (only a stray ')'
# survived, a syntax error).  Reconstructed from the following cells,
# which expect X_test / y_test -- confirm against the original notebook.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train, labels, test_size=0.2)

# In[223]:
X_test.shape

# In[224]:
y_test.shape

# In[225]:
clf.fit(train, labels)

# In[226]:
#y_A = clf.predict(df)
# NOTE(review): data_ser is a 1-D array of argv strings (including the
# script name); predict() expects a 2-D numeric feature matrix -- TODO fix
pred = clf.predict(data_ser)

# In[227]:
#y_A
print(pred)
sys.stdout.flush()
#print(y_A)

# In[ ]:
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LazyAdamOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.opt.python.training import lazy_adam_optimizer
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      alpha=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-8):
  """NumPy reference implementation of a single Adam step.

  Applies the Adam update rule for gradient ``g_t`` at (1-based)
  timestep ``t`` and returns the updated ``(param, m, v)`` triple.
  """
  # Bias-corrected effective step size for timestep t.
  lr_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  # Exponential moving averages of the gradient and squared gradient.
  m_t = m * beta1 + g_t * (1 - beta1)
  v_t = v * beta2 + (g_t * g_t) * (1 - beta2)
  updated = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return updated, m_t, v_t
class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
  """Tests LazyAdamOptimizer against the NumPy reference Adam update."""

  @parameterized.parameters([False, True])
  def testSparse(self, use_resource):
    """Sparse (IndexedSlices) gradients must match dense Adam math."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)

        # Wrap the dense gradients as IndexedSlices covering all rows.
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np),
            constant_op.constant(grads0_np_indices), constant_op.constant([2]))
        grads1_np_indices = np.array([0, 1], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np),
            constant_op.constant(grads1_np_indices), constant_op.constant([2]))
        opt = lazy_adam_optimizer.LazyAdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  @parameterized.parameters([False, True])
  def testSparseDevicePlacement(self, use_resource):
    """All optimizer ops must be placeable on GPU when one is available."""
    for index_dtype in [dtypes.int32, dtypes.int64]:
      with self.cached_session(force_gpu=test.is_gpu_available()):
        # If a GPU is available, tests that all optimizer ops can be placed on
        # it (i.e. they have GPU kernels).
        if use_resource:
          var = resource_variable_ops.ResourceVariable([[1.0], [2.0]])
        else:
          var = variables.Variable([[1.0], [2.0]])
        indices = constant_op.constant([0, 1], dtype=index_dtype)
        gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
        optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
        minimize_op = optimizer.minimize(gathered_sum)
        variables.global_variables_initializer().run()
        minimize_op.run()

  @parameterized.parameters([False, True])
  def testSparseRepeatedIndices(self, use_resource):
    """Repeated sparse indices must accumulate like a pre-aggregated grad."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        if use_resource:
          repeated_index_update_var = resource_variable_ops.ResourceVariable(
              [[1.0], [2.0]], dtype=dtype)
          aggregated_update_var = resource_variable_ops.ResourceVariable(
              [[1.0], [2.0]], dtype=dtype)
        else:
          repeated_index_update_var = variables.Variable(
              [[1.0], [2.0]], dtype=dtype)
          aggregated_update_var = variables.Variable(
              [[1.0], [2.0]], dtype=dtype)
        # Two 0.1 contributions to row 1 ...
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        # ... must equal one aggregated 0.2 contribution to row 1.
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
        repeated_update = repeated_update_opt.apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
        aggregated_update = aggregated_update_opt.apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            repeated_index_update_var.eval())
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              repeated_index_update_var.eval())

  def doTestBasic(self, use_resource=False, use_callable_params=False):
    """Dense-gradient correctness in graph/eager, optionally with callable
    hyperparameters; also checks slot/non-slot variable bookkeeping."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.session(graph=ops.Graph()):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(
              var0_np, name="var0_%d" % i)
          var1 = resource_variable_ops.ResourceVariable(
              var1_np, name="var1_%d" % i)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)

        learning_rate = lambda: 0.001
        beta1 = lambda: 0.9
        beta2 = lambda: 0.999
        epsilon = lambda: 1e-8
        if not use_callable_params:
          learning_rate = learning_rate()
          beta1 = beta1()
          beta2 = beta2()
          epsilon = epsilon()

        opt = lazy_adam_optimizer.LazyAdamOptimizer(learning_rate=learning_rate)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        opt_variables = opt.variables()
        beta1_power, beta2_power = opt._get_beta_accumulators()
        self.assertIsNotNone(beta1_power)
        # Bug fix: this previously asserted `beta2_power is not None` (a
        # bool) was not None, which is vacuously true for any value.
        self.assertIsNotNone(beta2_power)
        self.assertIn(beta1_power, opt_variables)
        self.assertIn(beta2_power, opt_variables)

        if not context.executing_eagerly():
          with ops.Graph().as_default():
            # Shouldn't return non-slot variables from other graphs.
            self.assertEqual(0, len(opt.variables()))
          self.evaluate(variables.global_variables_initializer())
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          if not context.executing_eagerly():
            self.evaluate(update)
          elif t > 1:
            # In eager the initial apply_gradients above was step 1.
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

          # Beta accumulators are updated after each apply, hence t + 1.
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta2_power))

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
          if use_resource:
            self.assertEqual("var0_%d/Adam:0" % (i,),
                             opt.get_slot(var=var0, name="m").name)

  def testBasic(self):
    with self.cached_session():
      self.doTestBasic(use_resource=False)

  @test_util.run_in_graph_and_eager_modes(reset_test=True)
  def testResourceBasic(self):
    self.doTestBasic(use_resource=True)

  def testBasicCallableParams(self):
    with context.eager_mode():
      self.doTestBasic(use_resource=True, use_callable_params=True)

  def testTensorLearningRate(self):
    """A tensor-valued learning rate must behave like a float one."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = lazy_adam_optimizer.LazyAdamOptimizer(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSharing(self):
    """Two update ops from one optimizer must share slots/accumulators."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = lazy_adam_optimizer.LazyAdamOptimizer()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        # Run 3 steps of intertwined Adam1 and Adam2.
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testTwoSessions(self):
    """Optimizer state must be keyed by graph, not shared globally."""
    optimizer = lazy_adam_optimizer.LazyAdamOptimizer()

    with context.eager_mode():
      var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
      grads0 = constant_op.constant(np.array([0.1, 0.1]))
      optimizer.apply_gradients([(grads0, var0)])

    g = ops.Graph()
    with g.as_default():
      with self.session(graph=g):
        var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
        grads0 = constant_op.constant(np.array([0.1, 0.1]))
        optimizer.apply_gradients([(grads0, var0)])

    gg = ops.Graph()
    with gg.as_default():
      with self.session(graph=gg):
        var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
        grads0 = constant_op.constant(np.array([0.1, 0.1]))

        # If the optimizer saves any state not keyed by graph the following line
        # fails.
        optimizer.apply_gradients([(grads0, var0)])

  def testSlotsUniqueEager(self):
    with context.eager_mode():
      v1 = resource_variable_ops.ResourceVariable(1.)
      v2 = resource_variable_ops.ResourceVariable(1.)
      opt = lazy_adam_optimizer.LazyAdamOptimizer(1.)
      opt.minimize(lambda: v1 + v2)
      # There should be two non-slot variables, and two unique slot variables
      # for v1 and v2 respectively.
      self.assertEqual(6, len(set(opt.variables())))
# Standard TF test entry point: discover and run all cases in this module.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import requests
import six
from ironic.common import exception
from ironic.drivers.modules import agent_client
from ironic.tests import base
class MockResponse(object):
    """Minimal stand-in for a requests.Response carrying a text body."""

    def __init__(self, text):
        # The agent client only ever deals in textual payloads.
        assert isinstance(text, six.string_types)
        self.text = text

    def json(self):
        """Decode the stored body as JSON, mirroring Response.json()."""
        return json.loads(self.text)
class MockNode(object):
    """Light-weight fake of an Ironic node for agent-client tests."""

    def __init__(self):
        self.uuid = 'uuid'
        self.driver_info = {}
        # Minimal internal info the client reads: where the agent listens
        # and which clean-step version it reported.
        self.driver_internal_info = {
            'agent_url': "http://127.0.0.1:9999",
            'clean_version': {'generic': '1'}
        }
        self.instance_info = {}

    def as_dict(self):
        """Serialize the fake node the way the real node object would."""
        return dict(uuid=self.uuid,
                    driver_info=self.driver_info,
                    driver_internal_info=self.driver_internal_info,
                    instance_info=self.instance_info)
class TestAgentClient(base.TestCase):
    """Unit tests for agent_client.AgentClient.

    The HTTP session is replaced with a mock, so no real requests are
    made; tests assert on the URLs, bodies and headers the client would
    send to the deploy agent.
    """

    def setUp(self):
        super(TestAgentClient, self).setUp()
        self.client = agent_client.AgentClient()
        # Mocked session: tests inspect .post calls instead of real HTTP.
        self.client.session = mock.Mock(autospec=requests.Session)
        self.node = MockNode()

    def test__get_command_url(self):
        """Command URL is derived from the node's agent_url."""
        command_url = self.client._get_command_url(self.node)
        expected = self.node.driver_internal_info['agent_url'] + '/v1/commands'
        self.assertEqual(expected, command_url)

    def test__get_command_url_fail(self):
        """Missing agent_url means the agent cannot be addressed."""
        del self.node.driver_internal_info['agent_url']
        self.assertRaises(exception.IronicException,
                          self.client._get_command_url,
                          self.node)

    def test__get_command_body(self):
        """Command body is a JSON object with name and params."""
        expected = json.dumps({'name': 'prepare_image', 'params': {}})
        self.assertEqual(expected,
                         self.client._get_command_body('prepare_image', {}))

    def test__command(self):
        """_command POSTs the serialized command and returns decoded JSON."""
        response_data = {'status': 'ok'}
        response_text = json.dumps(response_data)
        self.client.session.post.return_value = MockResponse(response_text)
        method = 'standby.run_image'
        image_info = {'image_id': 'test_image'}
        params = {'image_info': image_info}

        url = self.client._get_command_url(self.node)
        body = self.client._get_command_body(method, params)
        headers = {'Content-Type': 'application/json'}

        response = self.client._command(self.node, method, params)
        self.assertEqual(response, response_data)
        self.client.session.post.assert_called_once_with(
            url,
            data=body,
            headers=headers,
            params={'wait': 'false'})

    def test__command_fail_json(self):
        """A non-JSON agent reply surfaces as an IronicException."""
        response_text = 'this be not json matey!'
        self.client.session.post.return_value = MockResponse(response_text)
        method = 'standby.run_image'
        image_info = {'image_id': 'test_image'}
        params = {'image_info': image_info}

        url = self.client._get_command_url(self.node)
        body = self.client._get_command_body(method, params)
        headers = {'Content-Type': 'application/json'}

        self.assertRaises(exception.IronicException,
                          self.client._command,
                          self.node, method, params)
        # The POST itself still happened exactly once.
        self.client.session.post.assert_called_once_with(
            url,
            data=body,
            headers=headers,
            params={'wait': 'false'})

    def test_get_commands_status(self):
        """Status query unwraps the 'commands' list from the reply."""
        with mock.patch.object(self.client.session, 'get') as mock_get:
            res = mock.Mock()
            res.json.return_value = {'commands': []}
            mock_get.return_value = res
            self.assertEqual([], self.client.get_commands_status(self.node))

    @mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
    def test_prepare_image(self):
        """prepare_image issues standby.prepare_image without waiting."""
        self.client._command = mock.Mock()
        image_info = {'image_id': 'image'}
        params = {'image_info': image_info}

        self.client.prepare_image(self.node,
                                  image_info,
                                  wait=False)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='standby.prepare_image',
                                                     params=params,
                                                     wait=False)

    @mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
    def test_prepare_image_with_configdrive(self):
        """A configdrive in instance_info is forwarded in the params."""
        self.client._command = mock.Mock()
        configdrive_url = 'http://swift/configdrive'
        self.node.instance_info['configdrive'] = configdrive_url
        image_info = {'image_id': 'image'}
        params = {
            'image_info': image_info,
            'configdrive': configdrive_url,
        }

        self.client.prepare_image(self.node,
                                  image_info,
                                  wait=False)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='standby.prepare_image',
                                                     params=params,
                                                     wait=False)

    @mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
    def test_start_iscsi_target(self):
        """start_iscsi_target issues the iscsi command and waits."""
        self.client._command = mock.Mock()
        iqn = 'fake-iqn'
        params = {'iqn': iqn}

        self.client.start_iscsi_target(self.node, iqn)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='iscsi.start_iscsi_target',
                                                     params=params,
                                                     wait=True)

    @mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
    def test_install_bootloader(self):
        """install_bootloader forwards root and EFI partition UUIDs."""
        self.client._command = mock.Mock()
        root_uuid = 'fake-root-uuid'
        efi_system_part_uuid = 'fake-efi-system-part-uuid'
        params = {'root_uuid': root_uuid,
                  'efi_system_part_uuid': efi_system_part_uuid}

        self.client.install_bootloader(
            self.node, root_uuid, efi_system_part_uuid=efi_system_part_uuid)
        self.client._command.assert_called_once_with(
            node=self.node, method='image.install_bootloader', params=params,
            wait=True)

    def test_get_clean_steps(self):
        """get_clean_steps sends the serialized node and its ports."""
        self.client._command = mock.Mock()
        ports = []
        expected_params = {
            'node': self.node.as_dict(),
            'ports': []
        }

        self.client.get_clean_steps(self.node,
                                    ports)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='clean.get_clean_steps',
                                                     params=expected_params,
                                                     wait=True)

    def test_execute_clean_step(self):
        """execute_clean_step sends step, node, ports and clean version."""
        self.client._command = mock.Mock()
        ports = []
        step = {'priority': 10, 'step': 'erase_devices', 'interface': 'deploy'}
        expected_params = {
            'step': step,
            'node': self.node.as_dict(),
            'ports': [],
            # NOTE(review): MockNode only sets 'clean_version', so this
            # lookup yields None here — presumably intentional; confirm.
            'clean_version': self.node.driver_internal_info.get(
                'hardware_manager_version')
        }
        self.client.execute_clean_step(step,
                                       self.node,
                                       ports)
        self.client._command.assert_called_once_with(node=self.node,
                                                     method='clean.execute_clean_step',
                                                     params=expected_params,
                                                     wait=False)
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import operator
import os
import time
import flask
from oslo_config import cfg
from oslo_log import log as logging
import six
from stackalytics.dashboard import decorators
from stackalytics.dashboard import helpers
from stackalytics.dashboard import kpi
from stackalytics.dashboard import parameters
from stackalytics.dashboard import reports
from stackalytics.dashboard import vault
from stackalytics.processor import config
from stackalytics.processor import utils
# Application objects ---------

# Flask app for the dashboard; blueprints contribute the /report and
# KPI endpoints on top of the handlers defined below.
app = flask.Flask(__name__)
app.config.from_object(__name__)
# Optional external config file; silently skipped when the env var is unset.
app.config.from_envvar('DASHBOARD_CONF', silent=True)
app.register_blueprint(reports.blueprint)
app.register_blueprint(kpi.blueprint)

LOG = logging.getLogger(__name__)

# Register runtime-storage connection and dashboard options with oslo.config.
conf = cfg.CONF
conf.register_opts(config.CONNECTION_OPTS + config.DASHBOARD_OPTS)
# Handlers ---------
@app.route('/')
@decorators.templated()
def overview():
    """Overview page; rendering is done by the @templated decorator."""
    pass


@app.route('/widget')
def widget():
    """Embeddable widget page."""
    return flask.render_template('widget.html')
# AJAX Handlers ---------
def _get_aggregated_stats(records, metric_filter, keys, param_id,
                          param_title=None, finalize_handler=None):
    """Aggregate records into per-key stat rows, sorted by metric.

    Each row gets an 'id' (the value of *param_id* on the record), a
    'metric' and a display 'name' taken from *param_title* (defaults to
    *param_id*). Rows with a zero metric are dropped, the optional
    *finalize_handler* may transform or discard rows, and the surviving
    rows are indexed by utils.add_index.
    """
    title_attr = param_title or param_id
    stats = {key: {'metric': 0, 'id': key} for key in keys}
    context = {'vault': vault.get_vault()}

    for record in records:
        key = getattr(record, param_id)
        if metric_filter:
            # Delegate metric accumulation to the caller-supplied filter.
            metric_filter(stats, record, param_id, context)
        else:
            # Default metric: plain record count.
            stats[key]['metric'] += 1
        stats[key]['name'] = getattr(record, title_attr)

    response = [row for row in stats.values() if row['metric']]
    if finalize_handler:
        response = [row for row in map(finalize_handler, response) if row]
    response.sort(key=lambda row: row['metric'], reverse=True)
    utils.add_index(response, item_filter=lambda row: row['id'] != '*independent')
    return response
@app.route('/api/1.0/new_companies')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['start_date'])
def get_new_companies(records, **kwargs):
    """Companies whose earliest recorded activity falls within the window."""
    days = int(flask.request.args.get('days') or reports.DEFAULT_DAYS_COUNT)
    cutoff = int(time.time()) - days * 24 * 60 * 60

    # Track the earliest activity date seen for every company.
    first_seen = {}
    for record in records:
        company = record.company_name
        date = record.date
        if first_seen.get(company, date + 1) > date:
            first_seen[company] = date

    response = [{'name': company,
                 'date': date,
                 'date_str': helpers.format_date(date)}
                for company, date in first_seen.items()
                if date >= cutoff]
    response.sort(key=lambda x: x['date'], reverse=True)
    utils.add_index(response)
    return response
@app.route('/api/1.0/stats/companies')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_companies(records, metric_filter, finalize_handler, **kwargs):
    """Aggregated stats grouped by company for the current filter set."""
    return _get_aggregated_stats(records, metric_filter,
                                 vault.get_memory_storage().get_companies(),
                                 'company_name',
                                 finalize_handler=finalize_handler)


@app.route('/api/1.0/stats/modules')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_modules(records, metric_filter, finalize_handler, **kwargs):
    """Aggregated stats grouped by module for the current filter set."""
    return _get_aggregated_stats(records, metric_filter,
                                 vault.get_memory_storage().get_modules(),
                                 'module', finalize_handler=finalize_handler)
def get_core_engineer_branch(user, modules):
    """Return the branch on which *user* is a core reviewer for any of
    *modules*, or None when they are core on none of them.

    'master' wins over stable branches when both are present.
    """
    found = None
    for module, branch in user.get('core') or []:
        if module not in modules:
            continue
        found = branch
        if branch == 'master':  # master is preferable, but stables are ok
            break
    return found
@app.route('/api/1.0/stats/engineers')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_engineers(records, metric_filter, finalize_handler, **kwargs):
    """Per-engineer stats; each row is annotated with core-reviewer branch."""
    modules_names = parameters.get_parameter(kwargs, 'module')
    modules = set([m for m, r in vault.resolve_modules(modules_names, [''])])

    def postprocessing(record):
        # Apply the metric-specific finalizer first, then attach the
        # user's core status for the selected modules.
        if finalize_handler:
            record = finalize_handler(record)
        user = vault.get_user_from_runtime_storage(record['id'])
        record['core'] = get_core_engineer_branch(user, modules)
        return record

    return _get_aggregated_stats(records, metric_filter,
                                 vault.get_memory_storage().get_user_ids(),
                                 'user_id', 'author_name',
                                 finalize_handler=postprocessing)
@app.route('/api/1.0/stats/engineers_extended')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['metric'])
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['metric'])
def get_engineers_extended(records, **kwargs):
    """Per-engineer breakdown across all activity types.

    Unlike get_engineers, this ignores the selected metric and counts
    every record type (mark/review/commit/email/patch) separately,
    dropping engineers with no activity of any kind.
    """
    modules_names = parameters.get_parameter(kwargs, 'module')
    modules = set([m for m, r in vault.resolve_modules(modules_names, [''])])

    def postprocessing(record):
        # Finalize mark stats, then drop rows with no activity at all and
        # annotate survivors with company and core status.
        record = decorators.mark_finalize(record)

        if not (record['mark'] or record['review'] or record['commit'] or
                record['email'] or record['patch']):
            return

        user = vault.get_user_from_runtime_storage(record['id'])
        record['company'] = helpers.get_current_company(user)
        record['core'] = get_core_engineer_branch(user, modules)
        return record

    def record_processing(result, record, param_id):
        # Count this record under its record_type; marks additionally feed
        # the mark-specific accumulator.
        result_row = result[getattr(record, param_id)]
        record_type = record.record_type
        result_row[record_type] = result_row.get(record_type, 0) + 1
        if record_type == 'mark':
            decorators.mark_filter(result, record, param_id, {})

    result = {}
    for record in records:
        user_id = record.user_id
        if user_id not in result:
            result[user_id] = {'id': user_id, 'mark': 0, 'review': 0,
                               'commit': 0, 'email': 0, 'patch': 0,
                               'metric': 0}
        record_processing(result, record, 'user_id')
        result[user_id]['name'] = record.author_name

    response = result.values()
    response = [item for item in map(postprocessing, response) if item]
    response.sort(key=lambda x: x['metric'], reverse=True)
    utils.add_index(response)
    return response
@app.route('/api/1.0/stats/distinct_engineers')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
def get_distinct_engineers(records, **kwargs):
    """Map user_id -> author name/email over the filtered records."""
    engineers = {}
    for rec in records:
        # Later records overwrite earlier ones for the same user.
        engineers[rec.user_id] = dict(author_name=rec.author_name,
                                      author_email=rec.author_email)
    return engineers
@app.route('/api/1.0/activity')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('activity')
@decorators.record_filter()
def get_activity_json(records, **kwargs):
    """One page of the activity log for the filtered records."""
    args = flask.request.args
    start_record = int(args.get('start_record') or 0)
    page_size = int(args.get('page_size') or parameters.DEFAULT_RECORDS_LIMIT)
    query_message = args.get('query_message')
    return helpers.get_activity(records, start_record, page_size,
                                query_message)
@app.route('/api/1.0/contribution')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['metric'])
@decorators.jsonify('contribution')
@decorators.record_filter(ignore=['metric'])
def get_contribution_json(records, **kwargs):
    """Contribution summary over the filtered records (metric-agnostic)."""
    return helpers.get_contribution_summary(records)
@app.route('/api/1.0/companies')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['company'])
@decorators.jsonify()
@decorators.record_filter(ignore=['company'])
def get_companies_json(record_ids, **kwargs):
    """Company options for the filter drop-down as id/text pairs."""
    memory_storage = vault.get_memory_storage()
    companies = set(company
                    for company in memory_storage.get_index_keys_by_record_ids(
                        'company_name', record_ids))

    # Ensure the currently selected company is always listed, even when
    # no filtered record mentions it.
    if kwargs['_params']['company']:
        companies.add(memory_storage.get_original_company_name(
            kwargs['_params']['company'][0]))

    return [{'id': c.lower().replace('&', ''), 'text': c}
            for c in sorted(companies)]
@app.route('/api/1.0/modules')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['module'])
@decorators.jsonify()
@decorators.record_filter(ignore=['module'])
def get_modules_json(record_ids, **kwargs):
    """Module/module-group options for the filter drop-down.

    Module groups that contain any mentioned module are included as
    well; an optional 'tag' parameter restricts the result.
    """
    module_id_index = vault.get_vault()['module_id_index']
    tags = parameters.get_parameter(kwargs, 'tag', plural_name='tags')

    # all modules mentioned in records
    module_ids = vault.get_memory_storage().get_index_keys_by_record_ids(
        'module', record_ids)

    # Pull in every group whose member modules intersect the mentioned set.
    add_modules = set([])
    for module in six.itervalues(module_id_index):
        if set(module['modules']) & module_ids:
            add_modules.add(module['id'])
    module_ids |= add_modules

    # keep only modules with specified tags
    if tags:
        module_ids = set(module_id for module_id in module_ids
                         if ((module_id in module_id_index) and
                             (module_id_index[module_id].get('tag') in tags)))

    result = []
    for module_id in module_ids:
        module = module_id_index[module_id]
        result.append({'id': module['id'],
                       'text': module['module_group_name'],
                       'tag': module['tag']})

    return sorted(result, key=operator.itemgetter('text'))
@app.route('/api/1.0/companies/<company_name>')
@decorators.response()
@decorators.cached()
@decorators.jsonify('company')
def get_company(company_name, **kwargs):
    """Resolve one company by case-insensitive name; 404 when unknown."""
    memory_storage_inst = vault.get_memory_storage()
    for company in memory_storage_inst.get_companies():
        if company.lower() == company_name.lower():
            return {
                'id': company_name,
                'text': memory_storage_inst.get_original_company_name(
                    company_name)
            }
    flask.abort(404)


@app.route('/api/1.0/modules/<module_id>')
@decorators.response()
@decorators.cached()
@decorators.jsonify('module')
def get_module(module_id, **kwargs):
    """Return the extended description of one module; 404 when unknown."""
    module = helpers.extend_module(module_id)
    if not module:
        flask.abort(404)
    return module
@app.route('/api/1.0/members')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['release', 'project_type', 'module'])
@decorators.jsonify('members')
@decorators.record_filter(ignore=['release', 'project_type', 'module'])
def get_members(records, **kwargs):
    """Member records (name, date, company, profile URI), newest first."""
    response = []
    for record in records:
        record = vault.extend_record(record)
        # Keep only the fields the members view displays.
        nr = dict([(k, record[k]) for k in
                   ['author_name', 'date', 'company_name', 'member_uri']])
        nr['date_str'] = helpers.format_date(nr['date'])
        response.append(nr)

    response.sort(key=lambda x: x['date'], reverse=True)
    utils.add_index(response)

    return response
@app.route('/api/1.0/stats/bp')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
def get_bpd(records, **kwargs):
    """Blueprint stats sorted by mention count, with a link per blueprint."""
    result = []
    for record in records:
        # 'bpd'/'bpc' record types only — presumably blueprint draft and
        # blueprint completion records; confirm against the processor.
        if record.record_type in ['bpd', 'bpc']:
            record = vault.extend_record(record)
            mention_date = record.get('mention_date')
            if mention_date:
                date = helpers.format_date(mention_date)
            else:
                date = 'never'
            result.append({
                'date': date,
                'status': record['lifecycle_status'],
                'metric': record.get('mention_count') or 0,
                'id': record['name'],
                'name': record['name'],
                'link': helpers.make_blueprint_link(record['module'],
                                                    record['name'])
            })

    result.sort(key=lambda x: x['metric'], reverse=True)
    utils.add_index(result)

    return result
@app.route('/api/1.0/users')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['user_id'])
@decorators.jsonify()
@decorators.record_filter(ignore=['user_id'])
def get_users_json(record_ids, **kwargs):
    """User options for the filter drop-down.

    With the 'core_in' parameter set (comma-separated branch names),
    only users who are core on one of those branches in a valid module
    are returned, annotated with their core modules and company.
    """
    core_in = parameters.get_single_parameter(kwargs, 'core_in') or None
    valid_modules = set()
    if core_in:
        core_in = set(core_in.split(','))
        valid_modules = vault.resolve_project_types(
            kwargs['_params']['project_type'])
        valid_modules = set(m[0] for m in vault.resolve_modules(
            valid_modules, kwargs['_params']['release']))

    user_ids = vault.get_memory_storage().get_index_keys_by_record_ids(
        'user_id', record_ids)
    # Always include the currently selected user.
    if kwargs['_params']['user_id']:
        user_ids.add(kwargs['_params']['user_id'][0])

    result = []
    for user_id in user_ids:
        user = vault.get_user_from_runtime_storage(user_id)
        r = {'id': user_id, 'text': user['user_name']}

        # Without core_in everyone qualifies; with it, only matching cores.
        add_flag = not core_in
        if core_in and user.get('core'):
            core_modules = [module_branch[0] for module_branch in user['core']
                            if (module_branch[1] in core_in and
                                module_branch[0] in valid_modules)]
            if core_modules:
                r['core'] = core_modules
                if user['companies']:
                    r['company_name'] = helpers.get_current_company(user)
                add_flag = True
        if add_flag:
            result.append(r)

    result.sort(key=lambda x: x['text'])
    return result
@app.route('/api/1.0/users/<user_id>')
@decorators.response()
@decorators.jsonify('user')
def get_user(user_id):
    """Return extended details for one user; abort with 404 if unknown."""
    found = vault.get_user_from_runtime_storage(user_id)
    if not found:
        flask.abort(404)
    return helpers.extend_user(found)
@app.route('/api/1.0/releases')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_releases_json(**kwargs):
    """List selectable releases together with the default release name."""
    options = []
    for release in vault.get_release_options():
        name = release['release_name']
        options.append({'id': name, 'text': name.capitalize()})
    return options, parameters.get_default('release')
@app.route('/api/1.0/metrics')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_metrics_json(**kwargs):
    """List available metrics sorted by label, plus the default metric."""
    entries = [{'id': metric, 'text': label}
               for metric, label in six.iteritems(parameters.METRIC_LABELS)]
    entries.sort(key=operator.itemgetter('text'))
    return entries, parameters.get_default('metric')
@app.route('/api/1.0/project_types')
@decorators.response()
@decorators.exception_handler()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_project_types_json(**kwargs):
    """List project types (id, title, child flag) plus the default type."""
    entries = []
    for project_type in vault.get_project_types():
        entries.append({'id': project_type['id'],
                        'text': project_type['title'],
                        'child': project_type.get('child', False)})
    return entries, parameters.get_default('project_type')
@app.route('/api/1.0/affiliation_changes')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('affiliation_changes')
def get_company_changes(**kwargs):
    """Return company affiliation changes that fall into the date range.

    Query args ``start_days`` / ``end_days`` (date strings) bound the
    range; the default window is the last 365 days up to now. For every
    user with more than one company entry, each transition whose date lies
    within [start_date, end_date] is reported.
    """
    start_days = str(flask.request.args.get('start_days') or
                     utils.timestamp_to_date(int(time.time()) -
                                             365 * 24 * 60 * 60))
    end_days = str(flask.request.args.get('end_days') or
                   utils.timestamp_to_date(int(time.time())))
    start_date = utils.date_to_timestamp_ext(start_days)
    end_date = utils.date_to_timestamp_ext(end_days)
    runtime_storage = vault.get_runtime_storage()
    result = []
    for user in runtime_storage.get_all_users():
        companies = user.get('companies') or []
        if len(companies) < 2:
            # a single (or no) affiliation means no change to report
            continue
        companies_iter = iter(companies)
        # Bug fix: the Python 2-only iterator method .next() breaks on
        # Python 3; the builtin next() works on both.
        company = next(companies_iter)
        old_company_name = company['company_name']
        date = company['end_date']
        for company in companies_iter:
            new_company_name = company['company_name']
            if start_date <= date <= end_date:
                result.append({
                    'user_id': user['user_id'],
                    'user_name': user['user_name'],
                    'old_company_name': old_company_name,
                    'new_company_name': new_company_name,
                    'date': date,
                })
            # the end date of each entry is the transition date to the next
            old_company_name = new_company_name
            date = company['end_date']
    return result
def _get_week(kwargs, param_name):
    """Resolve the date parameter *param_name* to a week number.

    Falls back to the vault-wide default timestamp stored under the same
    key when the parameter is absent or empty.
    """
    raw_value = parameters.get_single_parameter(kwargs, param_name)
    timestamp = (utils.date_to_timestamp_ext(raw_value) if raw_value
                 else vault.get_vault()[param_name])
    return utils.timestamp_to_week(timestamp)
@app.route('/api/1.0/stats/timeline')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('timeline')
@decorators.record_filter(ignore=['release', 'start_date'])
def timeline(records, **kwargs):
    """Build the per-week series consumed by the timeline plugin.

    Returns three parallel arrays of ``[week_label, value]`` pairs:
    [total commits/metric counts, highlighted counts (selected release,
    or members since ``start_date``), lines of code].
    """
    # find start and end dates
    metric = parameters.get_parameter(kwargs, 'metric')
    start_date = int(parameters.get_single_parameter(kwargs, 'start_date')
                     or 0)
    release_name = parameters.get_single_parameter(kwargs, 'release') or 'all'
    releases = vault.get_vault()['releases']
    if 'all' in release_name:
        # no specific release: take the requested (or default) date window
        start_week = release_start_week = _get_week(kwargs, 'start_date')
        end_week = release_end_week = _get_week(kwargs, 'end_date')
    else:
        release = releases[release_name]
        start_week = release_start_week = utils.timestamp_to_week(
            release['start_date'])
        end_week = release_end_week = utils.timestamp_to_week(
            release['end_date'])
    now = utils.timestamp_to_week(int(time.time())) + 1
    # expand start-end to year if needed
    if release_end_week - release_start_week < 52:
        # widen symmetrically to 52 weeks, but never beyond the current week
        expansion = (52 - (release_end_week - release_start_week)) // 2
        if release_end_week + expansion < now:
            end_week += expansion
        else:
            end_week = now
        start_week = end_week - 52
    # empty stats for all weeks in range
    weeks = range(start_week, end_week)
    week_stat_loc = dict((c, 0) for c in weeks)
    week_stat_commits = dict((c, 0) for c in weeks)
    week_stat_commits_hl = dict((c, 0) for c in weeks)
    if ('commits' in metric) or ('loc' in metric):
        handler = lambda record: record.loc
    else:
        handler = lambda record: 0
    # fill stats with the data
    if 'person-day' in metric:
        # special case for man-day effort metric
        # count distinct active users per day, then aggregate days into weeks
        release_stat = collections.defaultdict(set)
        all_stat = collections.defaultdict(set)
        for record in records:
            if start_week <= record.week < end_week:
                day = utils.timestamp_to_day(record.date)
                user_id = record.user_id
                if record.release == release_name:
                    release_stat[day].add(user_id)
                all_stat[day].add(user_id)
        for day, users in six.iteritems(release_stat):
            week = utils.timestamp_to_week(day * 24 * 3600)
            week_stat_commits_hl[week] += len(users)
        for day, users in six.iteritems(all_stat):
            week = utils.timestamp_to_week(day * 24 * 3600)
            week_stat_commits[week] += len(users)
    else:
        for record in records:
            week = record.week
            if start_week <= week < end_week:
                week_stat_loc[week] += handler(record)
                week_stat_commits[week] += 1
                if 'members' in metric:
                    # members metric highlights joins after start_date
                    if record.date >= start_date:
                        week_stat_commits_hl[week] += 1
                else:
                    if record.release == release_name:
                        week_stat_commits_hl[week] += 1
    if 'all' == release_name and 'members' not in metric:
        # with no release selected, the highlight series equals the totals
        week_stat_commits_hl = week_stat_commits
    # form arrays in format acceptable to timeline plugin
    array_loc = []
    array_commits = []
    array_commits_hl = []
    for week in weeks:
        week_str = utils.week_to_date(week)
        array_loc.append([week_str, week_stat_loc[week]])
        array_commits.append([week_str, week_stat_commits[week]])
        array_commits_hl.append([week_str, week_stat_commits_hl[week]])
    return [array_commits, array_commits_hl, array_loc]
@app.template_test()
def too_old(timestamp):
    """Jinja template test: is *timestamp* older than the configured
    ``age_warn`` threshold?"""
    return timestamp + cfg.CONF.age_warn < time.time()
def main():
    """Configure logging/config and start the dashboard web application.

    Configuration is loaded from the file named by the STACKALYTICS_CONF
    environment variable when it exists; otherwise from the default
    'stackalytics' project config locations.
    """
    logging.register_options(conf)
    logging.set_defaults()
    conf_file = os.getenv('STACKALYTICS_CONF')
    if conf_file and os.path.isfile(conf_file):
        conf(default_config_files=[conf_file])
        app.config['DEBUG'] = cfg.CONF.debug
        LOG.info('Stackalytics.dashboard is configured via "%s"', conf_file)
    else:
        conf(project='stackalytics')
    logging.setup(conf, 'stackalytics.dashboard')
    app.run(cfg.CONF.listen_host, cfg.CONF.listen_port)
# Script entry point.
if __name__ == '__main__':
    main()
| |
# -*- coding: ascii -*-
r"""
:Copyright:
Copyright 2006 - 2015
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=====================
Markup Parser Logic
=====================
Soup Parser
~~~~~~~~~~~
This module provides a very lenient HTML/XML lexer. The `SoupLexer` class is
initialized with a listener object, which receives all low level events
(like starttag, endtag, text etc). Listeners must implement the
`ListenerInterface`.
On top of the lexer there's `SoupParser` class, which actually implements the
`ListenerInterface` itself (the parser listens to the lexer). The parser adds
HTML semantics to the lexed data and passes the events to a building listener
(`BuildingListenerInterface`). In addition to the events sent by the lexer the
`SoupParser` class generates endtag events (with empty data arguments) for
implicitly closed elements. Furthermore it knows about CDATA elements like
``<script>`` or ``<style>`` and modifies the lexer state accordingly.
The actual semantics are provided by a DTD query class (implementing
`DTDInterface`.)
"""
if __doc__:
    # pylint: disable = redefined-builtin
    # Decode the escaped (ascii-only) module docstring so that non-ASCII
    # characters (e.g. the author's name) render correctly.
    __doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
import re as _re
from ..._exceptions import LexerEOFError, LexerFinalizedError
from ... import interfaces as _interfaces
from . import dtd as _dtd
class SoupLexer(object):
    """
    (X)HTML Tagsoup Lexer
    The lexer works hard to preserve the original data. In order to achieve
    this goal, it does not validate the input and recognizes its input in a
    quite lenient way.
    :Groups:
      - `Lexer states` :
        `TEXT`,
        `CDATA`,
        `MARKUP`,
        `STARTTAG`,
        `ENDTAG`,
        `COMMENT`,
        `MSECTION`,
        `DECL`,
        `PI`,
        `EMPTY`,
        `FINAL`
      - `Regex Matchers` :
        `_START_MATCH`,
        `_ATT_ITER`,
        `_COMMENT_SEARCH`,
        `_MSECTION_MATCH`,
        `_MSECTIONINVALID_MATCH`,
        `_MEND_SEARCH`,
        `_MSEND_SEARCH`,
        `_DECL_MATCH`
    :CVariables:
      `TEXT` : ``int``
        Lexer state ``TEXT`` (between tags)
      `CDATA` : ``int``
        Lexer state ``CDATA`` (between (P)CDATA tags)
      `MARKUP` : ``int``
        Lexer state ``MARKUP`` (``<``)
      `STARTTAG` : ``int``
        Lexer state ``STARTTAG`` (``<[letter]``)
      `ENDTAG` : ``int``
        Lexer state ``ENDTAG`` (``</``)
      `COMMENT` : ``int``
        Lexer state ``COMMENT`` (``<!--``)
      `MSECTION` : ``int``
        Lexer state ``MSECTION`` (``<![``)
      `DECL` : ``int``
        Lexer state ``DECL`` (``<!``)
      `PI` : ``int``
        Lexer state ``PI`` (``<?``)
      `EMPTY` : ``int``
        Lexer state ``EMPTY`` (``<>``)
      `FINAL` : ``int``
        Lexer state ``FINAL``
      `_LEXERS` : ``tuple``
        The state lexer method names (``('method', ...)``)
      `_STATES` : ``tuple``
        The state names (``('name', ...)``)
    :IVariables:
      `_state` : ``int``
        The current lexer state
      `_lexers` : ``list``
        The state lexer methods (``[method, ...]``)
      `_listener` : `ListenerInterface`
        The listener the events shall be sent to
      `_buffer` : ``str``
        Current unprocessed buffer
      `_conditional_ie_comments` : ``bool``
        Handle conditional IE comments as text?
    """
    # NOTE: the state constants (TEXT, CDATA, ...) and the _LEXERS/_STATES
    # tables are attached to this class *after* its definition, by the
    # registration loop below the class body -- hence the disable.
    # pylint: disable = no-member
    def __init__(self, listener, conditional_ie_comments=True):
        r"""
        Initialization
        :Parameters:
          `listener` : `ListenerInterface`
            The event listener
          `conditional_ie_comments` : ``bool``
            Handle conditional IE comments as text?
            Conditional comments are described in full detail
            at `MSDN`_\.
        .. _MSDN: http://msdn.microsoft.com/en-us/library/
                  ms537512%28v=vs.85%29.aspx
        """
        self._listener = listener
        self._normalize = None
        self._cdata_name = None
        # start in TEXT state; _lexers maps state number -> handler method
        self._state = self.TEXT
        self._lexers = [getattr(self, name) for name in self._LEXERS]
        self._buffer = ''
        self._conditional_ie_comments = bool(conditional_ie_comments)
    def feed(self, food):
        """
        Feed the lexer with new data
        :Parameters:
          `food` : ``str``
            The data to process
        """
        self._buffer += food
        self._lex()
    def finalize(self):
        """
        Finalize the lexer
        This processes the rest buffer (if any)
        :Exceptions:
          - `LexerEOFError` : The rest buffer could not be consumed
        """
        self._lex()
        if self._buffer:
            # leftover input means the stream ended mid-construct
            raise LexerEOFError(
                "Unfinished parser state %s" % self._STATES[self._state]
            )
        self._state = self.FINAL
    def cdata(self, normalize, name):
        """ Set CDATA state (everything is text until the matching endtag) """
        if self._state != self.FINAL:
            self._state = self.CDATA
            self._normalize = normalize
            self._cdata_name = normalize(name)
    def _lex(self):
        """ Parse the current buffer """
        # Dispatch to the current state's handler until the buffer is
        # drained or a handler reports an unfinished (incomplete) state.
        while self._buffer:
            if self._lexers[self._state]():
                break
    def _lex_text(self):
        """
        Text lexer
        State: We are between tags or at the very beginning of the document
        and look for a ``<``.
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        data = self._buffer
        pos = data.find('<')
        if pos == 0:
            # '<' right away: nothing to emit, just switch state
            self._state = self.MARKUP
            return False
        elif pos == -1:
            # no markup at all: the whole buffer is text
            self._buffer = ''
        else:
            # emit text up to the '<', keep the rest for MARKUP
            self._buffer, data = data[pos:], data[:pos]
            self._state = self.MARKUP
        self._listener.handle_text(data)
        return False
    def _lex_cdata(self):
        """
        (PR)CDATA lexer
        State: We are inside a text element and looking for the end tag only
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        incomplete = False
        data, pos = self._buffer, 0
        while True:
            pos = data.find('<', pos)
            if pos == -1:
                # no candidate endtag: everything is text
                pos = len(data)
                self._buffer = ''
                break
            else:
                char = data[pos + 1:pos + 2]
                if char == '/':
                    # possible endtag: let ENDTAG state decide
                    self._state = self.ENDTAG
                    break
                elif char == '':
                    # buffer ends directly after '<' -- need more input
                    incomplete = True
                    break
                else:
                    pos += 1
        if pos > 0:
            self._buffer, data = data[pos:], data[:pos]
            self._listener.handle_text(data)
        return incomplete
    #: Regex matcher for a tagname character
    #:
    #: :Type: ``callable``
    _TAGNAME_MATCH = _re.compile(r'[a-zA-Z0-9]').match
    def _lex_markup(self):
        """
        Markup lexer
        State: We've hit a ``<`` character and now find out, what it's
        becoming
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        data = self._buffer
        if len(data) < 2:
            return True
        char = data[1]
        # dispatch on the char after '<': '/' -> ENDTAG, '!' -> DECL,
        # '?' -> PI, '>' -> EMPTY; find() == -1 picks the trailing -1
        state = (self.ENDTAG, self.DECL, self.PI, self.EMPTY, -1)[
            "/!?>".find(char)
        ]
        if state == -1:
            if self._TAGNAME_MATCH(char):
                state = self.STARTTAG
            else:
                # lone '<' followed by a non-name char is plain text
                state = self.TEXT
                self._buffer = data[1:]
                self._listener.handle_text(data[0])
        self._state = state
        return False
    #: Regex matcher for a start tag
    #:
    #: :Type: ``callable``
    _START_MATCH = _re.compile(r'''
        <
        (?P<name>[^ \t\r\n\f/>]+)
        (?P<attr>
            [^"'>]*
            (?:
                (?:
                    "[^"]*"
                  | '[^']*'
                )
                [^"'>]*
            )*
        )
        [ \t\r\n\f]*
        >
    ''', _re.X).match
    #: Regex iterator for extracting start tag attributes
    #:
    #: :Type: ``callable``
    _ATT_ITER = _re.compile(r'''
        [ \t\r\n\f]*
        (?P<name>(?:/|[^ \t\r\n\f/=>]*)) # attribute name
        [ \t\r\n\f]*
        (?:
            =
            (?P<value> # optional value
                [ \t\r\n\f]*"[^"]*"
              | [ \t\r\n\f]*'[^']*'
              | [^ \t\r\n\f/>]*
            )
        )?
    ''', _re.X).finditer
    def _lex_start(self):
        """
        Starttag lexer
        State: We've hit a ``<x`` and now look for the ``>``.
        :Return: Unfinished State?
        :Rtype: ``bool``
        """
        data = self._buffer
        match = self._START_MATCH(data)
        if match is None:
            return True
        pos = match.end()
        self._buffer, data = data[pos:], data[:pos]
        name, attrstring = match.group('name', 'attr')
        attr, closed = [], False
        if attrstring:
            for match in self._ATT_ITER(attrstring):
                key, value = match.group('name', 'value')
                if key == '/' and value is None:
                    # trailing '/' marks a self-closing tag
                    closed = True
                    continue
                if key or value is not None:
                    if value:
                        value = value.strip()
                    attr.append((key.strip(), value))
                else: # bug in Python < 2.3.5 (fixed in rev 37262)
                    break
        self._state = self.TEXT
        self._listener.handle_starttag(name, attr, closed, data)
        return False
    def _lex_end(self):
        """
        Endtag lexer
        State: We've hit ``</``.
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        data = self._buffer
        pos = data.find('>') + 1
        if pos == 0:
            return True
        self._buffer, data = data[pos:], data[:pos]
        name = data[2:-1].strip()
        if self._cdata_name is not None and \
                self._normalize(name) != self._cdata_name:
            # inside a CDATA element, only the matching endtag closes it;
            # anything else stays text
            self._state = self.CDATA
            self._listener.handle_text(data)
        else:
            self._cdata_name = self._normalize = None
            self._state = self.TEXT
            self._listener.handle_endtag(name, data)
        return False
    #: Regex searcher for finding the end of a comment
    #:
    #: :Type: ``callable``
    _COMMENT_SEARCH = _re.compile(r'--[ \t\r\n\f]*>').search
    #: Regex searcher for matching IE conditional comment
    #:
    #: :Type: ``callable``
    _IE_COMMENT_MATCH = _re.compile(r'''
        \[[ \t\r\n\f]* (?:
            [iI][fF] | [eE][lL][sS][eE] | [eE][nN][dD][iI][fF]
        ) [^\]]+]>
    ''', _re.X).match
    def _lex_comment(self):
        """
        Comment lexer
        State: We've hit ``<!--``.
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        data = self._buffer
        if len(data) < 7:
            # shortest complete comment is '<!---->' (7 chars)
            return True
        if self._conditional_ie_comments:
            # offset 4 skips the leading '<!--'
            match = iec = self._IE_COMMENT_MATCH(data, 4)
        else:
            match = iec = None
        if match is None:
            match = self._COMMENT_SEARCH(data, 4)
            if match is None:
                return True
        pos = match.end()
        self._buffer, data = data[pos:], data[:pos]
        self._state = self.TEXT
        if iec:
            # IE conditional comments are passed through as text
            self._listener.handle_text(data)
        else:
            self._listener.handle_comment(data)
        return False
    #: List of MS-specific marked section names (lowercased)
    #:
    #: :Type: ``tuple``
    _MSSECTIONS = ('if', 'else', 'endif')
    #: Regex matcher for the start of a marked section
    #:
    #: :Type: ``callable``
    _MSECTION_MATCH = _re.compile(r'''
        <!\[[ \t\r\n\f]*(?P<name>[^\][ \t\r\n\f>]+)(?=[\][ \t\r\n\f>])
    ''', _re.X).match
    #: Regex matcher for the start of an invalid marked section
    #:
    #: :Type: ``callable``
    _MSECTIONINVALID_MATCH = _re.compile(r'<!\[[ \t\r\n\f]*[\][>]').match
    #: Regex searcher for the end of a marked section
    #:
    #: :Type: ``callable``
    _MEND_SEARCH = _re.compile(r'][ \t\r\n\f]*][ \t\r\n\f]*>').search
    #: Regex searcher for the end of a MS specific marked section
    #:
    #: :Type: ``callable``
    _MSEND_SEARCH = _re.compile(r'][ \t\r\n\f]*(?:--)?[ \t\r\n\f]*>').search
    def _lex_msection(self):
        """
        Marked section lexer
        State: We've hit a ``<![`` and now seek the end
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        data = self._buffer
        match = self._MSECTION_MATCH(data)
        if match is None:
            match = self._MSECTIONINVALID_MATCH(data)
            if match is not None: # pass invalid msection as text
                pos = match.end()
                self._buffer = data[pos:]
                data = data[:pos]
                self._state = self.TEXT
                self._listener.handle_text(data)
                return False
            return True
        name = match.group('name')
        start = match.end()
        if self._conditional_ie_comments and name.lower() in self._MSSECTIONS:
            # MS conditional section: ends without the double ']]'
            match = iec = self._MSEND_SEARCH(data, start)
        else:
            pos = data.find('[', start)
            if pos >= 0:
                start = pos + 1
            match = self._MEND_SEARCH(data, start)
            iec = None
        if match is None:
            return True
        pos, end = match.end(), match.start()
        value = data[start:end]
        self._buffer, data = data[pos:], data[:pos]
        self._state = self.TEXT
        if iec:
            # MS conditional sections are passed through as text
            self._listener.handle_text(data)
        else:
            self._listener.handle_msection(name, value, data)
        return False
    #: Regex matcher for a complete declaration
    #:
    #: This regex seems a bit nasty, but it should catch all stuff allowed
    #: in declarations (including doctype). Some day, it probably needs to
    #: be replaced it by real lexer states...
    #:
    #: :Type: ``callable``
    _DECL_MATCH = _re.compile(r'''
        <!
        (?P<name>[^\][ \t\r\n\f>]*)
        (?P<value>
            [^"'<>-]* # any nonspecial
            (?:
                (?:
                    "[^"]*" # double quoted string
                  | '[^']*' # single quoted string (valid?)
                  | <!\[ # marked section
                    [^\]]*
                    (?:
                        ](?![ \t\r\n\f]*][ \t\r\n\f]*>)
                        [^\]]*
                    )*
                    ][ \t\r\n\f]*][ \t\r\n\f]*>
                  | <(?!!\[) # declaration
                    # hopefully not a doctype
                    # (but unlikely, because we are
                    # probably already in a DT subset)
                    [^"'>-]*
                    (?:
                        (?:
                            "[^"]*"
                          | '[^']*'
                          | -- # comment
                            [^-]*
                            (?:-[^-]+)*
                            --
                          | -(?!-) # just a hyphen
                        )
                        [^"'>-]*
                    )*
                    >
                  | -- # comment
                    [^-]*
                    (?:-[^-]+)*
                    --
                  | -(?!-) # just a hyphen
                )
                [^"'<>-]* # more non-specials
            )*
        )
        >
    ''', _re.X).match
    def _lex_decl(self):
        """
        Declaration lexer
        State: We've hit a ``<!`` and now peek inside
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        data = self._buffer
        if len(data) < 3:
            return True
        if data.startswith('<!--'):
            self._state = self.COMMENT
            return False
        elif data.startswith('<!['):
            self._state = self.MSECTION
            return False
        elif data == '<!-':
            # could still become '<!--'; wait for more input
            return True
        match = self._DECL_MATCH(data)
        if match is None:
            return True
        name, value = match.group('name', 'value')
        pos = match.end()
        self._buffer, data = data[pos:], data[:pos]
        self._state = self.TEXT
        self._listener.handle_decl(name, value.strip(), data)
        return False
    def _lex_pi(self):
        """
        Processing instruction lexer
        State: We've hit a ``<?`` and now peek inside
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        data = self._buffer
        pos = data.find('?>', 2)
        if pos == -1:
            return True
        pos += 2
        self._buffer, data = data[pos:], data[:pos]
        self._state = self.TEXT
        self._listener.handle_pi(data)
        return False
    def _lex_empty(self):
        """
        Empty tag lexer
        State: We've hit a ``<>``
        :Return: Unfinished state?
        :Rtype: ``bool``
        """
        self._buffer, data = self._buffer[2:], self._buffer[:2]
        self._state = self.TEXT
        self._listener.handle_starttag('', [], False, data)
        return False
    def _lex_final(self):
        """
        Called after the lexer was finalized
        State: after all
        :Exceptions:
          - `LexerFinalizedError` : The lexer was already finalized
            (raised always)
        """
        raise LexerFinalizedError("The lexer was already finalized")
# Build the state machine tables: assign each lexer state an integer class
# constant on SoupLexer (the index into the handler dispatch list) and
# collect the handler method names / state names into _LEXERS / _STATES.
_LEXERS = []
_STATES = []
for _idx, (_statename, _funcname) in enumerate([
        # pylint: disable = bad-whitespace
        ('FINAL', '_lex_final'),
        ('TEXT', '_lex_text'),
        ('CDATA', '_lex_cdata'),
        ('MARKUP', '_lex_markup'),
        ('STARTTAG', '_lex_start'),
        ('ENDTAG', '_lex_end'),
        ('COMMENT', '_lex_comment'),
        ('MSECTION', '_lex_msection'),
        ('DECL', '_lex_decl'),
        ('PI', '_lex_pi'),
        ('EMPTY', '_lex_empty'),
]): # noqa
    setattr(SoupLexer, _statename, _idx)
    _LEXERS.append(_funcname)
    _STATES.append(_statename)
SoupLexer._LEXERS = tuple(_LEXERS) # pylint: disable = protected-access
SoupLexer._STATES = tuple(_STATES) # pylint: disable = protected-access
# Clean up the module namespace after the registration loop.
del _idx, _statename, _funcname # pylint: disable = undefined-loop-variable
del _LEXERS, _STATES
# Prefer the C implementation of the lexer when the accelerator extension
# is available; otherwise fall back to the pure-Python SoupLexer.
from ... import c
c = c.load('impl')
if c is not None:
    DEFAULT_LEXER = c.SoupLexer
else:
    DEFAULT_LEXER = SoupLexer # pylint: disable = invalid-name
del c
class SoupParser(object):
    """
    =========================
    (X)HTML Tag Soup Parser
    =========================
    Overview
    ~~~~~~~~
    The parser is actually a tagsoup parser by design in order to process
    most of the "HTML" that can be found out there. Of course, if the HTML
    is well-formed and valid, this would be the best. There is only as
    much HTML syntax applied as necessary to parse it. You can influence
    these syntax definitions by picking another lexer. You can change
    the semantics by picking another dtd query class.
    This parser guarantees, that for each not-self-closing starttag event also
    an endtag event is generated (if the endtag is not actually there, the
    data parameter is an empty string). This also happens for empty tags (like
    ``br``). On the other hand, there may be more endtag events than starttag
    events, because of unbalanced or wrongly nested tags.
    Special constructs, which are comments, PIs, marked sections and
    declarations may occur anywhere, i.e. they are not closing elements
    implicitly.
    The default lexer does not deal with NET tags (<h1/Heading/). Neither
    does it handle unfinished starttags by SGML rules like ``<map<area>``.
    It *does* know about empty tags (``<>`` and ``</>``).
    CDATA elements and comments are handled in a simplified way. Once
    the particular state is entered, it's only left, when the accompanying
    end marker was found (``<script>...</script>``, ``<!-- ... -->``).
    Anything in between is text.
    How is it used?
    ~~~~~~~~~~~~~~~
    The parser API is "streamy" on the input side and event based on the
    output side. So, what you need first is a building listener, which will
    receive all generated parser events and process them. Such a listener
    object is expected to implement the `BuildingListenerInterface`.
    Now you create a `SoupParser` instance and pass the listener object to
    the constructor and the parser is ready to be fed. You can feed as many
    chunks of input data you like into the parser by using the `feed`
    method. Every feed call may generate multiple events on the output side.
    When you're done feeding, call the parser's `finalize` method in order
    to clean up. This also flushes pending events to the listener.
    :IVariables:
      `listener` : `BuildingListenerInterface`
        The building listener to send the events to
      `lexer` : `SoupLexer`
        The lexer instance
      `_tagstack` : ``list``
        The current tag stack
      `_inempty` : ``bool``
        indicates if the last tag on the stack is an empty one
      `_lastopen` : ``str``
        Stores the last seen open tag name
    """
    __implements__ = [
        _interfaces.ListenerInterface, _interfaces.ParserInterface
    ]
    def __init__(self, listener, dtd, lexer=None):
        """
        Initialization
        :Parameters:
          `listener` : `ListenerInterface`
            The building listener
          `dtd` : `DTDInterface`
            DTD query object
          `lexer` : ``callable``
            Lexer class/factory. This must be a callable taking an
            event listener and returning a lexer instance. If omitted or
            ``None``, the default lexer will be used (`DEFAULT_LEXER`).
        """
        # _tagstack holds (normalized_name, original_name) pairs
        self._tagstack, self._inempty, self._lastopen = [], False, ''
        self.listener = listener
        self._is_nestable = dtd.nestable
        self._is_cdata = dtd.cdata
        self._is_empty = dtd.empty
        if lexer is None:
            lexer = DEFAULT_LEXER
        # the parser itself listens to the lexer's low-level events
        self._lexer = lexer(self)
        self._normalize = listener.decoder.normalize
    @classmethod
    def html(cls, listener):
        """
        Construct a parser using the `HTMLDTD`
        :Parameters:
          `listener` : `BuildingListenerInterface`
            The building listener
        :Return: The new parser instance
        :Rtype: `SoupParser`
        """
        return cls(listener, _dtd.HTMLDTD())
    @classmethod
    def xml(cls, listener):
        """
        Construct a parser using the `XMLDTD`
        :Parameters:
          `listener` : `ListenerInterface`
            The building listener
        :Return: The new parser instance
        :Rtype: `SoupParser`
        """
        return cls(listener, _dtd.XMLDTD())
    def _close_empty(self):
        """ Ensure we close last empty tag """
        if self._inempty:
            self._inempty = False
            # synthesize an endtag event (empty data) for the empty element
            self.listener.handle_endtag(self._tagstack.pop()[1], '')
    #########################################################################
    # ListenerInterface #####################################################
    #########################################################################
    def handle_text(self, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_text(data)
    def handle_starttag(self, name, attrs, closed, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        if name == '' and not attrs:
            # a bare '<>' tag reuses the most recently opened tag name
            name = self._lastopen
        else:
            self._lastopen = name
        tagstack = self._tagstack
        nestable = self._is_nestable
        starttag = self._normalize(name)
        # implicitly close open tags that may not contain this one
        while tagstack and not nestable(tagstack[-1][0], starttag):
            self.listener.handle_endtag(tagstack.pop()[1], '')
        if closed:
            self.listener.handle_starttag(name, attrs, closed, data)
        else:
            if self._is_cdata(starttag):
                # switch the lexer to CDATA mode (e.g. script/style bodies)
                self._lexer.cdata(self._normalize, starttag)
            self.listener.handle_starttag(name, attrs, closed, data)
            tagstack.append((starttag, name))
            if self._is_empty(starttag):
                self._inempty = True
    def handle_endtag(self, name, data):
        """ :See: `ListenerInterface` """
        tagstack = self._tagstack
        if tagstack:
            if name == '':
                # '</>' closes the innermost open tag
                name = tagstack[-1][1]
            endtag = self._normalize(name)
            if endtag in dict(tagstack):
                # unwind the stack down to the matching open tag,
                # emitting synthetic endtags for everything in between
                toclose, original = tagstack.pop()
                self._inempty = False
                while toclose != endtag:
                    self.listener.handle_endtag(original, '')
                    toclose, original = tagstack.pop()
        self._close_empty()
        self.listener.handle_endtag(name, data)
    def handle_comment(self, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_comment(data)
    def handle_msection(self, name, value, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_msection(name, value, data)
    def handle_decl(self, name, value, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_decl(name, value, data)
    def handle_pi(self, data):
        """ :See: `ListenerInterface` """
        self._close_empty()
        self.listener.handle_pi(data)
    def handle_escape(self, escaped, data):
        """ :See: `ListenerInterface` """
        # escape events are never produced by the soup lexer
        # pylint: disable = unused-argument
        raise AssertionError()
    #########################################################################
    # ParserInterface #######################################################
    #########################################################################
    def feed(self, food):
        """ :See: `ParserInterface` """
        self._lexer.feed(food)
    def finalize(self):
        """
        :See: `ParserInterface`
        :Exceptions:
          - `LexerEOFError` : EOF in the middle of a state
        """
        if self._lexer is not None:
            self._lexer, _ = None, self._lexer.finalize() # noqa
        # close all remaining open tags with synthetic endtag events
        tagstack = self._tagstack
        while tagstack:
            self.listener.handle_endtag(tagstack.pop()[1], '')
# Prefer the C implementation of the parser when the accelerator extension
# is available; otherwise fall back to the pure-Python SoupParser.
from ... import c
c = c.load('impl')
if c is not None:
    DEFAULT_PARSER = c.SoupParser
else:
    DEFAULT_PARSER = SoupParser # pylint: disable = invalid-name
del c
| |
"""Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The compressed dataset size is around 14 Mb compressed. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_tfidf` function will in addition do a simple tf-idf
vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..utils.fixes import in1d
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
# "By date" variant of the 20 newsgroups dataset: the recommended
# point-in-time train/test split.
URL = ("http://people.csail.mit.edu/jrennie/"
       "20Newsgroups/20news-bydate.tar.gz")
# Name of the downloaded archive inside the data home.
ARCHIVE_NAME = "20news-bydate.tar.gz"
# Zipped-pickle cache file holding the extracted train/test Bunches.
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
    """Download the 20 newsgroups data and store it as a zipped pickle.

    Parameters
    ----------
    target_dir: str
        Scratch directory the archive is downloaded to and extracted in;
        it is removed once the cache file has been written.
    cache_path: str
        Destination of the zlib-compressed pickle holding the
        train/test Bunches.

    Returns
    -------
    cache: dict
        Mapping with 'train' and 'test' keys, each a Bunch from
        `load_files`.
    """
    import zlib  # local import: only needed when (re)building the cache

    archive_path = os.path.join(target_dir, ARCHIVE_NAME)
    train_path = os.path.join(target_dir, TRAIN_FOLDER)
    test_path = os.path.join(target_dir, TEST_FOLDER)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    if not os.path.exists(archive_path):
        # logger.warn is a deprecated alias; warning is the documented name
        logger.warning("Downloading dataset from %s (14 MB)", URL)
        opener = urlopen(URL)
        # close the file handle deterministically instead of leaking it
        with open(archive_path, 'wb') as f:
            f.write(opener.read())
    logger.info("Decompressing %s", archive_path)
    with tarfile.open(archive_path, "r:gz") as archive:
        archive.extractall(path=target_dir)
    os.remove(archive_path)
    # Store a zipped pickle. zlib.compress produces the same byte stream as
    # the Python 2-only str.encode('zip') codec used previously, so the
    # on-disk cache format is unchanged, but this also works on Python 3
    # where bytes objects have no .encode().
    cache = dict(train=load_files(train_path, encoding='latin1'),
                 test=load_files(test_path, encoding='latin1'))
    with open(cache_path, 'wb') as f:
        f.write(zlib.compress(pickle.dumps(cache)))
    shutil.rmtree(target_dir)
    return cache
def strip_newsgroup_header(text):
    """Remove the header block of a news-format post.

    Everything up to and including the first blank line is dropped; when
    there is no blank line at all, the result is an empty string.
    """
    return text.partition('\n\n')[2]
# Lines that introduce or contain quoted material: '>' / '|' prefixes and
# common attribution phrases such as "writes:" or "In article".
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
                       r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
    """Drop quoted lines from a news-format post.

    A line is treated as quotation when it matches `_QUOTE_RE`, i.e. it
    starts with '>' or '|' or contains a typical attribution phrase
    ("writes:", "wrote:", ...).
    """
    kept = []
    for line in text.split('\n'):
        if not _QUOTE_RE.search(line):
            kept.append(line)
    return '\n'.join(kept)
def strip_newsgroup_footer(text):
    """Attempt to cut the signature block off a news-format post.

    Heuristic: the signature is separated from the body by the last line
    (ignoring trailing blanks) that is empty or consists only of hyphens.
    When no such separator exists, the text is returned unchanged.
    """
    lines = text.strip().split('\n')
    line_num = len(lines) - 1
    while line_num >= 0:
        if lines[line_num].strip().strip('-') == '':
            break
        line_num -= 1
    if line_num > 0:
        return '\n'.join(lines[:line_num])
    return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
                       shuffle=True, random_state=42,
                       remove=(),
                       download_if_missing=True):
    """Load the filenames and data from the 20 newsgroups dataset.

    Parameters
    ----------
    subset: 'train' or 'test', 'all', optional
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.

    data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    categories: None or collection of string or unicode
        If None (default), load all the categories.
        If not None, list of category names to load (other categories
        ignored).

    shuffle: bool, optional
        Whether or not to shuffle the data: might be important for models that
        make the assumption that the samples are independent and identically
        distributed (i.i.d.), such as stochastic gradient descent.

    random_state: numpy random number generator or seed integer
        Used to shuffle the dataset.

    download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.

    remove: tuple
        May contain any subset of ('headers', 'footers', 'quotes'). Each of
        these are kinds of text that will be detected and removed from the
        newsgroup posts, preventing classifiers from overfitting on
        metadata.

        'headers' removes newsgroup headers, 'footers' removes blocks at the
        ends of posts that look like signatures, and 'quotes' removes lines
        that appear to be quoting another post.

        'headers' follows an exact standard; the other filters are not always
        correct.
    """
    data_home = get_data_home(data_home=data_home)
    cache_path = os.path.join(data_home, CACHE_NAME)
    twenty_home = os.path.join(data_home, "20news_home")
    cache = None
    if os.path.exists(cache_path):
        try:
            # 'zip' str codec: zlib-decompress the cached pickle (Python 2)
            cache = pickle.loads(open(cache_path, 'rb').read().decode('zip'))
        except Exception as e:
            # a corrupted cache is reported, then rebuilt below
            print(80 * '_')
            print('Cache loading failed')
            print(80 * '_')
            print(e)

    if cache is None:
        if download_if_missing:
            cache = download_20newsgroups(target_dir=twenty_home,
                                          cache_path=cache_path)
        else:
            raise IOError('20Newsgroups dataset not found')

    if subset in ('train', 'test'):
        data = cache[subset]
    elif subset == 'all':
        # concatenate the train and test splits into a single Bunch
        data_lst = list()
        target = list()
        filenames = list()
        for subset in ('train', 'test'):
            data = cache[subset]
            data_lst.extend(data.data)
            target.extend(data.target)
            filenames.extend(data.filenames)

        data.data = data_lst
        data.target = np.array(target)
        data.filenames = np.array(filenames)
        data.description = 'the 20 newsgroups by date dataset'
    else:
        raise ValueError(
            "subset can only be 'train', 'test' or 'all', got '%s'" % subset)

    # optional metadata stripping, see the `remove` parameter above
    if 'headers' in remove:
        data.data = [strip_newsgroup_header(text) for text in data.data]
    if 'footers' in remove:
        data.data = [strip_newsgroup_footer(text) for text in data.data]
    if 'quotes' in remove:
        data.data = [strip_newsgroup_quoting(text) for text in data.data]

    if categories is not None:
        labels = [(data.target_names.index(cat), cat) for cat in categories]
        # Sort the categories to have the ordering of the labels
        labels.sort()
        labels, categories = zip(*labels)
        mask = in1d(data.target, labels)
        data.filenames = data.filenames[mask]
        data.target = data.target[mask]
        # searchsorted to have continuous labels
        data.target = np.searchsorted(labels, data.target)
        data.target_names = list(categories)
        # Use an object array to shuffle: avoids memory copy
        data_lst = np.array(data.data, dtype=object)
        data_lst = data_lst[mask]
        data.data = data_lst.tolist()

    if shuffle:
        random_state = check_random_state(random_state)
        indices = np.arange(data.target.shape[0])
        random_state.shuffle(indices)
        data.filenames = data.filenames[indices]
        data.target = data.target[indices]
        # Use an object array to shuffle: avoids memory copy
        data_lst = np.array(data.data, dtype=object)
        data_lst = data_lst[indices]
        data.data = data_lst.tolist()

    return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
    """Load the 20 newsgroups dataset and transform it into tf-idf vectors.

    This is a convenience function; the tf-idf transformation is done using the
    default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
    advanced usage (stopword filtering, n-gram extraction, etc.), combine
    fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.

    Parameters
    ----------
    subset: 'train' or 'test', 'all', optional
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.

    data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    remove: tuple
        May contain any subset of ('headers', 'footers', 'quotes'). Each of
        these are kinds of text that will be detected and removed from the
        newsgroup posts, preventing classifiers from overfitting on
        metadata.

        'headers' removes newsgroup headers, 'footers' removes blocks at the
        ends of posts that look like signatures, and 'quotes' removes lines
        that appear to be quoting another post.

    Returns
    -------
    bunch : Bunch object
        bunch.data: sparse matrix, shape [n_samples, n_features]
        bunch.target: array, shape [n_samples]
        bunch.target_names: list, length [n_classes]
    """
    data_home = get_data_home(data_home=data_home)
    filebase = '20newsgroup_vectorized'
    if remove:
        # the cache file name encodes which filters were applied
        filebase += 'remove-' + ('-'.join(remove))
    target_file = os.path.join(data_home, filebase + ".pk")

    # we shuffle but use a fixed seed for the memoization
    data_train = fetch_20newsgroups(data_home=data_home,
                                    subset='train',
                                    categories=None,
                                    shuffle=True,
                                    random_state=12,
                                    remove=remove)

    data_test = fetch_20newsgroups(data_home=data_home,
                                   subset='test',
                                   categories=None,
                                   shuffle=True,
                                   random_state=12,
                                   remove=remove)

    if os.path.exists(target_file):
        # reuse the memoized vectorization
        X_train, X_test = joblib.load(target_file)
    else:
        vectorizer = CountVectorizer(dtype=np.int16)
        X_train = vectorizer.fit_transform(data_train.data).tocsr()
        X_test = vectorizer.transform(data_test.data).tocsr()
        joblib.dump((X_train, X_test), target_file, compress=9)

    # the data is stored as int16 for compactness
    # but normalize needs floats
    X_train = X_train.astype(np.float64)
    X_test = X_test.astype(np.float64)
    normalize(X_train, copy=False)
    normalize(X_test, copy=False)

    target_names = data_train.target_names

    if subset == "train":
        data = X_train
        target = data_train.target
    elif subset == "test":
        data = X_test
        target = data_test.target
    elif subset == "all":
        data = sp.vstack((X_train, X_test)).tocsr()
        target = np.concatenate((data_train.target, data_test.target))
    else:
        raise ValueError("%r is not a valid subset: should be one of "
                         "['train', 'test', 'all']" % subset)

    return Bunch(data=data, target=target, target_names=target_names)
| |
# global variables and functions for graphic display management
# to be imported with
#import graphicDisplayGlobalVarAndFunctions as gvf
# useful links
#labels and colors in networkX
# https://networkx.github.io/documentation/latest/examples/drawing/labels_and_colors.html
# look also at
# https://www.wakari.io/sharing/bundle/nvikram/Basics%20of%20Networkx
# Matplotlib colors
# http://matplotlib.org/api/colors_api.html
# html colors
# http://www.w3schools.com/html/html_colornames.asp
# In this module the try/except structures are not controlled for debugging:
# these try/except constructs, indeed, are not intended to catch user errors,
# but to manage a regular flow of inputs.
import networkx as nx
import matplotlib.pyplot as plt
import commonVar as common
# the base: creating the graph (and copying its address in a common variable
# to have the possibility of direct interaction with the graph when
# the program is finished, as the common space is imported also in the main
# program
def createGraph():
    """
    Create the directed agent graph and its label containers.

    The graph address is stored in the ``common`` module so that it remains
    reachable interactively after the program ends; the module-level
    ``colors`` and ``pos`` dictionaries are (re)initialized here as well.
    """
    global colors, pos
    colors = {}
    pos = {}
    common.g = nx.DiGraph()  # directed edges are required, hence not nx.Graph()
    common.g_labels = {}
    common.g_edge_labels = {}  # shared container for the edge labels
# searching tools
def findNodesFromSector(sector):
    """Return the nodes of the shared graph whose 'sector' attribute equals *sector*."""
    return [aNode for aNode in common.g.nodes()
            if common.g.nodes[aNode]['sector'] == sector]
def createEdge(a, b):
    """
    Create (or reinforce) the directed edge a -> b and keep the display
    labels consistent.

    The edge is implicitly directed because the shared graph is a DiGraph;
    its 'weight' attribute counts how many times the edge was created.
    """
    if a is None or b is None:
        print("Internal error, attempt to create an edge with a node defined None")
        exit(0)
    # A missing edge raises KeyError; catching exactly that (instead of
    # BaseException) keeps the "create on first use" flow without masking
    # KeyboardInterrupt/SystemExit or genuine programming errors.
    try:
        common.g[a][b]['weight'] = 1 + common.g[a][b]['weight']
    except KeyError:
        common.g.add_edge(a, b)
        common.g[a][b]['weight'] = 1
    if a != b:
        # verifying the presence of the edge in the other direction: if it
        # exists, both weights are shown on a single label
        try:
            otherW = common.g[b][a]['weight']
            common.g_edge_labels[a, b] = "w.s %d and %d" % (
                common.g[a][b]['weight'], otherW)
            common.g_edge_labels[b, a] = ""
        except KeyError:
            common.g_edge_labels[a, b] = "w. %d" % common.g[a][b]['weight']
    if a == b:
        # self-loops get a pseudo-label instead of a regular edge label
        common.g_edge_labels[a, b] = ""
        common.g[a][b]['pseudoLabel'] = "auto link w. %d" \
            % common.g[a][b]['weight']
# using networkX and matplotlib case
def closeNetworkXdisplay():
    """Close the current matplotlib window used for the network display."""
    plt.close()
def openClearNetworkXdisplay():
    """Turn on matplotlib interactive mode when running via a terminal."""
    if common.graphicStatus == "PythonViaTerminal":
        plt.ion()
    # plt.clf()
def clearNetworkXdisplay():
    """Clear the current matplotlib figure before redrawing the graph."""
    plt.clf()
def getGraph():
    """Return the shared graph, or 0 when it has not been created yet."""
    return getattr(common, 'g', 0)
def pruneEdges():
    """Remove from the shared graph every edge lighter than the threshold.

    Runs only when common.prune has been set; the flag is consumed here.
    Edge labels and self-loop pseudo-labels are kept consistent with the
    surviving edges (mirroring the bookkeeping done in createEdge).
    """
    if not common.prune:
        return
    common.prune = False
    print("New threshold to prune: < %d" % common.pruneThreshold)
    #edges=common.g.edges() modified with NetworkX 2.0
    # materialize the edge list so edges can be removed while iterating
    edges=[]
    for anE in common.g.edges():
        edges.append(anE)
    print("weights of the links")
    for anEdge in edges:
        u = anEdge[0].number
        uu = anEdge[0]
        v = anEdge[1].number
        vv = anEdge[1]
        w = common.g[anEdge[0]][anEdge[1]]["weight"]
        print(u, v, w)
        if w < common.pruneThreshold:
            # managing labels, related to createEdge phase above
            common.g_edge_labels.pop((uu, vv))
            # if the reversed edge survives, it gets back a plain label
            try:
                common.g_edge_labels[vv,
                                     uu] = "w. %d" % common.g[vv][uu]['weight']
            except BaseException:
                pass
            if uu == vv:
                common.g[uu][uu]['pseudoLabel'] = ""
                common.g_labels[uu] = str(uu.number) + " (" +\
                    str(len(uu.recipeWaitingList)) + ")"
            # removing
            common.g.remove_edge(uu, vv)
def drawGraph():
    """Redraw the agent graph and print adjacency/centrality diagnostics.

    Prunes light edges first (when requested), draws nodes, edges and their
    labels, then prints the adjacency matrix, the neighbors of each node and
    the betweenness/closeness centralities to standard output.
    """
    # directed, due to the use of DiGraph
    # draw_netwokx is well documented at
    # https://networkx.github.io/documentation/latest/reference/
    # generated/networkx.drawing.nx_pylab.draw_networkx.html
    # nx.draw_networkx(agentGraph, font_size=10,node_size=500, \
    clearNetworkXdisplay()
    pruneEdges()
    nx.draw_networkx(common.g, pos, font_size=10, node_size=500,
                     node_color=list(colors.values()),
                     labels=common.g_labels)
    nx.draw_networkx_edge_labels(
        common.g,
        pos,
        edge_labels=common.g_edge_labels,
        font_size=9)
    # plt.draw()
    plt.show()  # used by %Matplotlib inline [without ion()]; not conflicting
    # with ion()
    if common.graphicStatus == "PythonViaTerminal":
        plt.pause(0.01)
        # to show the sequence of the shown images in absence of pauses
    # print agentGraph.nodes(data=True)
    # print agentGraph.edges(data=True)
    # print labels
    # print edge_labels
    # print a, agentGraph.node[a].keys(), agentGraph.node[a].values(),\
    # agentGraph.node[a]['sector']

    # adjacency
    print()
    for i in range(len(common.orderedListOfNodes)):
        print("%d " % common.orderedListOfNodes[i].number, end=' ')
    print()
    # print "drawGraph verification of existing nodes",common.g.nodes()
    if common.g.nodes() != []:
        A = nx.adjacency_matrix(common.g, nodelist=common.orderedListOfNodes,
                                weight='weight')
        # print A # as sparse matrix, defaul from nx 1.9.1
        print(A.todense())  # as a regular matrix
    else:
        print("No nodes, impossible to create the adjacency_matrix")
    print()

    # neighbors
    for aNode in common.g.nodes():
        print(aNode.number, [node.number
                             for node in nx.neighbors(common.g, aNode)])

    # betweenness_centrality
    # Betweenness centrality of a node v is the sum of the fraction of all-pairs
    # shortest paths that pass through v
    # http://networkx.lanl.gov/reference/generated/
    # networkx.algorithms.centrality.betweenness_centrality.html
    print()
    print("betweenness_centrality")
    common.btwn = nx.betweenness_centrality(
        common.g, normalized=False, weight='weight')
    # print btw
    for i in range(len(common.orderedListOfNodes)):
        print(common.orderedListOfNodes[i].number,
              common.btwn[common.orderedListOfNodes[i]])

    # closeness_centrality
    # Closeness centrality at a node is 1/average distance to all other nodes
    # http://networkx.lanl.gov/reference/generated/
    # networkx.algorithms.centrality.closeness_centrality.html
    print()
    print("closeness_centrality")
    common.clsn = nx.closeness_centrality(common.g)
    # print clsn
    for i in range(len(common.orderedListOfNodes)):
        print(common.orderedListOfNodes[i].number,
              common.clsn[common.orderedListOfNodes[i]])
| |
from __future__ import print_function, division
import numpy as np
from timeit import default_timer as timer
from pyscf.nao import tddft_iter
class tddft_tem(tddft_iter):
    def __init__(self, **kw):
        """
        Iterative TDDFT using the electrostatic potential of a moving charge as perturbation.
        The units of the input are in Hartree atomic units...

        Input Parameters:
            dr: spatial resolution for the electron trajectory in atomic unit.
                Warning: This parameter influences the accuracy of the calculations;
                if it is taken too large the results will be wrong.
            freq: Frequency range (in atomic unit), freq[0] must be 0.0!!
        """
        tddft_iter.__init__(self, **kw)

        # default frequency step 1.5*eps keeps the grid consistent with the
        # broadening used by the iterative solver
        self.freq = kw["freq"] if "freq" in kw else np.arange(0.0, 0.367, 1.5*self.eps)
        self.dr = kw["dr"] if "dr" in kw else np.array([0.3, 0.3, 0.3])

        # filled lazily by calc_external_potential / get_spectrum_* entry points
        self.V_freq = None
        self.velec = None
        self.beam_offset = None
    def get_spectrum_nonin(self, velec = np.array([1.0, 0.0, 0.0]), beam_offset = np.array([0.0, 0.0, 0.0]),
                           tmp_fname=None, calc_Vext=True):
        """
        Calculate the non interacting TEM spectra for an electron trajectory

        Input Parameters:
            velec: xyz component of the electron velocity in atomic unit
            beam_offset: xyz components of the beam offset, must be orthogonal
                         to velec in atomic unit
            tmp_fname: optional file name to append the spectrum to, one
                       frequency per line
            calc_Vext: recompute the external potential; it is forced to True
                       whenever the trajectory changed since the last call
        """
        assert velec.size == 3
        assert beam_offset.size == 3
        if tmp_fname is not None:
            if not isinstance(tmp_fname, str):
                raise ValueError("tmp_fname must be a string")

        # a new velocity or offset invalidates the cached external potential
        if not calc_Vext and any(self.velec != velec):
            calc_Vext = True
        self.velec = velec
        if not calc_Vext and any(self.beam_offset != beam_offset):
            calc_Vext = True
        self.beam_offset = beam_offset

        self.vnorm = np.sqrt(np.dot(self.velec, self.velec))
        self.vdir = self.velec/self.vnorm

        self.check_collision(self.atom2coord)
        self.get_time_range()

        if calc_Vext:
            self.calc_external_potential()
        else:
            if self.V_freq is None:
                self.calc_external_potential()

        return self.comp_tem_spectrum_nonin(tmp_fname=tmp_fname)
    def get_spectrum_inter(self, velec = np.array([1.0, 0.0, 0.0]),
                           beam_offset = np.array([0.0, 0.0, 0.0]),
                           tmp_fname=None, calc_Vext=True):
        """
        Calculate the interacting TEM spectra for an electron trajectory

        Input Parameters:
            velec: xyz component of the electron velocity in atomic unit
            beam_offset: xyz components of the beam offset, must be orthogonal
                         to velec in atomic unit
            tmp_fname: optional file name to append the spectrum to, one
                       frequency per line
            calc_Vext: recompute the external potential; it is forced to True
                       whenever the trajectory changed since the last call
        """
        assert velec.size == 3
        assert beam_offset.size == 3
        if tmp_fname is not None:
            if not isinstance(tmp_fname, str):
                raise ValueError("tmp_fname must be a string")

        # a new velocity or offset invalidates the cached external potential
        if not calc_Vext and any(self.velec != velec):
            calc_Vext = True
        self.velec = velec
        if not calc_Vext and any(self.beam_offset != beam_offset):
            calc_Vext = True
        self.beam_offset = beam_offset

        self.vnorm = np.sqrt(np.dot(self.velec, self.velec))
        self.vdir = self.velec/self.vnorm

        self.check_collision(self.atom2coord)
        self.get_time_range()
        #print(__name__, calc_Vext)
        if calc_Vext:
            self.calc_external_potential()
        else:
            if self.V_freq is None:
                print("self.V_freq is None")
                self.calc_external_potential()

        return self.comp_tem_spectrum(tmp_fname=tmp_fname)
    def check_collision(self, atom2coord):
        """
        Check if the electron collides with an atom.

        atom2coord: array of atomic coordinates (atomic units), one row per
        atom. Raises ValueError when the beam line passes through an atom.
        """
        if self.verbosity>0:
            print("tem parameters:")
            print("vdir: ", self.vdir)
            print("vnorm: ", self.vnorm)
            print("beam_offset: ", self.beam_offset)

        assert abs(np.dot(self.velec, self.beam_offset)) < 1e-8 # check orthogonality between beam direction
                                                                # and beam offset

        # a reference point on the trajectory far away from the molecule
        R0 = -100.0*np.max(atom2coord)*self.vdir + self.beam_offset

        for atm in range(atom2coord.shape[0]):
            vec = R0 - atom2coord[atm, :]

            # unit vector to compare to vdir
            vec = abs(vec/np.sqrt(np.dot(vec, vec)))

            if np.sqrt(np.dot(vec-self.vdir, vec-self.vdir)) < 1e-6:
                ######### fancy message does not work in python2
                mess = 'np.sqrt(np.dot(vec-self.vdir, vec-self.vdir))<1e-6:'
                print("atoms {0} coordinate: ".format(atm), atom2coord[atm, :])
                #mess = """
                #Electron is collinding with atom {0}:
                #velec = [{1:.3f}, {2:.3f}, {3:.3f}]
                #beam_offset = [{4:.3f}, {5:.3f}, {6:.3f}]
                #atom coord = [{7:.3f}, {8:.3f}, {9:.3f}]
                #impact parameter = {10:.9f} > 1e-6""".format(atm, *self.velec,*self.beam_offset[0],*atom2coord[atm, :],np.sqrt(np.dot(vec, self.vdir)))
                raise ValueError(mess)
    def get_time_range(self):
        """
        Get the time and symmetric frequency range for the electron passing close
        to the particle. The time range is a symmetric array
        around 0.0. At t = 0, the electron is at its closest
        position from the molecule. This array will depend on the
        frequency range and the spatial precision dr.
        To respect the Fourier transform convention, the following
        relationship must be fulfilled,

            N = 2*pi/(dw*dt)

        with N the number of elements of t.
        N must be an odd number in order that t is symmetric
        """
        from pyscf.nao.m_tools import is_power2

        dt = np.min(self.dr)/self.vnorm
        dw = self.freq[1] - self.freq[0]

        N_org = int(2*np.pi/(dw*dt))

        # to improve performance, N must be a power of 2
        if not is_power2(N_org):
            # round N_org to the nearest power of two
            power = 1
            while 2**power < N_org:
                power +=1

            minima = np.argmin(np.array([abs(2**(power-1) - N_org), abs(2**power - N_org)]))
            if minima == 0:
                N = 2**(power-1)
            else:
                N = 2**power
            if self.verbosity>0: print("N_org = {0}, N_new = {1}".format(N_org, N))
            # adapt dt (and hence dr) so that N*dt*dw == 2*pi still holds
            dt = 2*np.pi/(N*dw)
            dr = dt*self.vnorm
            self.dr = np.array([dr, dr, dr])
        else:
            N = N_org

        dw_symm = 2.0*np.pi/(N*dt)

        wmax = 2.0*np.pi*(N-1)/(N*dt)/2.0
        self.freq_symm = np.arange(-wmax, wmax+dw_symm, dw_symm)[0:N]

        tmax = (N-1)*dt/2
        self.time = np.arange(-tmax, tmax+dt, dt)[0:N]
    def calc_external_potential(self):
        """
        Calculate the external potential created by a moving charge and
        store it (per frequency, in the product basis) in self.V_freq.
        """
        from pyscf.nao.m_comp_vext_tem import comp_vext_tem

        self.V_freq = comp_vext_tem(self, self.pb.prod_log, self.numba_parallel)

        if self.verbosity>0: print("sum(V_freq) = ", np.sum(abs(self.V_freq.real)), np.sum(abs(self.V_freq.imag)))
def comp_tem_spectrum(self, x0=False, tmp_fname=None):
"""
Compute interacting polarizability
Inputs:
-------
* comegas (complex 1D array): frequency range (in Hartree) for which the polarizability is computed.
The imaginary part control the width of the signal.
For example,
td = tddft_iter_c(...)
comegas = np.arange(0.0, 10.05, 0.05) + 1j*td.eps
* x0 (boolean, optional): determine if a starting guess array should be use to
guess the solution. if True, it will use the non-interacting
polarizability as guess.
* tmp_fname (string, default None): temporary file to save polarizability
at each frequency. Can be a life saver for large systems.
The format of the file is the following,
# energy (Hartree) Re(gamma) Im(gamma)
Output:
-------
gamma (complex 1D array): computed eels spectrum
self.dn (complex 2D array): computed density change in prod basis
"""
comegas = self.freq + 1.0j*self.eps
gamma = np.zeros_like(comegas, dtype=np.complex64)
self.dn = np.zeros((comegas.shape[0], self.nprod), dtype=np.complex64)
for iw,comega in enumerate(comegas):
if self.verbosity>0: print("freq = ", iw)
if x0 == True:
veff = self.comp_veff(self.V_freq[iw, :], comega, x0=self.dn0[iw, :])
else:
veff = self.comp_veff(self.V_freq[iw, :], comega, x0=None)
self.dn[iw, :] = self.apply_rf0(veff, comega)
gamma[iw] = np.dot(np.conj(self.V_freq[iw, :]), self.dn[iw, :])
if tmp_fname is not None:
tmp = open(tmp_fname, "a")
tmp.write("{0} {1} {2}\n".format(comega.real, -gamma[iw].real/np.pi,
-gamma[iw].imag/np.pi))
tmp.close() # Need to open and close the file at every freq, otherwise
# tmp is written only at the end of the calculations, therefore,
# it is useless
return -gamma/np.pi
def comp_tem_spectrum_nonin(self, tmp_fname = None):
"""
Compute non-interacting polarizability
Inputs:
-------
comegas (complex 1D array): frequency range (in Hartree) for which the polarizability is computed.
The imaginary part control the width of the signal.
For example,
td = tddft_iter_c(...)
comegas = np.arange(0.0, 10.05, 0.05) + 1j*td.eps
Output:
-------
gamma (complex 1D array): computed non-interacting eels spectrum
self.dn0 (complex 2D array): computed non-interacting density change in prod basis
"""
comegas = self.freq + 1.0j*self.eps
gamma = np.zeros(comegas.shape, dtype=np.complex64)
self.dn0 = np.zeros((comegas.shape[0], self.nprod), dtype=np.complex64)
for iw, comega in enumerate(comegas):
self.dn0[iw, :] = self.apply_rf0(self.V_freq[iw, :], comega)
gamma[iw] = np.dot(self.dn0[iw, :], np.conj(self.V_freq[iw, :]))
if tmp_fname is not None:
tmp = open(tmp_fname, "a")
tmp.write("{0} {1} {2}\n".format(comega.real, -gamma[iw].real/np.pi,
-gamma[iw].imag/np.pi))
tmp.close() # Need to open and close the file at every freq, otherwise
# tmp is written only at the end of the calculations, therefore,
# it is useless
return -gamma/np.pi
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The HQLdesign class can (de)serialize a design to/from a QueryDict.
"""
import json
import logging
import re
import urlparse
import django.http
from django import forms
from desktop.lib.django_forms import BaseSimpleFormSet, MultiForm
from desktop.lib.django_mako import render_to_string
from hadoop.cluster import get_hdfs
# Module-level logger for (de)serialization diagnostics.
LOG = logging.getLogger(__name__)

# Bumped whenever the persisted design dict layout changes.
SERIALIZATION_VERSION = '0.4.1'
def hql_query(hql, database='default', query_type=None):
  """Build an HQLdesign wrapping the given SQL text.

  Any trailing semicolon is stripped from the query; *database* and
  *query_type* are recorded in the design's data dict.
  """
  if not (isinstance(hql, str) or isinstance(hql, unicode)):
    raise Exception('Requires a SQL text query of type <str>, <unicode> and not %s' % type(hql))

  # A plain dict literal: equivalent to the previous json.loads of a constant
  # JSON string, but cheaper and readable.
  data_dict = {
    'query': {
      'email_notify': False,
      'query': strip_trailing_semicolon(hql),
      'type': 0,
      'is_parameterized': True,
      'database': database,
    },
    'functions': [],
    'VERSION': '0.4.1',
    'file_resources': [],
    'settings': [],
  }

  if query_type:
    data_dict['query']['type'] = query_type

  hql_design = HQLdesign()
  hql_design._data_dict = data_dict
  return hql_design
class HQLdesign(object):
  """
  Represents an HQL design, with methods to perform (de)serialization.

  We support queries that aren't parameterized, in case users
  want to use "$" natively, but we leave that as an advanced
  option to turn off.
  """
  # Field names (de)serialized for each sub-form of the design.
  _QUERY_ATTRS = [ 'query', 'type', 'is_parameterized', 'email_notify', 'database' ]
  _SETTINGS_ATTRS = [ 'key', 'value' ]
  _FILE_RES_ATTRS = [ 'type', 'path' ]
  _FUNCTIONS_ATTRS = [ 'name', 'class_name' ]

  def __init__(self, form=None, query_type=None):
    """Initialize the design from a valid form data."""
    if form is not None:
      assert isinstance(form, MultiForm)
      self._data_dict = {
          'query': normalize_form_dict(form.query, HQLdesign._QUERY_ATTRS),
          'settings': normalize_formset_dict(form.settings, HQLdesign._SETTINGS_ATTRS),
          'file_resources': normalize_formset_dict(form.file_resources, HQLdesign._FILE_RES_ATTRS),
          'functions': normalize_formset_dict(form.functions, HQLdesign._FUNCTIONS_ATTRS)
      }
      if query_type is not None:
        self._data_dict['query']['type'] = query_type

  def dumps(self):
    """Returns the serialized form of the design in a string"""
    dic = self._data_dict.copy()
    dic['VERSION'] = SERIALIZATION_VERSION
    return json.dumps(dic)

  @property
  def hql_query(self):
    # The raw SQL text of the design.
    return self._data_dict['query']['query']

  @hql_query.setter
  def hql_query(self, query):
    self._data_dict['query']['query'] = query

  @property
  def query(self):
    # A copy, so callers cannot mutate the stored query dict.
    return self._data_dict['query'].copy()

  @property
  def settings(self):
    return list(self._data_dict['settings'])

  @property
  def file_resources(self):
    return list(self._data_dict['file_resources'])

  @property
  def functions(self):
    return list(self._data_dict['functions'])

  def get_configuration_statements(self):
    """Render the ADD FILE/JAR and function statements for this design."""
    configuration = []
    for f in self.file_resources:
      # paths without a scheme are resolved against the default HDFS
      if not urlparse.urlsplit(f['path']).scheme:
        scheme = get_hdfs().fs_defaultfs
      else:
        scheme = ''
      configuration.append(render_to_string("hql_resource.mako", dict(type=f['type'], path=f['path'], scheme=scheme)))
    for f in self.functions:
      configuration.append(render_to_string("hql_function.mako", f))
    return configuration

  def get_query_dict(self):
    # We construct the mform to use its structure and prefix. We don't actually bind data to the forms.
    from beeswax.forms import QueryForm
    mform = QueryForm()
    mform.bind()

    res = django.http.QueryDict('', mutable=True)
    res.update(denormalize_form_dict(
                self._data_dict['query'], mform.query, HQLdesign._QUERY_ATTRS))
    res.update(denormalize_formset_dict(
                self._data_dict['settings'], mform.settings, HQLdesign._SETTINGS_ATTRS))
    res.update(denormalize_formset_dict(
                self._data_dict['file_resources'], mform.file_resources, HQLdesign._FILE_RES_ATTRS))
    res.update(denormalize_formset_dict(
                self._data_dict['functions'], mform.functions, HQLdesign._FUNCTIONS_ATTRS))
    return res

  @staticmethod
  def loads(data):
    """Returns an HQLdesign from the serialized form"""
    dic = json.loads(data)
    dic = dict(map(lambda k: (str(k), dic.get(k)), dic.keys()))
    if dic['VERSION'] != SERIALIZATION_VERSION:
      LOG.error('Design version mismatch. Found %s; expect %s' % (dic['VERSION'], SERIALIZATION_VERSION))

    # Convert to latest version
    del dic['VERSION']
    if 'type' not in dic['query'] or dic['query']['type'] is None:
      dic['query']['type'] = 0
    if 'database' not in dic['query']:
      dic['query']['database'] = 'default'

    design = HQLdesign()
    design._data_dict = dic
    return design

  def get_query(self):
    return self._data_dict["query"]

  @property
  def statement_count(self):
    return len(self.statements)

  def get_query_statement(self, n=0):
    # n-th individual statement of a multi-statement query
    return self.statements[n]

  @property
  def statements(self):
    # split on semicolons outside quoted strings, dropping trailing ';'
    hql_query = strip_trailing_semicolon(self.hql_query)
    return [strip_trailing_semicolon(statement.strip()) for statement in split_statements(hql_query)]

  def __eq__(self, other):
    return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)

  def __ne__(self, other):
    return not self.__eq__(other)
def split_statements(hql):
  """
  Split a multi-statement HQL string on semicolons.

  A semicolon only separates statements when it lies outside any quoted
  string. Inside a string, a backslash escapes the next character, so an
  escaped quote does not terminate the string (the previous implementation
  ignored escapes and could split inside a string literal).
  """
  statements = []
  current = ''
  between_quotes = None   # the quote char that opened the current string, if any
  escaped = False         # True when the previous char was a backslash inside a string

  for c in hql:
    current += c
    if escaped:
      # c is escaped: it is literal, and cannot open/close a string
      escaped = False
    elif between_quotes is not None and c == '\\':
      escaped = True
    elif c in ('"', "'"):
      if between_quotes == c:
        between_quotes = None
      elif between_quotes is None:
        between_quotes = c
    elif c == ';':
      if between_quotes is None:
        statements.append(current)
        current = ''

  if current and current != ';':
    statements.append(current)

  return statements
def normalize_form_dict(form, attr_list):
  """
  normalize_form_dict(form, attr_list) -> A dictionary of (attr, value)

  Each attr is a field name; its value is looked up in the form's cleaned
  data (None when the field is absent).
  """
  assert isinstance(form, forms.Form)
  return dict((attr, form.cleaned_data.get(attr)) for attr in attr_list)
def normalize_formset_dict(formset, attr_list):
  """
  normalize_formset_dict(formset, attr_list) -> A list of dictionaries of
  (attr, value), one per form in the formset.
  """
  assert isinstance(formset, BaseSimpleFormSet)
  return [normalize_form_dict(form, attr_list) for form in formset.forms]
def denormalize_form_dict(data_dict, form, attr_list):
  """
  denormalize_form_dict(data_dict, form, attr_list) -> A QueryDict with the
  prefixed form fields set from data_dict (missing attrs are skipped).
  """
  assert isinstance(form, forms.Form)
  res = django.http.QueryDict('', mutable=True)
  for attr in attr_list:
    if attr in data_dict:
      res[str(form.add_prefix(attr))] = data_dict[attr]
  return res
def denormalize_formset_dict(data_dict_list, formset, attr_list):
  """
  denormalize_formset_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set

  One prefixed sub-form is emitted per entry of data_dict_list, plus the
  management-form bookkeeping field the formset expects on submit.
  """
  assert isinstance(formset, BaseSimpleFormSet)
  res = django.http.QueryDict('', mutable=True)
  for i, data_dict in enumerate(data_dict_list):
    prefix = formset.make_prefix(i)
    form = formset.form(prefix=prefix)
    res.update(denormalize_form_dict(data_dict, form, attr_list))
    # marker field telling the formset this sub-form exists
    res[prefix + '-_exists'] = 'True'
  res[str(formset.management_form.add_prefix('next_form_id'))] = str(len(data_dict_list))
  return res
# NOTE(review): this looks like a method of HQLdesign that ended up at module
# level (it references self and self.query) — confirm and move it into the
# class; as written it is never used as a dunder.
def __str__(self):
  return '%s: %s' % (self.__class__, self.query)
# Matches one trailing semicolon plus any trailing whitespace. Raw string
# avoids the invalid "\s" escape in a plain str literal (a DeprecationWarning
# on Python 3).
_SEMICOLON_WHITESPACE = re.compile(r";\s*$")


def strip_trailing_semicolon(query):
  """As a convenience, we remove trailing semicolons from queries."""
  s = _SEMICOLON_WHITESPACE.split(query, 2)
  if len(s) > 1:
    # the pattern is end-anchored, so it can match at most once
    assert len(s) == 2
    assert s[1] == ''
  return s[0]
| |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import functools
import logging
import json
from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
from azure.core.pipeline.policies import SansIOHTTPPolicy
from azure.keyvault.secrets.aio import SecretClient
from dateutil import parser as date_parse
from _shared.test_case_async import KeyVaultTestCase
from _test_case import client_setup, get_decorator, SecretsTestCase
# Parametrized decorators covering every supported Key Vault API version,
# with logging-enabled/disabled variants for the logging tests below.
all_api_versions = get_decorator(is_async=True)
logging_enabled = get_decorator(is_async=True, logging_enable=True)
logging_disabled = get_decorator(is_async=True, logging_enable=False)
# used for logging tests
class MockHandler(logging.Handler):
    """Logging handler that simply records every record it receives."""

    def __init__(self):
        super(MockHandler, self).__init__()
        self.messages = list()

    def emit(self, record):
        # keep the raw LogRecord (not its formatted text) for later inspection
        self.messages.append(record)
class KeyVaultSecretTest(SecretsTestCase, KeyVaultTestCase):
def _assert_secret_attributes_equal(self, s1, s2):
self.assertEqual(s1.name, s2.name)
self.assertEqual(s1.vault_url, s2.vault_url)
self.assertEqual(s1.content_type, s2.content_type)
self.assertEqual(s1.enabled, s2.enabled)
self.assertEqual(s1.not_before, s2.not_before)
self.assertEqual(s1.expires_on, s2.expires_on)
self.assertEqual(s1.created_on, s2.created_on)
self.assertEqual(s1.updated_on, s2.updated_on)
self.assertEqual(s1.recovery_level, s2.recovery_level)
self.assertEqual(s1.key_id, s2.key_id)
def _validate_secret_bundle(self, secret_attributes, vault, secret_name, secret_value):
prefix = "/".join(s.strip("/") for s in [vault, "secrets", secret_name])
id = secret_attributes.id
self.assertTrue(id.index(prefix) == 0, "Id should start with '{}', but value is '{}'".format(prefix, id))
self.assertEqual(
secret_attributes.value,
secret_value,
"value should be '{}', but is '{}'".format(secret_value, secret_attributes.value),
)
self.assertTrue(
secret_attributes.properties.created_on and secret_attributes.properties.updated_on,
"Missing required date attributes.",
)
    async def _validate_secret_list(self, secrets, expected):
        """Consume an async iterator of secrets, checking off each expected one.

        Every listed secret found in *expected* must match attribute by
        attribute; at the end, all expected secrets must have been seen.
        """
        async for secret in secrets:
            if secret.name in expected.keys():
                expected_secret = expected[secret.name]
                self._assert_secret_attributes_equal(expected_secret.properties, secret)
                del expected[secret.name]
        self.assertEqual(len(expected), 0)
@all_api_versions()
@client_setup
async def test_secret_crud_operations(self, client, **kwargs):
secret_name = self.get_resource_name("crud-secret")
secret_value = "crud_secret_value"
# create secret
created = await client.set_secret(secret_name, secret_value)
self._validate_secret_bundle(created, client.vault_url, secret_name, secret_value)
# set secret with optional arguments
not_before = date_parse.parse("2015-02-02T08:00:00.000Z")
enabled = True
tags = {"foo": "created tag"}
created = await client.set_secret(secret_name, secret_value, enabled=enabled, not_before=not_before, tags=tags)
self._validate_secret_bundle(created, client.vault_url, secret_name, secret_value)
self.assertEqual(enabled, created.properties.enabled)
self.assertEqual(not_before, created.properties.not_before)
self.assertEqual(tags, created.properties.tags)
# get secret without version
retrieved_secret = await client.get_secret(created.name, "")
self.assertEqual(created.id, retrieved_secret.id)
self._assert_secret_attributes_equal(created.properties, retrieved_secret.properties)
# get secret with version
secret_with_version = await client.get_secret(created.name, created.properties.version)
self.assertEqual(created.id, retrieved_secret.id)
self._assert_secret_attributes_equal(created.properties, secret_with_version.properties)
async def _update_secret(secret):
content_type = "text/plain"
expires = date_parse.parse("2050-02-02T08:00:00.000Z")
tags = {"foo": "updated tag"}
enabled = not secret.properties.enabled
updated_secret = await client.update_secret_properties(
secret.name,
version=secret.properties.version,
content_type=content_type,
expires_on=expires,
tags=tags,
enabled=enabled,
)
self.assertEqual(tags, updated_secret.tags)
self.assertEqual(secret.id, updated_secret.id)
self.assertEqual(content_type, updated_secret.content_type)
self.assertEqual(expires, updated_secret.expires_on)
self.assertNotEqual(secret.properties.enabled, updated_secret.enabled)
self.assertNotEqual(secret.properties.updated_on, updated_secret.updated_on)
return updated_secret
# update secret with version
if self.is_live:
# wait a second to ensure the secret's update time won't equal its creation time
await asyncio.sleep(1)
updated = await _update_secret(created)
# delete secret
deleted = await client.delete_secret(updated.name)
self.assertIsNotNone(deleted)
@all_api_versions()
@client_setup
async def test_secret_list(self, client, **kwargs):
max_secrets = self.list_test_size
expected = {}
# create many secrets
for x in range(0, max_secrets):
secret_name = self.get_resource_name("sec{}".format(x))
secret_value = "secVal{}".format(x)
secret = None
while not secret:
secret = await client.set_secret(secret_name, secret_value)
expected[secret_name] = secret
# list secrets
result = client.list_properties_of_secrets(max_page_size=max_secrets - 1)
await self._validate_secret_list(result, expected)
@all_api_versions()
@client_setup
async def test_list_deleted_secrets(self, client, **kwargs):
expected = {}
# create secrets
for i in range(self.list_test_size):
secret_name = self.get_resource_name("secret{}".format(i))
secret_value = "value{}".format(i)
expected[secret_name] = await client.set_secret(secret_name, secret_value)
# delete them
for secret_name in expected.keys():
await client.delete_secret(secret_name)
# validate list deleted secrets with attributes
async for deleted_secret in client.list_deleted_secrets():
self.assertIsNotNone(deleted_secret.deleted_date)
self.assertIsNotNone(deleted_secret.scheduled_purge_date)
self.assertIsNotNone(deleted_secret.recovery_id)
if deleted_secret.name in expected:
expected_secret = expected[deleted_secret.name]
self._assert_secret_attributes_equal(expected_secret.properties, deleted_secret.properties)
@all_api_versions()
@client_setup
async def test_list_versions(self, client, **kwargs):
secret_name = self.get_resource_name("sec")
secret_value = "secVal"
max_secrets = self.list_test_size
expected = {}
# create many secret versions
for _ in range(0, max_secrets):
secret = None
while not secret:
secret = await client.set_secret(secret_name, secret_value)
expected[secret.id] = secret
# list secret versions
result = client.list_properties_of_secret_versions(secret_name, max_page_size=max_secrets - 1)
# validate list secret versions with attributes
async for secret in result:
if secret.id in expected.keys():
expected_secret = expected[secret.id]
del expected[secret.id]
self._assert_secret_attributes_equal(expected_secret.properties, secret)
self.assertEqual(len(expected), 0)
@all_api_versions()
@client_setup
async def test_backup_restore(self, client, **kwargs):
secret_name = self.get_resource_name("secbak")
secret_value = "secVal"
# create secret
created_bundle = await client.set_secret(secret_name, secret_value)
# backup secret
secret_backup = await client.backup_secret(created_bundle.name)
self.assertIsNotNone(secret_backup, "secret_backup")
# delete secret
await client.delete_secret(created_bundle.name)
# purge secret
await client.purge_deleted_secret(created_bundle.name)
# restore secret
restore_function = functools.partial(client.restore_secret_backup, secret_backup)
restored_secret = await self._poll_until_no_exception(restore_function, expected_exception=ResourceExistsError)
self._assert_secret_attributes_equal(created_bundle.properties, restored_secret)
@all_api_versions()
@client_setup
async def test_recover(self, client, **kwargs):
secrets = {}
# create secrets to recover
for i in range(self.list_test_size):
secret_name = self.get_resource_name("secret{}".format(i))
secret_value = "value{}".format(i)
secrets[secret_name] = await client.set_secret(secret_name, secret_value)
# delete all secrets
for secret_name in secrets.keys():
await client.delete_secret(secret_name)
# validate all our deleted secrets are returned by list_deleted_secrets
async for deleted_secret in client.list_deleted_secrets():
if deleted_secret.name in secrets:
secrets.pop(deleted_secret.name)
assert len(secrets.keys()) == 0
# recover select secrets
for secret_name in secrets.keys():
await client.recover_deleted_secret(secret_name)
# validate the recovered secrets exist
for secret in secrets.keys():
get_function = functools.partial(client.get_secret, secret)
await self._poll_until_no_exception(get_function, expected_exception=ResourceNotFoundError)
@all_api_versions()
@client_setup
async def test_purge(self, client, **kwargs):
secrets = {}
# create secrets to purge
for i in range(self.list_test_size):
secret_name = self.get_resource_name("secret{}".format(i))
secret_value = "value{}".format(i)
secrets[secret_name] = await client.set_secret(secret_name, secret_value)
# delete all secrets
for secret_name in secrets.keys():
await client.delete_secret(secret_name)
# validate all our deleted secrets are returned by list_deleted_secrets
async for deleted_secret in client.list_deleted_secrets():
if deleted_secret.name in secrets:
secrets.pop(deleted_secret.name)
assert len(secrets.keys()) == 0
# purge secrets
for secret_name in secrets.keys():
await client.purge_deleted_secret(secret_name)
@logging_enabled()
@client_setup
async def test_logging_enabled(self, client, **kwargs):
mock_handler = MockHandler()
logger = logging.getLogger("azure")
logger.addHandler(mock_handler)
logger.setLevel(logging.DEBUG)
secret_name = self.get_resource_name("secret-name")
await client.set_secret(secret_name, "secret-value")
for message in mock_handler.messages:
if message.levelname == "DEBUG" and message.funcName == "on_request":
# parts of the request are logged on new lines in a single message
if "'/n" in message.message:
request_sections = message.message.split("/n")
else:
request_sections = message.message.split("\n")
for section in request_sections:
try:
# the body of the request should be JSON
body = json.loads(section)
if body["value"] == "secret-value":
mock_handler.close()
return
except (ValueError, KeyError):
# this means the request section is not JSON
pass
mock_handler.close()
assert False, "Expected request body wasn't logged"
@logging_disabled()
@client_setup
async def test_logging_disabled(self, client, **kwargs):
mock_handler = MockHandler()
logger = logging.getLogger("azure")
logger.addHandler(mock_handler)
logger.setLevel(logging.DEBUG)
secret_name = self.get_resource_name("secret-name")
await client.set_secret(secret_name, "secret-value")
for message in mock_handler.messages:
if message.levelname == "DEBUG" and message.funcName == "on_request":
# parts of the request are logged on new lines in a single message
if "'/n" in message.message:
request_sections = message.message.split("/n")
else:
request_sections = message.message.split("\n")
for section in request_sections:
try:
# the body of the request should be JSON
body = json.loads(section)
if body["value"] == "secret-value":
mock_handler.close()
assert False, "Client request body was logged"
except (ValueError, KeyError):
# this means the message is not JSON or has no kty property
pass
mock_handler.close()
def test_service_headers_allowed_in_logs():
    """Key Vault's diagnostic response headers must be whitelisted by the
    client's HTTP logging policy (so they appear unredacted in logs)."""
    expected_headers = {"x-ms-keyvault-network-info", "x-ms-keyvault-region", "x-ms-keyvault-service-version"}
    client = SecretClient("...", object())
    allowed = client._client._config.http_logging_policy.allowed_header_names
    assert expected_headers.issubset(allowed)
def test_custom_hook_policy():
    """A custom_hook_policy passed to the client lands in the pipeline config."""
    class CustomHookPolicy(SansIOHTTPPolicy):
        pass

    policy = CustomHookPolicy()
    client = SecretClient("...", object(), custom_hook_policy=policy)
    configured = client._client._config.custom_hook_policy
    assert isinstance(configured, CustomHookPolicy)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import django
from django.db.models.sql import compiler
import re
# True on Django < 1.7, where aggregate SQL functions still need patching
# (the aggregate/annotation internals changed in 1.7).
NEEDS_AGGREGATES_FIX = django.VERSION[:2] < (1, 7)

# query_class returns the base class to use for Django queries.
# The custom 'SqlServerQuery' class derives from django.db.models.sql.query.Query
# which is passed in as "QueryClass" by Django itself.
#
# SqlServerQuery overrides:
# ...insert queries to add "SET IDENTITY_INSERT" if needed.
# ...select queries to emulate LIMIT/OFFSET for sliced queries.

# Pattern to scan a column data type string and split the data type from any
# constraints or other included parts of a column definition. Based upon
# <column_definition> from http://msdn.microsoft.com/en-us/library/ms174979.aspx
_re_data_type_terminator = re.compile(
    r'\s*\b(?:' +
    r'filestream|collate|sparse|not|null|constraint|default|identity|rowguidcol' +
    r'|primary|unique|clustered|nonclustered|with|on|foreign|references|check' +
    ')',
    re.IGNORECASE,
)

# Pattern used in column aliasing to find sub-select placeholders
_re_col_placeholder = re.compile(r'\{_placeholder_(\d+)\}')

# Pattern to find the quoted column name at the end of a field specification
_re_pat_col = re.compile(r"\[([^\[]+)\]$")

# Matches the trailing ORDER BY / LIMIT / OFFSET portion of a query;
# groups are (ordering, limit, offset), any of which may be None.
_re_order_limit_offset = re.compile(
    r'(?:ORDER BY\s+(.+?))?\s*(?:LIMIT\s+(\d+))?\s*(?:OFFSET\s+(\d+))?$')

# Matches an explicit ASC/DESC at the end of a single ordering expression.
_re_find_order_direction = re.compile(r'\s+(asc|desc)\s*$', re.IGNORECASE)
def _get_order_limit_offset(sql):
    """Return the (ordering, limit, offset) groups matched at the end of *sql*.

    Any of the three may be None when absent.
    """
    match = _re_order_limit_offset.search(sql)
    return match.groups()
def _remove_order_limit_offset(sql):
    """Strip any trailing ORDER BY/LIMIT/OFFSET from *sql* and drop the
    leading verb (i.e. return the query without its initial ``SELECT``)."""
    trimmed = _re_order_limit_offset.sub('', sql)
    _verb, remainder = trimmed.split(None, 1)
    return remainder
def _break(s, find):
"""Break a string s into the part before the substring to find,
and the part including and after the substring."""
i = s.find(find)
return s[:i], s[i:]
class SQLCompiler(compiler.SQLCompiler):
    """SELECT compiler for SQL Server.

    SQL Server (pre-2012 syntax) has no LIMIT/OFFSET clause; sliced
    querysets are rewritten to use TOP (high mark only) or a
    ROW_NUMBER() window (when a low mark / offset is present).
    """

    def resolve_columns(self, row, fields=()):
        """Convert raw DB values in *row* to Python values via backend ops.

        Leading entries belonging to ``extra_select`` columns are passed
        through untouched.
        """
        values = []
        index_extra_select = len(self.query.extra_select)
        for value, field in zip_longest(row[index_extra_select:], fields):
            if field:
                try:
                    value = self.connection.ops.convert_values(value, field)
                except ValueError:
                    # leave the raw value in place when conversion fails
                    pass
            values.append(value)
        return row[:index_extra_select] + tuple(values)

    def compile(self, node, select_format=False):
        """
        Added with Django 1.7 as a mechanism to evaluate expressions.

        Swaps in MSSQL-specific function names/templates (from
        ``ops._sql_function_overrides``) before delegating to Django.
        ``select_format`` is accepted for API compatibility but not
        forwarded to the parent call.
        """
        sql_function = getattr(node, 'function', None)
        if sql_function and sql_function in self.connection.ops._sql_function_overrides:
            sql_function, sql_template = self.connection.ops._sql_function_overrides[sql_function]
            if sql_function:
                node.function = sql_function
            if sql_template:
                node.template = sql_template
        return super(SQLCompiler, self).compile(node)

    def _fix_aggregates(self):
        """
        MSSQL doesn't match the behavior of the other backends on a few of
        the aggregate functions; different return type behavior, different
        function names, etc.

        MSSQL's implementation of AVG maintains datatype without prodding. To
        match behavior of other django backends, it needs to not drop remainders.
        E.g. AVG([1, 2]) needs to yield 1.5, not 1
        """
        for alias, aggregate in self.query.annotation_select.items():
            sql_function = getattr(aggregate, 'sql_function', None)
            if not sql_function or sql_function not in self.connection.ops._sql_function_overrides:
                continue
            sql_function, sql_template = self.connection.ops._sql_function_overrides[sql_function]
            if sql_function:
                self.query.annotation_select[alias].sql_function = sql_function
            if sql_template:
                self.query.annotation_select[alias].sql_template = sql_template

    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
        """Build the SELECT statement, emulating LIMIT/OFFSET when needed.

        Returns the usual ``(sql, params)`` pair. Three paths:
        no slicing -> delegate to Django; high mark only -> inject TOP;
        low mark present -> wrap the query in a ROW_NUMBER() subselect.
        """
        # Django #12192 - Don't execute any DB query when QS slicing results in limit 0
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()

        self._using_row_number = False

        # Get out of the way if we're not a select query or there's no limiting involved.
        check_limits = with_limits and (self.query.low_mark or self.query.high_mark is not None)
        if not check_limits:
            # The ORDER BY clause is invalid in views, inline functions,
            # derived tables, subqueries, and common table expressions,
            # unless TOP or FOR XML is also specified.
            try:
                setattr(self.query, '_mssql_ordering_not_allowed', with_col_aliases)
                result = super(SQLCompiler, self).as_sql(with_limits, with_col_aliases)
            finally:
                # remove in case the query is ever reused
                delattr(self.query, '_mssql_ordering_not_allowed')
            return result

        raw_sql, fields = super(SQLCompiler, self).as_sql(False, with_col_aliases)

        # Check for high mark only and replace with "TOP"
        if self.query.high_mark is not None and not self.query.low_mark:
            _select = 'SELECT'
            if self.query.distinct:
                _select += ' DISTINCT'
            sql = re.sub(r'(?i)^{0}'.format(_select), '{0} TOP {1}'.format(_select, self.query.high_mark), raw_sql, 1)
            return sql, fields

        # Else we have limits; rewrite the query using ROW_NUMBER()
        self._using_row_number = True

        # Lop off ORDER... and the initial "SELECT"
        inner_select = _remove_order_limit_offset(raw_sql)
        outer_fields, inner_select = self._alias_columns(inner_select)

        order = _get_order_limit_offset(raw_sql)[0]

        qn = self.connection.ops.quote_name
        inner_table_name = qn('AAAA')

        outer_fields, inner_select, order = self._fix_slicing_order(outer_fields, inner_select, order, inner_table_name)

        # map a copy of outer_fields for injected subselect
        f = []
        for x in outer_fields.split(','):
            i = x.upper().find(' AS ')
            if i != -1:
                # keep only the alias part of "<expr> AS <alias>"
                x = x[i + 4:]
            if x.find('.') != -1:
                tbl, col = x.rsplit('.', 1)
            else:
                col = x
            f.append('{0}.{1}'.format(inner_table_name, col.strip()))

        # inject a subselect to get around OVER requiring ORDER BY to come from FROM
        inner_select = '{fields} FROM ( SELECT {inner} ) AS {inner_as}'.format(
            fields=', '.join(f),
            inner=inner_select,
            inner_as=inner_table_name,
        )

        # low_mark < _row_num [and _row_num <= high_mark] emulates OFFSET/LIMIT
        where_row_num = '{0} < _row_num'.format(self.query.low_mark)
        if self.query.high_mark:
            where_row_num += ' and _row_num <= {0}'.format(self.query.high_mark)

        sql = """SELECT _row_num, {outer}
FROM ( SELECT ROW_NUMBER() OVER ( ORDER BY {order}) as _row_num, {inner}) as QQQ
WHERE {where}""".format(
            outer=outer_fields,
            order=order,
            inner=inner_select,
            where=where_row_num,
        )

        return sql, fields

    def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name):
        """
        Apply any necessary fixes to the outer_fields, inner_select, and order
        strings due to slicing.
        """
        # Using ROW_NUMBER requires an ordering
        if order is None:
            # default to ordering by the primary key
            meta = self.query.get_meta()
            column = meta.pk.db_column or meta.pk.get_attname()
            order = '{0}.{1} ASC'.format(
                inner_table_name,
                self.connection.ops.quote_name(column),
            )
        else:
            alias_id = 0
            # remap order for injected subselect
            new_order = []
            for x in order.split(','):
                # find the ordering direction
                m = _re_find_order_direction.search(x)
                if m:
                    direction = m.groups()[0]
                else:
                    direction = 'ASC'
                # remove the ordering direction
                x = _re_find_order_direction.sub('', x)
                # remove any namespacing or table name from the column name
                col = x.rsplit('.', 1)[-1]
                # Is the ordering column missing from the inner select?
                # 'inner_select' contains the full query without the leading 'SELECT '.
                # It's possible that this can get a false hit if the ordering
                # column is used in the WHERE while not being in the SELECT. It's
                # not worth the complexity to properly handle that edge case.
                if x not in inner_select:
                    # Ordering requires the column to be selected by the inner select
                    alias_id += 1
                    # alias column name
                    col = '[{0}___o{1}]'.format(
                        col.strip('[]'),
                        alias_id,
                    )
                    # add alias to inner_select
                    inner_select = '({0}) AS {1}, {2}'.format(x, col, inner_select)
                new_order.append('{0}.{1} {2}'.format(inner_table_name, col, direction))
            order = ', '.join(new_order)
        return outer_fields, inner_select, order

    def _alias_columns(self, sql):
        """Return tuple of SELECT and FROM clauses, aliasing duplicate column names."""
        qn = self.connection.ops.quote_name

        outer = list()
        inner = list()
        names_seen = list()

        # replace all parens with placeholders so the comma-split below
        # doesn't break inside function calls or sub-selects
        paren_depth, paren_buf = 0, ['']
        parens, i = {}, 0
        for ch in sql:
            if ch == '(':
                i += 1
                paren_depth += 1
                paren_buf.append('')
            elif ch == ')':
                paren_depth -= 1
                key = '_placeholder_{0}'.format(i)
                buf = paren_buf.pop()

                # store the expanded paren string
                parens[key] = buf.format(**parens)
                paren_buf[paren_depth] += '({' + key + '})'
            else:
                paren_buf[paren_depth] += ch

        def _replace_sub(col):
            """Replace all placeholders with expanded values"""
            while _re_col_placeholder.search(col):
                col = col.format(**parens)
            return col

        temp_sql = ''.join(paren_buf)

        select_list, from_clause = _break(temp_sql, ' FROM [')

        for col in [x.strip() for x in select_list.split(',')]:
            match = _re_pat_col.search(col)
            if match:
                col_name = match.group(1)
                col_key = col_name.lower()
                if col_key in names_seen:
                    # duplicate column name: give it a numbered alias
                    alias = qn('{0}___{1}'.format(col_name, names_seen.count(col_key)))
                    outer.append(alias)
                    inner.append('{0} as {1}'.format(_replace_sub(col), alias))
                else:
                    outer.append(qn(col_name))
                    inner.append(_replace_sub(col))

                names_seen.append(col_key)
            else:
                raise Exception('Unable to find a column name when parsing SQL: {0}'.format(col))

        return ', '.join(outer), ', '.join(inner) + from_clause.format(**parens)

    def get_ordering(self):
        # The ORDER BY clause is invalid in views, inline functions,
        # derived tables, subqueries, and common table expressions,
        # unless TOP or FOR XML is also specified.
        if getattr(self.query, '_mssql_ordering_not_allowed', False):
            # NOTE(review): this looks like a Django 1.1.x compatibility
            # check (2-tuple vs 3-tuple return) — confirm the intended range
            if django.VERSION[1] == 1 and django.VERSION[2] < 6:
                return (None, [])
            return (None, [], [])
        return super(SQLCompiler, self).get_ordering()
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
    """INSERT compiler handling IDENTITY columns and returning inserted IDs."""

    # search for after table/column list
    _re_values_sub = re.compile(
        r'(?P<prefix>\)|\])(?P<default>\s*|\s*default\s*)values(?P<suffix>\s*|\s+\()?',
        re.IGNORECASE
    )
    # ... and insert the OUTPUT clause between it and the values list (or DEFAULT VALUES).
    _values_repl = r'\g<prefix> OUTPUT INSERTED.{col} INTO @sqlserver_ado_return_id\g<default>VALUES\g<suffix>'

    def as_sql(self, *args, **kwargs):
        """Build the INSERT statement(s), then apply MSSQL fix-ups via _fix_insert."""
        # Fix for Django ticket #14019: return_id may not have been set
        if not hasattr(self, 'return_id'):
            self.return_id = False

        result = super(SQLInsertCompiler, self).as_sql(*args, **kwargs)
        if isinstance(result, list):
            # Django 1.4 wraps return in list
            return [self._fix_insert(x[0], x[1]) for x in result]

        sql, params = result
        return self._fix_insert(sql, params)

    def _fix_insert(self, sql, params):
        """
        Wrap the passed SQL with IDENTITY_INSERT statements and apply
        other necessary fixes.
        """
        meta = self.query.get_meta()

        if meta.has_auto_field:
            if hasattr(self.query, 'fields'):
                # django 1.4 replaced columns with fields
                fields = self.query.fields
                auto_field = meta.auto_field
            else:
                # < django 1.4
                fields = self.query.columns
                auto_field = meta.auto_field.db_column or meta.auto_field.column

            auto_in_fields = auto_field in fields

            quoted_table = self.connection.ops.quote_name(meta.db_table)
            if not fields or (auto_in_fields and len(fields) == 1 and not params):
                # convert format when inserting only the primary key without
                # specifying a value
                sql = 'INSERT INTO {0} DEFAULT VALUES'.format(
                    quoted_table
                )
                params = []
            elif auto_in_fields:
                # wrap with identity insert so an explicit PK value is accepted
                sql = 'SET IDENTITY_INSERT {table} ON;{sql};SET IDENTITY_INSERT {table} OFF'.format(
                    table=quoted_table,
                    sql=sql,
                )

        # mangle SQL to return ID from insert
        # http://msdn.microsoft.com/en-us/library/ms177564.aspx
        if self.return_id and self.connection.features.can_return_id_from_insert:
            col = self.connection.ops.quote_name(meta.pk.db_column or meta.pk.get_attname())

            # Determine datatype for use with the table variable that will return the inserted ID
            pk_db_type = _re_data_type_terminator.split(meta.pk.db_type(self.connection))[0]

            # NOCOUNT ON to prevent additional trigger/stored proc related resultsets
            sql = 'SET NOCOUNT ON;{declare_table_var};{sql};{select_return_id}'.format(
                sql=sql,
                declare_table_var="DECLARE @sqlserver_ado_return_id table ({col_name} {pk_type})".format(
                    col_name=col,
                    pk_type=pk_db_type,
                ),
                select_return_id="SELECT * FROM @sqlserver_ado_return_id",
            )

            output = self._values_repl.format(col=col)
            sql = self._re_values_sub.sub(output, sql)

        return sql, params
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    # No MSSQL-specific DELETE behavior is needed; inherit everything.
    pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
        """Generate the UPDATE statement, bracketed with NOCOUNT toggles.

        ``SET NOCOUNT OFF`` makes SQL Server report the number of affected
        rows for the UPDATE (instead of -1); NOCOUNT is switched back on
        afterwards. An empty statement is passed through unchanged.
        """
        sql, params = super(SQLUpdateCompiler, self).as_sql()
        if not sql:
            return sql, params
        return 'SET NOCOUNT OFF; {0}; SET NOCOUNT ON'.format(sql), params
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
        """Patch aggregate functions for MSSQL, then build the SQL."""
        self._fix_aggregates()
        result = super(SQLAggregateCompiler, self).as_sql()
        return result
| |
# -*- coding: utf-8 -*-
"""
pygments.lexers.fortran
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Fortran languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, words, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['FortranLexer', 'FortranFixedLexer']
class FortranLexer(RegexLexer):
    """
    Lexer for FORTRAN 90 code (free-form source).

    .. versionadded:: 0.10
    """
    name = 'Fortran'
    aliases = ['fortran']
    filenames = ['*.f03', '*.f90', '*.F03', '*.F90']
    mimetypes = ['text/x-fortran']
    flags = re.IGNORECASE | re.MULTILINE

    # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
    # Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
    # Logical (?): NOT, AND, OR, EQV, NEQV

    # Builtins:
    # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
    tokens = {
        'root': [
            (r'^#.*\n', Comment.Preproc),   # preprocessor directives
            (r'!.*\n', Comment),            # "!" starts a comment to end of line
            include('strings'),
            include('core'),
            (r'[a-z][\w$]*', Name),
            include('nums'),
            (r'[\s]+', Text),
        ],
        'core': [
            # Statements
            (words((
                'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE',
                'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND',
                'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE',
                'CODIMENSION', 'COMMON', 'CONCURRRENT', 'CONTIGUOUS', 'CONTAINS',
                'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE',
                'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END',
                'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'EXIT', 'EXTENDS',
                'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT',
                'FUNCTION', 'GENERIC', 'GOTO', 'IF', 'IMAGES', 'IMPLICIT',
                'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE',
                'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY',
                'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'OPEN', 'OPTIONAL',
                'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE',
                'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ',
                'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE',
                'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES',
                'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE',
                'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'),
             Keyword),

            # Data Types
            (words((
                'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER',
                'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG',
                'C_SIGNED_CHAR', 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T',
                'C_INT64_T', 'C_INT_LEAST8_T', 'C_INT_LEAST16_T', 'C_INT_LEAST32_T',
                'C_INT_LEAST64_T', 'C_INT_FAST8_T', 'C_INT_FAST16_T', 'C_INT_FAST32_T',
                'C_INT_FAST64_T', 'C_INTMAX_T', 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE',
                'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', 'C_DOUBLE_COMPLEX',
                'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', 'C_FUNPTR'),
                   prefix=r'\b', suffix=r'\s*\b'),
             Keyword.Type),

            # Operators
            (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),

            (r'(::)', Keyword.Declaration),

            (r'[()\[\],:&%;.]', Punctuation),
            # Intrinsics
            (words((
                'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL',
                'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog',
                'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH',
                'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref',
                'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0',
                'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE',
                'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp',
                'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count',
                'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift',
                'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated',
                'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer',
                'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc',
                'C_Sizeof', 'C_New_Line', 'C_Carriage_Return',
                'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin',
                'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY',
                'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF',
                'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax',
                'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH',
                'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime',
                'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime',
                'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of',
                'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush',
                'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell',
                'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument',
                'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog',
                'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs',
                'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits',
                'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr',
                'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index',
                'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC',
                'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End',
                'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound',
                'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk',
                'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat',
                'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent',
                'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc',
                'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits',
                'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images',
                'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product',
                'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real',
                'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift',
                'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind',
                'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape',
                'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH',
                'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand',
                'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock',
                'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer',
                'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask',
                'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp',
                'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'),
             Name.Builtin),

            # Booleans
            (r'\.(true|false)\.', Name.Builtin),
            # Comparing Operators
            (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
        ],

        'strings': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
        ],

        'nums': [
            # integer with optional kind suffix (e.g. 42_int64)
            (r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
            (r'[+-]?\d*\.\d+([ed][-+]?\d+)?(_[a-z]\w+)?', Number.Float),
            (r'[+-]?\d+\.\d*([ed][-+]?\d+)?(_[a-z]\w+)?', Number.Float),
            (r'[+-]?\d+(\.\d*)?[ed][-+]?\d+(_[a-z]\w+)?', Number.Float),
        ],
    }
class FortranFixedLexer(RegexLexer):
    """
    Lexer for fixed format Fortran.

    Fixed-form layout: columns 1-5 hold a statement label, column 6 a
    continuation character, and the statement text follows.

    .. versionadded:: 2.1
    """
    name = 'FortranFixed'
    aliases = ['fortranfixed']
    filenames = ['*.f', '*.F']

    flags = re.IGNORECASE

    def _lex_fortran(self, match, ctx=None):
        """Lex a line just as free form fortran without line break."""
        lexer = FortranLexer()
        text = match.group(0) + "\n"
        for index, token, value in lexer.get_tokens_unprocessed(text):
            value = value.replace('\n', '')
            if value != '':
                yield index, token, value

    tokens = {
        'root': [
            # 'C' or '*' in column 1 marks a comment line
            (r'[C*].*\n', Comment),
            (r'#.*\n', Comment.Preproc),
            (r' {0,4}!.*\n', Comment),
            # first five columns: statement label
            (r'(.{5})', Name.Label, 'cont-char'),
            (r'.*\n', using(FortranLexer)),
        ],
        'cont-char': [
            # column 6: blank or '0' means a new statement,
            # anything else marks a continuation line
            (' ', Text, 'code'),
            ('0', Comment, 'code'),
            ('.', Generic.Strong, 'code'),
        ],
        'code': [
            # statement text up to column 72; the rest of the line is
            # traditionally ignored (sequence numbers) -> Comment
            (r'(.{66})(.*)(\n)',
             bygroups(_lex_fortran, Comment, Text), 'root'),
            (r'(.*)(\n)', bygroups(_lex_fortran, Text), 'root'),
            default('root'),
        ]
    }
| |
"""Encoder for sentences withou explicit segmentation."""
from typing import Tuple, List
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.encoders.recurrent import RNNCellTuple
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.model.sequence import Sequence
from neuralmonkey.model.stateful import TemporalStatefulWithOutput
from neuralmonkey.nn.noisy_gru_cell import NoisyGRUCell
from neuralmonkey.nn.ortho_gru_cell import OrthoGRUCell
from neuralmonkey.nn.utils import dropout
from neuralmonkey.nn.highway import highway
from neuralmonkey.decorators import tensor
from neuralmonkey.tf_utils import get_variable
# pylint: disable=too-many-instance-attributes
class SentenceCNNEncoder(ModelPart, TemporalStatefulWithOutput):
    """Recurrent over Convolutional Encoder.

    Encoder processing a sentence using a CNN
    then running a bidirectional RNN on the result.

    Based on: Jason Lee, Kyunghyun Cho, Thomas Hofmann: Fully
    Character-Level Neural Machine Translation without Explicit
    Segmentation.

    See https://arxiv.org/pdf/1610.03017.pdf
    """

    # pylint: disable=too-many-arguments,too-many-locals
    # pylint: disable=too-many-statements
    def __init__(self,
                 name: str,
                 input_sequence: Sequence,
                 segment_size: int,
                 highway_depth: int,
                 rnn_size: int,
                 filters: List[Tuple[int, int]],
                 dropout_keep_prob: float = 1.0,
                 use_noisy_activations: bool = False,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Create a new instance of the sentence encoder.

        Arguments:
            name: An unique identifier for this encoder
            input_sequence: The input sequence to encode.
            segment_size: The size of the segments over which we apply
                max-pooling.
            highway_depth: Depth of the highway layer.
            rnn_size: The size of the encoder's hidden state. Note
                that the actual encoder output state size will be
                twice as long because it is the result of
                concatenation of forward and backward hidden states.
            filters: Specification of CNN filters. It is a list of tuples
                specifying the filter size and number of channels.

        Keyword arguments:
            dropout_keep_prob: The dropout keep probability
                (default 1.0)
            use_noisy_activations: If set, the recurrent cells use noisy
                activations instead of orthogonally initialized GRUs.

        Raises:
            ValueError: If any of the hyperparameters is out of range.
        """
        ModelPart.__init__(self, name, reuse, save_checkpoint, load_checkpoint,
                           initializers)
        check_argument_types()

        self.input_sequence = input_sequence
        self.segment_size = segment_size
        self.highway_depth = highway_depth
        self.rnn_size = rnn_size
        self.filters = filters
        self.dropout_keep_prob = dropout_keep_prob
        self.use_noisy_activations = use_noisy_activations

        # Validate hyperparameters eagerly so misconfiguration fails at
        # construction time, not during graph building.
        if dropout_keep_prob <= 0. or dropout_keep_prob > 1.:
            raise ValueError(
                ("Dropout keep probability must be "
                 "in (0; 1], was {}").format(dropout_keep_prob))

        if rnn_size <= 0:
            raise ValueError("RNN size must be a positive integer.")

        if highway_depth <= 0:
            raise ValueError("Highway depth must be a positive integer.")

        if segment_size <= 0:
            # Fixed error message: the original read "Segment size be a
            # positive integer." (missing the word "must").
            raise ValueError("Segment size must be a positive integer.")

        if not filters:
            raise ValueError("You must specify convolutional filters.")

        for filter_size, num_filters in self.filters:
            if filter_size <= 0:
                raise ValueError("Filter size must be a positive integer.")
            if num_filters <= 0:
                raise ValueError("Number of filters must be a positive int.")

    @tensor
    def cnn_encoded(self) -> tf.Tensor:
        """1D convolution with max-pool that processes the input states."""
        dropped_inputs = dropout(self.input_sequence.temporal_states,
                                 self.dropout_keep_prob, self.train_mode)

        pooled_outputs = []
        for filter_size, num_filters in self.filters:
            with tf.variable_scope("conv-maxpool-%s" % filter_size):
                filter_shape = [filter_size, self.input_sequence.dimension,
                                num_filters]
                w_filter = get_variable(
                    "conv_W", filter_shape,
                    initializer=tf.variance_scaling_initializer(
                        mode="fan_avg", distribution="uniform"))
                b_filter = get_variable(
                    "conv_bias", [num_filters],
                    initializer=tf.zeros_initializer())
                conv = tf.nn.conv1d(
                    dropped_inputs,
                    w_filter,
                    stride=1,
                    padding="SAME",
                    name="conv")

                # Apply nonlinearity
                conv_relu = tf.nn.relu(tf.nn.bias_add(conv, b_filter))

                # Max-pooling over the output segments
                expanded_conv_relu = tf.expand_dims(conv_relu, -1)
                pooled = tf.nn.max_pool(
                    expanded_conv_relu,
                    ksize=[1, self.segment_size, 1, 1],
                    strides=[1, self.segment_size, 1, 1],
                    padding="SAME",
                    name="maxpool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features into a single feature map.
        concat = tf.concat(pooled_outputs, axis=2)
        return tf.squeeze(concat, [3])

    @tensor
    def highway_layer(self) -> tf.Tensor:
        """Highway net projection following the CNN."""
        # Flatten the time dimension into the batch dimension, run the
        # highway stack, then restore the (batch, time, features) shape.
        # pylint: disable=no-member
        cnn_out_size = self.cnn_encoded.get_shape().as_list()[-1]
        # pylint: enable=no-member
        highway_layer = tf.reshape(self.cnn_encoded, [-1, cnn_out_size])
        for i in range(self.highway_depth):
            highway_layer = highway(
                highway_layer,
                scope=("highway_layer_%s" % i))
        return tf.reshape(
            highway_layer,
            [self.batch_size, -1, cnn_out_size])

    @tensor
    def bidirectional_rnn(self) -> Tuple[Tuple[tf.Tensor, tf.Tensor],
                                         Tuple[tf.Tensor, tf.Tensor]]:
        """Run a bidirectional RNN over the highway-projected segments."""
        fw_cell, bw_cell = self.rnn_cells()  # type: RNNCellTuple

        # Max-pooling shortened the sequences by a factor of segment_size
        # (rounded up), so the RNN lengths must be scaled accordingly.
        seq_lens = tf.ceil(tf.divide(
            self.input_sequence.lengths,
            self.segment_size))
        seq_lens = tf.cast(seq_lens, tf.int32)

        return tf.nn.bidirectional_dynamic_rnn(
            fw_cell, bw_cell, self.highway_layer,
            sequence_length=seq_lens,
            dtype=tf.float32)

    @tensor
    def temporal_states(self) -> tf.Tensor:
        """Concatenated forward and backward RNN outputs per time step."""
        # pylint: disable=unsubscriptable-object
        return tf.concat(self.bidirectional_rnn[0], 2)
        # pylint: enable=unsubscriptable-object

    @tensor
    def output(self) -> tf.Tensor:
        """Concatenated final states of the forward and backward RNNs."""
        # pylint: disable=unsubscriptable-object
        return tf.concat(self.bidirectional_rnn[1], 1)
        # pylint: enable=unsubscriptable-object

    @tensor
    def temporal_mask(self) -> tf.Tensor:
        """Input mask downsampled with the same pooling as the states."""
        expanded = tf.expand_dims(
            tf.expand_dims(self.input_sequence.temporal_mask, -1),
            -1)
        pooled = tf.nn.max_pool(
            expanded,
            ksize=[1, self.segment_size, 1, 1],
            strides=[1, self.segment_size, 1, 1],
            padding="SAME")
        return tf.squeeze(pooled, [2, 3])

    def rnn_cells(self) -> RNNCellTuple:
        """Return the graph template to for creating RNN memory cells."""
        if self.use_noisy_activations:
            return (NoisyGRUCell(self.rnn_size, self.train_mode),
                    NoisyGRUCell(self.rnn_size, self.train_mode))

        return (OrthoGRUCell(self.rnn_size),
                OrthoGRUCell(self.rnn_size))
| |
import oculusvr as ovr
import numpy as np
import pygame
import pygame.locals as pgl
from OpenGL.GL import *
from cgkit.cgtypes import mat4, vec3, quat
from ctypes import *
from oculusvr import Hmd, ovrGLTexture, ovrPosef, ovrVector3f
class RiftApp():
    """Base class for a pygame/OpenGL application rendering to an Oculus Rift.

    Subclasses implement render_scene() (and may override on_event());
    run() drives window creation, GL setup and the per-frame loop.
    NOTE(review): Python 2 code (uses the print statement) targeting the
    legacy OVR SDK Python bindings.
    """

    def __init__(self):
        """Initialize the OVR SDK, open the HMD and prepare per-eye data."""
        ovr.Hmd.initialize()
        self.hmd = ovr.Hmd()
        self.hmdDesc = self.hmd.hmd.contents  # cast(self.hmd.hmd,POINTER(ovrHmdDesc)).contents
        self.frame = 0
        # Workaround for a race condition bug in the SDK
        import time
        time.sleep(0.1)
        self.hmd.configure_tracking()
        self.fovPorts = (
            self.hmdDesc.DefaultEyeFov[0],
            self.hmdDesc.DefaultEyeFov[1]
        )
        # Per-eye projection matrices for the default FOV ports
        # (near 0.01, far 1000), flattened to plain lists for glLoadMatrixf.
        projections = map(
            lambda fovPort:
            (ovr.Hmd.get_perspective(
                fovPort, 0.01, 1000, True)),
            self.fovPorts
        )
        self.projections = map(
            lambda pr:
            pr.toList(),
            projections)
        self.eyeTextures = [ ovrGLTexture(), ovrGLTexture() ]
        # Fill in the texture/viewport descriptors the SDK needs for
        # distortion rendering; the GL texture ids are set in init_gl().
        for eye in range(0, 2):
            size = self.hmd.get_fov_texture_size(
                eye, self.fovPorts[eye])
            eyeTexture = self.eyeTextures[eye]
            eyeTexture.API = ovr.ovrRenderAPI_OpenGL
            header = eyeTexture.Texture.Header;
            header.TextureSize = size
            vp = header.RenderViewport;
            vp.Size = size
            vp.Pos.x = 0
            vp.Pos.y = 0

    def close(self):
        """Release the GL objects and shut down the HMD and the SDK."""
        glDeleteFramebuffers(2, self.fbo)
        glDeleteTextures(self.color)
        glDeleteRenderbuffers(2, self.depth)
        self.hmd.destroy()
        self.hmd = None
        ovr.Hmd.shutdown()

    def create_window(self):
        """Open a frameless double-buffered GL window on the Rift display."""
        import os
        # Position the SDL window on the Rift before pygame creates it.
        os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (
            self.hmdDesc.WindowsPos.x,
            self.hmdDesc.WindowsPos.y)
        pygame.init()
        pygame.display.set_mode(
            (
                self.hmdDesc.Resolution.w,
                self.hmdDesc.Resolution.h
            ),
            pgl.HWSURFACE | pgl.OPENGL | pgl.DOUBLEBUF | pgl.NOFRAME)
        window_info = pygame.display.get_wm_info()
        window = c_void_p(window_info['window'])
        # Hand the native window handle to the SDK (direct mode).
        ovr.ovrHmd_AttachToWindow(self.hmd.hmd, window, 0, 0)

    def init_gl(self):
        """Create per-eye framebuffers and configure SDK rendering."""
        self.fbo = glGenFramebuffers(2)
        self.color = glGenTextures(2)
        self.depth = glGenRenderbuffers(2)

        for eye in range(0, 2):
            self.build_framebuffer(eye)
            self.eyeTextures[eye].OGL.TexId = np.asscalar(self.color[eye])

        rc = ovr.ovrRenderAPIConfig()
        header = rc.Header;
        header.API = ovr.ovrRenderAPI_OpenGL
        header.BackBufferSize = self.hmdDesc.Resolution
        header.Multisample = 1
        for i in range(0, 8):
            rc.PlatformData[i] = 0 #ctypes.cast(, ctypes.c_uint)
        self.eyeRenderDescs = \
            self.hmd.configure_rendering(rc, self.fovPorts)

        # Cache the per-eye view offsets used to fetch eye poses each frame.
        self.eyeOffsets = [ ovrVector3f(), ovrVector3f() ]
        for eye in range(0, 2):
            self.eyeOffsets[eye] = self.eyeRenderDescs[eye].HmdToEyeViewOffset

        # Bug in the SDK leaves a program bound, so clear it
        glUseProgram(0)

    def build_framebuffer(self, eye):
        """Build the color-texture + depth-renderbuffer FBO for one eye.

        Raises:
            Exception: If the framebuffer is not complete after setup.
        """
        size = self.eyeTextures[eye].Texture.Header.TextureSize

        # Set up the color attachement texture
        glBindTexture(GL_TEXTURE_2D, self.color[eye])
        glTexParameteri(GL_TEXTURE_2D,
                        GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8,
                     size.w, size.h, 0, GL_RGB,
                     GL_UNSIGNED_BYTE, None)
        glBindTexture(GL_TEXTURE_2D, 0)

        # Set up the depth attachment renderbuffer
        glBindRenderbuffer(GL_RENDERBUFFER, self.depth[eye])
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT,
                              size.w, size.h)
        glBindRenderbuffer(GL_RENDERBUFFER, 0)

        # Set up the framebuffer proper
        glBindFramebuffer(GL_FRAMEBUFFER, self.fbo[eye])
        glFramebufferTexture2D(GL_FRAMEBUFFER,
                               GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                               self.color[eye], 0)
        glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                                  GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER,
                                  self.depth[eye])
        fboStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER)
        if (GL_FRAMEBUFFER_COMPLETE != fboStatus):
            raise Exception("Bad framebuffer setup")
        glBindFramebuffer(GL_FRAMEBUFFER, 0)

    def render_frame(self):
        """Render one stereo frame and submit it to the SDK."""
        self.frame += 1

        # Fetch the head pose
        poses = self.hmd.get_eye_poses(self.frame, self.eyeOffsets)

        self.hmd.begin_frame(self.frame)
        # Render eyes in the order recommended by the SDK.
        for i in range(0, 2):
            eye = self.hmdDesc.EyeRenderOrder[i]

            glMatrixMode(GL_PROJECTION)
            glLoadMatrixf(self.projections[eye])

            self.eyeview = mat4(1.0)

            # Apply the head orientation
            rot = poses[eye].Orientation
            # Convert the OVR orientation (a quaternion
            # structure) to a cgkit quaternion class, and
            # from there to a mat4 Coordinates are camera
            # coordinates
            rot = quat(rot.toList())
            rot = rot.toMat4()

            # Apply the head position
            pos = poses[eye].Position
            # Convert the OVR position (a vector3 structure)
            # to a cgcit vector3 class. Position is in camera /
            # Rift coordinates
            pos = vec3(pos.toList())
            pos = mat4(1.0).translate(pos)

            pose = pos * rot

            # apply it to the eyeview matrix
            self.eyeview = pose;

            # The subclass is responsible for taking eyeview
            # and applying it to whatever camera or modelview
            # coordinate system it uses before rendering the
            # scene

            # Active the offscreen framebuffer and render the scene
            glBindFramebuffer(GL_FRAMEBUFFER, self.fbo[eye])
            size = self.eyeTextures[eye].Texture.Header.RenderViewport.Size
            glViewport(0, 0, size.w, size.h)
            self.render_scene()
            glBindFramebuffer(GL_FRAMEBUFFER, 0)

        self.hmd.end_frame(poses, self.eyeTextures)
        # Clear any pending GL error flag left by the SDK distortion pass.
        glGetError()

    def update(self):
        """Pump the pygame event queue through on_event()."""
        for event in pygame.event.get():
            self.on_event(event)

    def on_event(self, event):
        """Handle one event; returns True if it was consumed (quit/ESC)."""
        if event.type == pgl.QUIT:
            self.running = False
            return True

        if event.type == pgl.KEYUP and event.key == pgl.K_ESCAPE:
            self.running = False
            return True

        return False

    def run(self):
        """Main loop: init, then update/render until quit; prints FPS."""
        self.create_window()
        self.init_gl()
        self.running = True
        start = ovr.Hmd.get_time_in_seconds()
        last = start
        while self.running:
            self.update()
            self.render_frame()
            #pygame.display.flip()
            now = ovr.Hmd.get_time_in_seconds()
            # Report the cumulative frame rate every 10 seconds.
            if (now - last > 10):
                interval = now - start
                fps = self.frame / interval
                print "%f" % fps
                last = now
        self.close()
        pygame.quit()
| |
from __future__ import absolute_import, unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from taggit.models import Tag
from wagtail.tests.snippets.forms import FancySnippetForm
from wagtail.tests.snippets.models import (
AlphaSnippet, FancySnippet, RegisterDecorator, RegisterFunction, SearchableSnippet,
StandardSnippet, ZuluSnippet)
from wagtail.tests.testapp.models import Advert, AdvertWithTabbedInterface, SnippetChooserModel
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailadmin.forms import WagtailAdminModelForm
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from wagtail.wagtailsnippets.models import SNIPPET_MODELS, register_snippet
from wagtail.wagtailsnippets.views.snippets import get_snippet_edit_handler
class TestSnippetIndexView(TestCase, WagtailTestUtils):
    """Tests for the snippet index view (listing of snippet types)."""

    def setUp(self):
        self.login()

    def get(self, params=None):
        # params=None replaces the original mutable {} default, which is
        # shared between calls and a classic Python pitfall.
        return self.client.get(reverse('wagtailsnippets:index'),
                               params or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/index.html')

    def test_displays_snippet(self):
        self.assertContains(self.get(), "Adverts")
class TestSnippetListView(TestCase, WagtailTestUtils):
    """Tests for the listing view of a single snippet model (Advert)."""

    def setUp(self):
        self.login()

    def get(self, params=None):
        # params=None replaces the original mutable {} default argument.
        return self.client.get(reverse('wagtailsnippets:list',
                                       args=('tests', 'advert')),
                               params or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/type_index.html')

    def test_simple_pagination(self):
        # Out-of-range and non-numeric page numbers must still render.
        pages = ['0', '1', '-1', '9999', 'Not a page']
        for page in pages:
            response = self.get({'p': page})
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'wagtailsnippets/snippets/type_index.html')

    def test_displays_add_button(self):
        self.assertContains(self.get(), "Add advert")

    def test_not_searchable(self):
        # Advert defines no search fields, so the listing is not searchable.
        self.assertFalse(self.get().context['is_searchable'])
class TestSnippetListViewWithSearchableSnippet(TestCase, WagtailTestUtils):
    """Search behaviour of the listing view for an indexed snippet model."""

    def setUp(self):
        self.login()

        # Create some instances of the searchable snippet for testing
        self.snippet_a = SearchableSnippet.objects.create(text="Hello")
        self.snippet_b = SearchableSnippet.objects.create(text="World")
        self.snippet_c = SearchableSnippet.objects.create(text="Hello World")

    def get(self, params=None):
        # params=None replaces the original mutable {} default argument.
        return self.client.get(reverse('wagtailsnippets:list',
                                       args=('snippetstests', 'searchablesnippet')),
                               params or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/type_index.html')

        # All snippets should be in items
        items = list(response.context['items'].object_list)
        self.assertIn(self.snippet_a, items)
        self.assertIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)

    def test_is_searchable(self):
        self.assertTrue(self.get().context['is_searchable'])

    def test_search_hello(self):
        response = self.get({'q': "Hello"})

        # Just snippets with "Hello" should be in items
        items = list(response.context['items'].object_list)
        self.assertIn(self.snippet_a, items)
        self.assertNotIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)

    def test_search_world(self):
        response = self.get({'q': "World"})

        # Just snippets with "World" should be in items
        items = list(response.context['items'].object_list)
        self.assertNotIn(self.snippet_a, items)
        self.assertIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)
class TestSnippetCreateView(TestCase, WagtailTestUtils):
    """Creating snippets through the admin 'add' view."""

    def setUp(self):
        self.login()

    def get(self, params=None):
        # params=None replaces the original mutable {} default argument.
        return self.client.get(reverse('wagtailsnippets:add',
                                       args=('tests', 'advert')),
                               params or {})

    def post(self, post_data=None):
        # post_data=None replaces the original mutable {} default argument.
        return self.client.post(reverse('wagtailsnippets:add',
                                        args=('tests', 'advert')),
                                post_data or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/create.html')

        # A plain Advert has a single panel, so no tab navigation is shown.
        self.assertNotContains(response, '<ul class="tab-nav merged">')
        self.assertNotContains(response, '<a href="#advert" class="active">Advert</a>', html=True)
        self.assertNotContains(response, '<a href="#other" class="">Other</a>', html=True)

    def test_snippet_with_tabbed_interface(self):
        response = self.client.get(reverse('wagtailsnippets:add',
                                           args=('tests', 'advertwithtabbedinterface')))

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/create.html')
        self.assertContains(response, '<ul class="tab-nav merged">')
        self.assertContains(response, '<a href="#advert" class="active">Advert</a>', html=True)
        self.assertContains(response, '<a href="#other" class="">Other</a>', html=True)

    def test_create_invalid(self):
        # Posting without required fields re-renders the form with errors.
        response = self.post(post_data={'foo': 'bar'})
        self.assertContains(response, "The snippet could not be created due to errors.")
        self.assertContains(response, "This field is required.")

    def test_create(self):
        response = self.post(post_data={'text': 'test_advert',
                                        'url': 'http://www.example.com/'})
        self.assertRedirects(response, reverse('wagtailsnippets:list', args=('tests', 'advert')))

        snippets = Advert.objects.filter(text='test_advert')
        self.assertEqual(snippets.count(), 1)
        self.assertEqual(snippets.first().url, 'http://www.example.com/')

    def test_create_with_tags(self):
        tags = ['hello', 'world']
        response = self.post(post_data={'text': 'test_advert',
                                        'url': 'http://example.com/',
                                        'tags': ', '.join(tags)})

        self.assertRedirects(response, reverse('wagtailsnippets:list',
                                               args=('tests', 'advert')))

        snippet = Advert.objects.get(text='test_advert')

        expected_tags = list(Tag.objects.order_by('name').filter(name__in=tags))
        self.assertEqual(len(expected_tags), 2)
        self.assertEqual(
            list(snippet.tags.order_by('name')),
            expected_tags)
class TestSnippetEditView(TestCase, WagtailTestUtils):
    """Editing existing snippets through the admin 'edit' view."""

    fixtures = ['test.json']

    def setUp(self):
        self.test_snippet = Advert.objects.get(id=1)
        self.test_snippet_with_tabbed_interface = AdvertWithTabbedInterface.objects.get(id=1)
        self.login()

    def get(self, params=None):
        # params=None replaces the original mutable {} default argument.
        return self.client.get(reverse('wagtailsnippets:edit',
                                       args=('tests', 'advert', self.test_snippet.id)),
                               params or {})

    def post(self, post_data=None):
        # post_data=None replaces the original mutable {} default argument.
        return self.client.post(reverse('wagtailsnippets:edit',
                                        args=('tests', 'advert', self.test_snippet.id)),
                                post_data or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/edit.html')

        # A plain Advert has a single panel, so no tab navigation is shown.
        self.assertNotContains(response, '<ul class="tab-nav merged">')
        self.assertNotContains(response, '<a href="#advert" class="active">Advert</a>', html=True)
        self.assertNotContains(response, '<a href="#other" class="">Other</a>', html=True)

    def test_snippet_with_tabbed_interface(self):
        reverse_args = ('tests', 'advertwithtabbedinterface', self.test_snippet_with_tabbed_interface.id)
        response = self.client.get(reverse('wagtailsnippets:edit', args=reverse_args))

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/edit.html')
        self.assertContains(response, '<ul class="tab-nav merged">')
        self.assertContains(response, '<a href="#advert" class="active">Advert</a>', html=True)
        self.assertContains(response, '<a href="#other" class="">Other</a>', html=True)

    def test_non_existant_model(self):
        response = self.client.get(reverse('wagtailsnippets:edit', args=('tests', 'foo', self.test_snippet.id)))
        self.assertEqual(response.status_code, 404)

    def test_nonexistant_id(self):
        response = self.client.get(reverse('wagtailsnippets:edit', args=('tests', 'advert', 999999)))
        self.assertEqual(response.status_code, 404)

    def test_edit_invalid(self):
        # Posting without required fields re-renders the form with errors.
        response = self.post(post_data={'foo': 'bar'})
        self.assertContains(response, "The snippet could not be saved due to errors.")
        self.assertContains(response, "This field is required.")

    def test_edit(self):
        response = self.post(post_data={'text': 'edited_test_advert',
                                        'url': 'http://www.example.com/edited'})
        self.assertRedirects(response, reverse('wagtailsnippets:list', args=('tests', 'advert')))

        snippets = Advert.objects.filter(text='edited_test_advert')
        self.assertEqual(snippets.count(), 1)
        self.assertEqual(snippets.first().url, 'http://www.example.com/edited')

    def test_edit_with_tags(self):
        tags = ['hello', 'world']
        response = self.post(post_data={'text': 'edited_test_advert',
                                        'url': 'http://www.example.com/edited',
                                        'tags': ', '.join(tags)})

        self.assertRedirects(response, reverse('wagtailsnippets:list',
                                               args=('tests', 'advert')))

        snippet = Advert.objects.get(text='edited_test_advert')

        expected_tags = list(Tag.objects.order_by('name').filter(name__in=tags))
        self.assertEqual(len(expected_tags), 2)
        self.assertEqual(
            list(snippet.tags.order_by('name')),
            expected_tags)
class TestSnippetDelete(TestCase, WagtailTestUtils):
    """Deleting a snippet through the admin delete view."""

    fixtures = ['test.json']

    def setUp(self):
        self.test_snippet = Advert.objects.get(id=1)
        self.login()

    def test_delete_get(self):
        url = reverse('wagtailsnippets:delete',
                      args=('tests', 'advert', self.test_snippet.id, ))
        self.assertEqual(self.client.get(url).status_code, 200)

    def test_delete_post(self):
        url = reverse('wagtailsnippets:delete',
                      args=('tests', 'advert', self.test_snippet.id, ))
        resp = self.client.post(url)

        # Should be redirected to explorer page
        self.assertRedirects(
            resp, reverse('wagtailsnippets:list', args=('tests', 'advert')))

        # Check that the page is gone
        self.assertEqual(Advert.objects.filter(text='test_advert').count(), 0)
class TestSnippetChooserPanel(TestCase, WagtailTestUtils):
    """Rendering and target-model resolution of SnippetChooserPanel."""

    fixtures = ['test.json']

    def setUp(self):
        model = SnippetChooserModel
        self.advert_text = 'Test advert text'
        test_snippet = model.objects.create(
            advert=Advert.objects.create(text=self.advert_text))

        self.edit_handler_class = get_snippet_edit_handler(model)
        self.form_class = self.edit_handler_class.get_form_class(model)
        form = self.form_class(instance=test_snippet)
        edit_handler = self.edit_handler_class(instance=test_snippet, form=form)

        # Pick out the chooser panel bound to the 'advert' field.
        self.snippet_chooser_panel = next(
            panel for panel in edit_handler.children
            if getattr(panel, 'field_name', None) == 'advert')

    def test_create_snippet_chooser_panel_class(self):
        panel_type = type(self.snippet_chooser_panel)
        self.assertEqual(panel_type.__name__, '_SnippetChooserPanel')

    def test_render_as_field(self):
        html = self.snippet_chooser_panel.render_as_field()
        for fragment in (self.advert_text,
                         "Choose advert",
                         "Choose another advert"):
            self.assertIn(fragment, html)

    def test_render_as_empty_field(self):
        blank_instance = SnippetChooserModel()
        form = self.form_class(instance=blank_instance)
        edit_handler = self.edit_handler_class(instance=blank_instance,
                                               form=form)

        panel = next(
            p for p in edit_handler.children
            if getattr(p, 'field_name', None) == 'advert')

        html = panel.render_as_field()
        self.assertIn("Choose advert", html)
        self.assertIn("Choose another advert", html)

    def test_render_js(self):
        rendered = self.snippet_chooser_panel.render_as_field()
        self.assertIn('createSnippetChooser("id_advert", "tests/advert");',
                      rendered)

    def test_target_model_from_string(self):
        # RemovedInWagtail16Warning: snippet_type argument
        with self.ignore_deprecation_warnings():
            panel = SnippetChooserPanel('advert', 'tests.advert')
            result = panel.bind_to_model(SnippetChooserModel).target_model()
        self.assertIs(result, Advert)

    def test_target_model_from_model(self):
        # RemovedInWagtail16Warning: snippet_type argument
        with self.ignore_deprecation_warnings():
            panel = SnippetChooserPanel('advert', Advert)
            result = panel.bind_to_model(SnippetChooserModel).target_model()
        self.assertIs(result, Advert)

    def test_target_model_autodetected(self):
        bound = SnippetChooserPanel('advert').bind_to_model(SnippetChooserModel)
        self.assertEqual(bound.target_model(), Advert)

    def test_target_model_malformed_type(self):
        # RemovedInWagtail16Warning: snippet_type argument
        with self.ignore_deprecation_warnings():
            bound = SnippetChooserPanel(
                'advert', 'snowman').bind_to_model(SnippetChooserModel)
        self.assertRaises(ImproperlyConfigured,
                          bound.target_model)

    def test_target_model_nonexistent_type(self):
        # RemovedInWagtail16Warning: snippet_type argument
        with self.ignore_deprecation_warnings():
            bound = SnippetChooserPanel(
                'advert', 'snowman.lorry').bind_to_model(SnippetChooserModel)
        self.assertRaises(ImproperlyConfigured,
                          bound.target_model)
class TestSnippetRegistering(TestCase):
    """Both registration mechanisms must add the model to SNIPPET_MODELS."""

    def test_register_function(self):
        self.assertIn(RegisterFunction, SNIPPET_MODELS)

    def test_register_decorator(self):
        # A broken decorator typically returns None instead of the class.
        self.assertIsNotNone(RegisterDecorator)
        self.assertIn(RegisterDecorator, SNIPPET_MODELS)
class TestSnippetOrdering(TestCase):
    """Snippet models must be kept alphabetically ordered on registration."""

    def setUp(self):
        # Register in reverse alphabetical order on purpose.
        register_snippet(ZuluSnippet)
        register_snippet(AlphaSnippet)

    def test_snippets_ordering(self):
        # Other snippets may get registered elsewhere during testing, so
        # only the relative order of these two models can be asserted.
        alpha_pos = SNIPPET_MODELS.index(AlphaSnippet)
        zulu_pos = SNIPPET_MODELS.index(ZuluSnippet)
        self.assertLess(alpha_pos, zulu_pos)
class TestUsageCount(TestCase):
    """Usage counting for snippets referenced from pages in the fixture."""

    fixtures = ['test.json']

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_snippet_usage_count(self):
        usage = Advert.objects.get(id=1).get_usage()
        self.assertEqual(usage.count(), 2)
class TestUsedBy(TestCase):
    """The objects reported as using a snippet should be pages."""

    fixtures = ['test.json']

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_snippet_used_by(self):
        advert = Advert.objects.get(id=1)
        # assertIsInstance instead of comparing ``type(...)`` objects with
        # assertEqual: idiomatic and gives a clearer failure message.
        self.assertIsInstance(advert.get_usage()[0], Page)
class TestSnippetChoose(TestCase, WagtailTestUtils):
    """The chooser modal listing for the Advert snippet model."""

    fixtures = ['test.json']

    def setUp(self):
        self.login()

    def get(self, params=None):
        url = reverse('wagtailsnippets:choose', args=('tests', 'advert'))
        return self.client.get(url, params or {})

    def test_simple(self):
        self.assertTemplateUsed(self.get(),
                                'wagtailsnippets/chooser/choose.html')

    def test_simple_pagination(self):
        # Out-of-range and non-numeric page numbers must still render.
        for page in ('0', '1', '-1', '9999', 'Not a page'):
            resp = self.get({'p': page})
            self.assertEqual(resp.status_code, 200)
            self.assertTemplateUsed(resp,
                                    'wagtailsnippets/chooser/choose.html')

    def test_not_searchable(self):
        self.assertFalse(self.get().context['is_searchable'])
class TestSnippetChooseWithSearchableSnippet(TestCase, WagtailTestUtils):
    """Search behaviour of the chooser modal for an indexed snippet model."""

    def setUp(self):
        self.login()

        # Create some instances of the searchable snippet for testing
        self.snippet_a = SearchableSnippet.objects.create(text="Hello")
        self.snippet_b = SearchableSnippet.objects.create(text="World")
        self.snippet_c = SearchableSnippet.objects.create(text="Hello World")

    def get(self, params=None):
        url = reverse('wagtailsnippets:choose',
                      args=('snippetstests', 'searchablesnippet'))
        return self.client.get(url, params or {})

    def _items(self, response):
        # Helper: the listed snippets as a plain list.
        return list(response.context['items'].object_list)

    def test_simple(self):
        resp = self.get()
        self.assertTemplateUsed(resp, 'wagtailsnippets/chooser/choose.html')

        # With no query, every snippet is listed.
        items = self._items(resp)
        self.assertIn(self.snippet_a, items)
        self.assertIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)

    def test_is_searchable(self):
        self.assertTrue(self.get().context['is_searchable'])

    def test_search_hello(self):
        # Only snippets containing "Hello" match.
        items = self._items(self.get({'q': "Hello"}))
        self.assertIn(self.snippet_a, items)
        self.assertNotIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)

    def test_search_world(self):
        # Only snippets containing "World" match.
        items = self._items(self.get({'q': "World"}))
        self.assertNotIn(self.snippet_a, items)
        self.assertIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)
class TestSnippetChosen(TestCase, WagtailTestUtils):
    """The chooser 'chosen' response for a selected snippet."""

    fixtures = ['test.json']

    def setUp(self):
        self.login()

    def get(self, pk, params=None):
        url = reverse('wagtailsnippets:chosen', args=('tests', 'advert', pk))
        return self.client.get(url, params or {})

    def test_choose_a_page(self):
        existing_pk = Advert.objects.all()[0].pk
        self.assertTemplateUsed(self.get(pk=existing_pk),
                                'wagtailsnippets/chooser/chosen.js')

    def test_choose_a_non_existing_page(self):
        self.assertEqual(self.get(999999).status_code, 404)
class TestAddOnlyPermissions(TestCase, WagtailTestUtils):
    """Views as seen by a user who may add adverts but not change them."""

    fixtures = ['test.json']

    def setUp(self):
        self.test_snippet = Advert.objects.get(id=1)

        # Create a user with add_advert permission but not change_advert
        user = get_user_model().objects.create_user(
            username='addonly',
            email='addonly@example.com',
            password='password'
        )
        add_permission = Permission.objects.get(
            content_type__app_label='tests', codename='add_advert')
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin')
        user.user_permissions.add(add_permission, admin_permission)
        self.assertTrue(
            self.client.login(username='addonly', password='password'))

    def test_get_index(self):
        resp = self.client.get(
            reverse('wagtailsnippets:list', args=('tests', 'advert')))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp,
                                'wagtailsnippets/snippets/type_index.html')

        # user should get an "Add advert" button
        self.assertContains(resp, "Add advert")

    def test_get_add(self):
        resp = self.client.get(
            reverse('wagtailsnippets:add', args=('tests', 'advert')))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailsnippets/snippets/create.html')

    def test_get_edit(self):
        resp = self.client.get(
            reverse('wagtailsnippets:edit',
                    args=('tests', 'advert', self.test_snippet.id)))

        # permission should be denied
        self.assertRedirects(resp, reverse('wagtailadmin_home'))

    def test_get_delete(self):
        resp = self.client.get(
            reverse('wagtailsnippets:delete',
                    args=('tests', 'advert', self.test_snippet.id, )))

        # permission should be denied
        self.assertRedirects(resp, reverse('wagtailadmin_home'))
class TestEditOnlyPermissions(TestCase, WagtailTestUtils):
    """Views as seen by a user who may change adverts but not add them."""

    fixtures = ['test.json']

    def setUp(self):
        self.test_snippet = Advert.objects.get(id=1)

        # Create a user with change_advert permission but not add_advert
        user = get_user_model().objects.create_user(
            username='changeonly',
            email='changeonly@example.com',
            password='password'
        )
        change_permission = Permission.objects.get(
            content_type__app_label='tests', codename='change_advert')
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin')
        user.user_permissions.add(change_permission, admin_permission)
        self.assertTrue(
            self.client.login(username='changeonly', password='password'))

    def test_get_index(self):
        resp = self.client.get(
            reverse('wagtailsnippets:list', args=('tests', 'advert')))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp,
                                'wagtailsnippets/snippets/type_index.html')

        # user should not get an "Add advert" button
        self.assertNotContains(resp, "Add advert")

    def test_get_add(self):
        resp = self.client.get(
            reverse('wagtailsnippets:add', args=('tests', 'advert')))

        # permission should be denied
        self.assertRedirects(resp, reverse('wagtailadmin_home'))

    def test_get_edit(self):
        resp = self.client.get(
            reverse('wagtailsnippets:edit',
                    args=('tests', 'advert', self.test_snippet.id)))
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailsnippets/snippets/edit.html')

    def test_get_delete(self):
        resp = self.client.get(
            reverse('wagtailsnippets:delete',
                    args=('tests', 'advert', self.test_snippet.id, )))

        # permission should be denied
        self.assertRedirects(resp, reverse('wagtailadmin_home'))
class TestDeleteOnlyPermissions(TestCase, WagtailTestUtils):
    """Views as seen by a user who may only delete adverts."""

    fixtures = ['test.json']

    def setUp(self):
        self.test_snippet = Advert.objects.get(id=1)

        # Create a user with delete_advert permission
        # NOTE(review): the 'deleteeonly' email typo is kept as fixture data.
        user = get_user_model().objects.create_user(
            username='deleteonly',
            email='deleteeonly@example.com',
            password='password'
        )
        # Renamed from the copy-pasted 'change_permission': the variable
        # holds the *delete* permission, matching the sibling test classes'
        # naming (add_permission / change_permission).
        delete_permission = Permission.objects.get(content_type__app_label='tests', codename='delete_advert')
        admin_permission = Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        user.user_permissions.add(delete_permission, admin_permission)
        self.assertTrue(self.client.login(username='deleteonly', password='password'))

    def test_get_index(self):
        response = self.client.get(reverse('wagtailsnippets:list',
                                           args=('tests', 'advert')))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/type_index.html')

        # user should not get an "Add advert" button
        self.assertNotContains(response, "Add advert")

    def test_get_add(self):
        response = self.client.get(reverse('wagtailsnippets:add',
                                           args=('tests', 'advert')))

        # permission should be denied
        self.assertRedirects(response, reverse('wagtailadmin_home'))

    def test_get_edit(self):
        response = self.client.get(reverse('wagtailsnippets:edit',
                                           args=('tests', 'advert', self.test_snippet.id)))

        # permission should be denied
        self.assertRedirects(response, reverse('wagtailadmin_home'))

    def test_get_delete(self):
        response = self.client.get(reverse('wagtailsnippets:delete', args=('tests', 'advert', self.test_snippet.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsnippets/snippets/confirm_delete.html')
class TestSnippetEditHandlers(TestCase, WagtailTestUtils):
    """Snippets may supply a custom base form through their edit handler."""

    def _form_class_for(self, model):
        # Helper: resolve the form class the snippet admin would build.
        handler_class = get_snippet_edit_handler(model)
        return handler_class.get_form_class(model)

    def test_standard_edit_handler(self):
        form_class = self._form_class_for(StandardSnippet)
        self.assertTrue(issubclass(form_class, WagtailAdminModelForm))
        self.assertFalse(issubclass(form_class, FancySnippetForm))

    def test_fancy_edit_handler(self):
        form_class = self._form_class_for(FancySnippet)
        self.assertTrue(issubclass(form_class, WagtailAdminModelForm))
        self.assertTrue(issubclass(form_class, FancySnippetForm))
| |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import mock
from oslo_config import cfg
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.common import config as common_config
from neutron.debug import commands
from neutron.debug import debug_agent
from neutron.extensions import portbindings
from neutron.tests import base
class MyApp(object):
    """Tiny stand-in for a cliff App object: only records the stdout stream
    that the commands under test write to."""

    def __init__(self, _stdout):
        # commands access this as self.app.stdout
        self.stdout = _stdout
class TestDebugCommands(base.BaseTestCase):
    """Exercises the neutron-debug CLI commands (create/delete/list/exec/
    clear probe, ping-all) against fully mocked neutronclient, interface
    driver and ip_lib layers, asserting the expected API and driver calls."""
    def setUp(self):
        """Patch all external dependencies and wire a NeutronDebugAgent to a
        mock client (canned API responses) and a mock interface driver."""
        super(TestDebugCommands, self).setUp()
        cfg.CONF.register_opts(interface.OPTS)
        cfg.CONF.register_opts(config.EXT_NET_BRIDGE_OPTS)
        common_config.init([])
        config.register_interface_driver_opts_helper(cfg.CONF)
        # ip_lib is patched so no real devices or namespaces are touched.
        device_exists_p = mock.patch(
            'neutron.agent.linux.ip_lib.device_exists', return_value=False)
        device_exists_p.start()
        namespace_p = mock.patch(
            'neutron.agent.linux.ip_lib.IpNetnsCommand')
        namespace_p.start()
        ensure_namespace_p = mock.patch(
            'neutron.agent.linux.ip_lib.IPWrapper.ensure_namespace')
        ensure_namespace_p.start()
        # The interface driver is replaced by a MagicMock that always names
        # the plugged device 'tap12345678-12'.
        dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
        driver_cls = dvr_cls_p.start()
        mock_driver = mock.MagicMock()
        mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        mock_driver.get_device_name.return_value = 'tap12345678-12'
        driver_cls.return_value = mock_driver
        self.driver = mock_driver
        # Canned neutron API responses served by the mocked client.
        client_cls_p = mock.patch('neutronclient.v2_0.client.Client')
        client_cls = client_cls_p.start()
        client_inst = mock.Mock()
        client_cls.return_value = client_inst
        fake_network = {'network': {'id': 'fake_net',
                                    'tenant_id': 'fake_tenant',
                                    'subnets': ['fake_subnet']}}
        fake_port = {'port':
                     {'id': 'fake_port',
                      'device_owner': 'fake_device',
                      'mac_address': 'aa:bb:cc:dd:ee:ffa',
                      'network_id': 'fake_net',
                      'fixed_ips':
                      [{'subnet_id': 'fake_subnet', 'ip_address': '10.0.0.3'}]
                      }}
        fake_ports = {'ports': [fake_port['port']]}
        self.fake_ports = fake_ports
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.254'}]
        fake_subnet_v4 = {'subnet': {'name': 'fake_subnet_v4',
                                     'id': 'fake_subnet',
                                     'network_id': 'fake_net',
                                     'gateway_ip': '10.0.0.1',
                                     'dns_nameservers': ['10.0.0.2'],
                                     'host_routes': [],
                                     'cidr': '10.0.0.0/24',
                                     'allocation_pools': allocation_pools,
                                     'enable_dhcp': True,
                                     'ip_version': 4}}
        client_inst.list_ports.return_value = fake_ports
        client_inst.create_port.return_value = fake_port
        client_inst.show_port.return_value = fake_port
        client_inst.show_network.return_value = fake_network
        client_inst.show_subnet.return_value = fake_subnet_v4
        self.client = client_inst
        mock_std = mock.Mock()
        self.app = MyApp(mock_std)
        self.app.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF,
                                                             client_inst,
                                                             mock_driver)
    def _test_create_probe(self, device_owner):
        """Run create_probe and assert the port create + plug + init_l3 calls."""
        cmd = commands.CreateProbe(self.app, None)
        cmd_parser = cmd.get_parser('create_probe')
        if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE:
            args = ['fake_net', '--device-owner', 'compute']
        else:
            args = ['fake_net']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        # Expected body of the create_port request made by the agent.
        fake_port = {'port':
                     {'device_owner': device_owner,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      portbindings.HOST_ID: cfg.CONF.host,
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.create_port(fake_port),
                                      mock.call.show_subnet('fake_subnet')])
        # non-external network: plugged with bridge=None
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.plug('fake_net',
                                                     'fake_port',
                                                     'tap12345678-12',
                                                     'aa:bb:cc:dd:ee:ffa',
                                                     bridge=None,
                                                     namespace=namespace),
                                      mock.call.init_l3('tap12345678-12',
                                                        ['10.0.0.3/24'],
                                                        namespace=namespace
                                                        )])
    def test_create_network_probe(self):
        """create_probe with the default (network) device owner."""
        self._test_create_probe(debug_agent.DEVICE_OWNER_NETWORK_PROBE)
    def test_create_nova_probe(self):
        """create_probe with --device-owner compute."""
        self._test_create_probe(debug_agent.DEVICE_OWNER_COMPUTE_PROBE)
    def _test_create_probe_external(self, device_owner):
        """Same as _test_create_probe, but on an external network the probe
        is plugged into the external-network bridge (bridge='')."""
        fake_network = {'network': {'id': 'fake_net',
                                    'tenant_id': 'fake_tenant',
                                    'router:external': True,
                                    'subnets': ['fake_subnet']}}
        self.client.show_network.return_value = fake_network
        cmd = commands.CreateProbe(self.app, None)
        cmd_parser = cmd.get_parser('create_probe')
        if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE:
            args = ['fake_net', '--device-owner', 'compute']
        else:
            args = ['fake_net']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        fake_port = {'port':
                     {'device_owner': device_owner,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      portbindings.HOST_ID: cfg.CONF.host,
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.create_port(fake_port),
                                      mock.call.show_subnet('fake_subnet')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.plug('fake_net',
                                                     'fake_port',
                                                     'tap12345678-12',
                                                     'aa:bb:cc:dd:ee:ffa',
                                                     bridge='',
                                                     namespace=namespace),
                                      mock.call.init_l3('tap12345678-12',
                                                        ['10.0.0.3/24'],
                                                        namespace=namespace
                                                        )])
    def test_create_network_probe_external(self):
        """create_probe on an external network, network device owner."""
        self._test_create_probe_external(
            debug_agent.DEVICE_OWNER_NETWORK_PROBE)
    def test_create_nova_probe_external(self):
        """create_probe on an external network, compute device owner."""
        self._test_create_probe_external(
            debug_agent.DEVICE_OWNER_COMPUTE_PROBE)
    def test_delete_probe(self):
        """delete_probe unplugs the device (bridge=None) and deletes the port."""
        cmd = commands.DeleteProbe(self.app, None)
        cmd_parser = cmd.get_parser('delete_probe')
        args = ['fake_port']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_port('fake_port'),
                                      mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace,
                                                       bridge=None)])
    def test_delete_probe_external(self):
        """delete_probe on an external network unplugs with bridge=''."""
        fake_network = {'network': {'id': 'fake_net',
                                    'tenant_id': 'fake_tenant',
                                    'router:external': True,
                                    'subnets': ['fake_subnet']}}
        self.client.show_network.return_value = fake_network
        cmd = commands.DeleteProbe(self.app, None)
        cmd_parser = cmd.get_parser('delete_probe')
        args = ['fake_port']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_port('fake_port'),
                                      mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace,
                                                       bridge='')])
    def test_list_probe(self):
        """list_probe queries ports owned by either probe device owner."""
        cmd = commands.ListProbe(self.app, None)
        cmd_parser = cmd.get_parser('list_probe')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        self.client.assert_has_calls(
            [mock.call.list_ports(
                device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE,
                              debug_agent.DEVICE_OWNER_COMPUTE_PROBE])])
    def test_exec_command(self):
        """exec_command runs inside the probe's network namespace."""
        cmd = commands.ExecProbe(self.app, None)
        cmd_parser = cmd.get_parser('exec_command')
        args = ['fake_port', 'fake_command']
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        self.client.assert_has_calls([mock.call.show_port('fake_port')])
    def test_clear_probe(self):
        """clear_probe lists this host's probe ports and deletes each one."""
        cmd = commands.ClearProbe(self.app, None)
        cmd_parser = cmd.get_parser('clear_probe')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls(
            [mock.call.list_ports(
                device_id=socket.gethostname(),
                device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE,
                              debug_agent.DEVICE_OWNER_COMPUTE_PROBE]),
             mock.call.show_port('fake_port'),
             mock.call.show_network('fake_net'),
             mock.call.show_subnet('fake_subnet'),
             mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace,
                                                       bridge=None)])
    def test_ping_all_with_ensure_port(self):
        """ping_all creates a probe first when none exists on the network."""
        fake_ports = self.fake_ports
        def fake_port_list(network_id=None, device_owner=None, device_id=None):
            if network_id:
                # In order to test ensure_port, return []
                return {'ports': []}
            return fake_ports
        self.client.list_ports.side_effect = fake_port_list
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        namespace = 'qprobe-fake_port'
        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        fake_port = {'port':
                     {'device_owner': debug_agent.DEVICE_OWNER_NETWORK_PROBE,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      portbindings.HOST_ID: cfg.CONF.host,
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        expected = [mock.call.show_network('fake_net'),
                    mock.call.show_subnet('fake_subnet'),
                    mock.call.create_port(fake_port),
                    mock.call.show_subnet('fake_subnet')]
        self.client.assert_has_calls(expected)
        self.driver.assert_has_calls([mock.call.init_l3('tap12345678-12',
                                                        ['10.0.0.3/24'],
                                                        namespace=namespace
                                                        )])
    def test_ping_all(self):
        """ping_all reuses the existing probe port on each network."""
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        expected = [mock.call.list_ports(),
                    mock.call.list_ports(
                        network_id='fake_net',
                        device_owner=debug_agent.DEVICE_OWNER_NETWORK_PROBE,
                        device_id=socket.gethostname()),
                    mock.call.show_subnet('fake_subnet'),
                    mock.call.show_port('fake_port')]
        self.client.assert_has_calls(expected)
    def test_ping_all_v6(self):
        """ping_all still executes (ping6 path) when the subnet is IPv6."""
        fake_subnet_v6 = {'subnet': {'name': 'fake_v6',
                                     'ip_version': 6}}
        self.client.show_subnet.return_value = fake_subnet_v6
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        self.client.assert_has_calls([mock.call.list_ports()])
| |
"""
This module provides resource management helper functions for lda server implementations.
Before importing this module, several environment variables must first be set to configure the database for storing resources.
The default storage implementation uses MongoDB, but this can be changed by setting the OPERATION_PRIMITIVES environment
variable to the name of a different module.
When using the default MongoDB implementation, 3 additional environment variables must be set:
1. MONGODB_DB_HOST - hostname of the MONGODB server to use
2. MONGODB_DB_PORT - the MongoDB server port
3. APP_NAME - the name of your application, which is used as the DB name where the resources will be stored
Example Usage:
import os
os.environ['MONGODB_DB_HOST'] = 'localhost'
os.environ['MONGODB_DB_PORT'] = '27017'
os.environ['APP_NAME'] = 'teststore'
import lda
foo_container_environ = {'HTTP_HOST': 'localhost', 'PATH_INFO': '/tst/foo', 'QUERY_STRING': ''}
new_foo_resource = {'rdfs_label': 'my foo', 'rdf_type': 'http://example.org#Foo'}
body, status, headers = lda.create_document(foo_container_environ, new_foo_resource, 'in_foo_container')
...
"""
import os
import json
import Cookie, jwt
from webob import Request
if 'URL_POLICY_CLASS' not in os.environ:
os.environ['URL_POLICY_CLASS'] = 'url_policy#TypeQualifiedHostnameTenantURLPolicy'
if 'CHECK_ACCESS_RIGHTS' not in os.environ:
os.environ['CHECK_ACCESS_RIGHTS'] = 'False'
import rdf_json
import example_logic_tier as base
from rdfgraphlib import rdfjson_to_graph, serialize_graph
from storage import operation_primitives
from base_constants import RDF, LDP
def create_document(environ, document, membership_property, complete_document_callback=None):
    """
    Create and store a new LD resource, linking it to the LD container resource at the
    REQUEST_URI of ``environ`` (usually the target of an HTTP POST request).

    The new resource is initialized from ``document``, an application/json dictionary,
    and linked back to the container through the field named by ``membership_property``,
    which must match the membership property of that LD Container. Containers are
    virtual resources populated by querying for resources that reference them
    (see ``get_virtual_container``).

    Returns a triple of (body, status, headers):
    201 - Created => headers includes at least a Location entry with the URL of the
          new resource; body is its application/json representation.
    others => headers may be empty or carry entries for the client; body is a list of
          (field, message) pairs where field is the field in error or '' and message
          starts with a number, a space, and an optional explanation.

    :param environ: a WSGI environment identifying the LD container for the new resource
    :param document: an application/json structure to initialize the new document
    :param membership_property: property name used to link the new resource to its container
    :param complete_document_callback: (optional) invoked before returning the response document
    """
    logic = Domain_Logic(environ, complete_document_callback)
    container_url = logic.document_url()
    # Copy so the caller's dict is not mutated, then add the container back-pointer.
    linked = dict(document)
    linked[membership_property] = container_url
    rdf_doc = logic.convert_compact_json_to_rdf_json(linked)
    status, headers, body = logic.create_document(rdf_doc, logic.document_id + '/')
    if hasattr(body, 'graph_url'):
        body = logic.convert_rdf_json_to_compact_json(body)
    else:
        # not an rdf_json document - probably an error condition
        body = json.dumps(body, cls=rdf_json.RDF_JSON_Encoder)
    return body, status, headers
def get_document(environ, complete_document_callback=None):
    """
    Get the LD resource at the REQUEST_URI of ``environ`` (usually an HTTP GET target).

    Returns a triple of (body, status, headers):
    200 - OK => headers is a list of headers for the client; body is the resource's
          application/json representation.
    others => headers may be empty or carry entries for the client; body is a list of
          (field, message) pairs where field is the field in error or '' and message
          starts with a number, a space, and an optional explanation.

    :param environ: a WSGI environment identifying the LD resource to get
    :param complete_document_callback: (optional) invoked before returning the response document
    """
    logic = Domain_Logic(environ, complete_document_callback)
    status, headers, body = logic.get_document()
    add_standard_headers(environ, headers)
    if hasattr(body, 'graph_url'):
        body = logic.convert_rdf_json_to_compact_json(body)
    else:
        # not an rdf_json document - probably an error condition
        body = json.dumps(body, cls=rdf_json.RDF_JSON_Encoder)
    return body, status, headers
def patch_document(environ, document, complete_document_callback=None):
    """
    Apply a partial update (HTTP PATCH semantics) to the LD resource at the
    REQUEST_URI of ``environ``.

    ``document`` is an application/json dictionary holding only the fields to change;
    a field value of None (null) removes that field from the resource. ``environ``
    must include a CE-Revision header matching the resource's current ce_revision
    property (normally the value the client last read). A mismatch fails the patch
    with HTTP 409 (Conflict). On success the stored ce_revision is updated along with
    the specified fields, HTTP 200 (OK) is returned, and a history document captures
    the resource's previous state.

    Returns a triple of (body, status, headers):
    200 - OK => headers is a list of headers for the client; body is the updated
          resource's application/json representation.
    others => headers may be empty or carry entries for the client; body is a list of
          (field, message) pairs where field is the field in error or '' and message
          starts with a number, a space, and an optional explanation.

    :param environ: WSGI environment naming the resource and its expected revision (CE-Revision header)
    :param document: an application/json structure containing the resource's fields to change
    :param complete_document_callback: (optional) invoked before returning the response document
    """
    logic = Domain_Logic(environ, complete_document_callback)
    rdf_patch = logic.convert_compact_json_to_rdf_json(document)
    status, headers, body = logic.patch_document(rdf_patch)
    if hasattr(body, 'graph_url'):
        body = logic.convert_rdf_json_to_compact_json(body)
    else:
        # not an rdf_json document - probably an error condition
        body = json.dumps(body, cls=rdf_json.RDF_JSON_Encoder)
    return body, status, headers
def delete_document(environ):
    """
    Delete the LD resource at the REQUEST_URI of ``environ`` (usually an HTTP
    DELETE target).

    Returns a triple of (body, status, headers):
    204 - No Content => successful delete; headers is a list of headers for the
          client and body is an empty list.
    others => headers may be empty or carry entries for the client; body is a list of
          (field, message) pairs where field is the field in error or '' and message
          starts with a number, a space, and an optional explanation.

    :param environ: a WSGI environment identifying the LD resource to delete
    """
    logic = Domain_Logic(environ)
    status, headers, body = logic.delete_document()
    add_standard_headers(environ, headers)
    return body, status, headers
def get_virtual_container(environ, membership_property, complete_document_callback=None):
    """
    Get the LD container resource at the REQUEST_URI of ``environ`` (usually an
    HTTP GET target).

    LD containers are virtual resources: they are not stored in the database, but are
    generated by querying for resources that reference the container. A container URL
    has the form /<query-space>/<container-type> (e.g., "/tst/foo") and is represented
    as an ldp:DirectContainer per the W3C Linked Data Platform 1.0 specification
    (http://www.w3.org/TR/2015/REC-ldp-20150226/).
    TODO: more details

    :param environ: a WSGI environment identifying the LD container resource
    :param membership_property: property name used to query for resources in the container
    :param complete_document_callback: (optional) invoked for each resource in the container
    """
    logic = Domain_Logic(environ, complete_document_callback)
    container_url = logic.document_url()
    converter = rdf_json.Compact_json_to_rdf_json_converter(logic.namespace_mappings())
    membership_predicate = converter.expand_predicate(membership_property)
    container = logic.create_container(container_url, container_url, membership_predicate)
    status, container = logic.complete_request_document(container)
    if hasattr(container, 'graph_url'):
        body = logic.convert_rdf_json_to_compact_json(container)
    else:
        # not an rdf_json document - probably an error condition
        body = json.dumps(container, cls=rdf_json.RDF_JSON_Encoder)
    return body, status, []
def execute_query(environ, query, complete_document_callback=None):
    """
    Execute ``query`` against the collection identified by the REQUEST_URI of
    ``environ`` and return an ldp:Container of 0 or more matching documents.

    ``query`` is a Python dictionary whose format is TBD. Queries are safe and
    idempotent - they have no side effects and repeating one yields the same result -
    so they resemble a GET, but are performed via POST.

    Returns a triple of (body, status, headers):
    200 - OK => headers is a list of headers for the client; body is an ldp:Container
          resource in application/json format.
    others => headers may be empty or carry entries for the client; body is a list of
          (field, message) pairs where field is the field in error or '' and message
          starts with a number, a space, and an optional explanation.

    :param environ: a WSGI environment identifying the database to query
    :param query: the query to execute
    :param complete_document_callback: (optional) invoked for each resource in the query result
    """
    logic = Domain_Logic(environ, complete_document_callback)
    #query = logic.convert_compact_json_to_rdf_json(query)
    status, headers, result = logic.execute_query(query)
    if status != 200:
        # pass the error payload through as JSON
        return json.dumps(result, cls=rdf_json.RDF_JSON_Encoder), status, headers
    container_url = logic.request_url()
    members = [rdf_json.URI(resource.default_subject()) for resource in result]
    container_predicates = {
        RDF+'type': rdf_json.URI(LDP+'BasicContainer'),
        LDP+'contains': members
        }
    document = rdf_json.RDF_JSON_Document({container_url: container_predicates}, container_url)
    logic.add_member_detail(document, result)
    body = logic.convert_rdf_json_to_compact_json(document)
    return body, status, headers
def convert_to_requested_format(document, headers, environ): #TODO: pass in req, instead of environ ???
    """
    Serialize ``document`` into the representation best matching the Accept header
    of ``environ``, appending Content-Type, Cache-Control and Content-length
    headers when not already present.

    Supported formats:
        application/json (default)
        text/html
        application/rdf+json
        application/rdf+json+ce
        application/rdf+xml
        text/turtle
        application/x-turtle
        application/ld+json
    """
    # In this application architectural style, the only method that ever returns HTML is GET. We never
    # return HTML from POST and we do not support application/x-www-form-urlencoded for POST
    domain_logic = Domain_Logic(environ)
    #TODO: if there is no accept header then use content-type header for post response ... is that what best_match does already?
    offers = ['application/json',  # default
              'text/html',
              'application/rdf+json',
              'application/rdf+json+ce',
              'application/rdf+xml',
              'text/turtle',
              'application/x-turtle',
              'application/ld+json']
    best_match = Request(environ).accept.best_match(offers)
    if best_match == 'application/json':
        body = json.dumps(document)
    else:
        graph_url = document.get('_subject')
        document = domain_logic.convert_compact_json_to_rdf_json(document) #TODO: doesn't work for containers - ld_contains contents is not being converted
        if best_match == 'application/rdf+json+ce':
            body = json.dumps(document, cls=rdf_json.RDF_JSON_Encoder)
        elif best_match == 'application/rdf+json':
            body = json.dumps(rdf_json.normalize(document), cls=rdf_json.RDF_JSON_Encoder)
        elif best_match in ('application/rdf+xml', 'text/turtle', 'application/x-turtle', 'application/ld+json'):
            graph = rdfjson_to_graph(rdf_json.normalize(document))
            body = serialize_graph(graph, best_match, None) #TODO: should we use wfile instead of string return value?
        elif best_match == 'text/html':
            body = domain_logic.convert_rdf_json_to_html(rdf_json.RDF_JSON_Document(document, graph_url))
    if not header_set('Content-Type', headers):
        headers.append(('Content-Type', best_match))
    if not header_set('Cache-Control', headers):
        headers.append(('Cache-Control', 'no-cache'))
    headers.append(('Content-length', str(len(body))))
    return body, headers
def add_standard_headers(environ, headers):
    """Append CORS and session-cookie headers to ``headers`` in place.

    Mirrors the request's Origin in Access-Control-Allow-* headers when
    present, and sets the SSSESSIONID session cookie (plus a 'user' cookie
    extracted from the session JWT) when the request carried a Bearer token
    or a guest token, unless the client already holds a session cookie.
    """
    origin = environ.get('HTTP_ORIGIN')
    if origin and not header_set('Access-Control-Allow-Origin', headers):
        headers.append(('Access-Control-Allow-Origin', origin))
        headers.append(('Access-Control-Allow-Credentials', 'true'))
        headers.append(('Access-Control-Expose-Headers', 'Content-Location, Location'))
    if ('HTTP_AUTHORIZATION' in environ and environ['HTTP_AUTHORIZATION'].lower().startswith('bearer ')):
        # user credentials from another domain were passed by the client
        session_key = environ['HTTP_AUTHORIZATION'][len('bearer '):]
        add_cookie = True
        # If the client already has an SSSESSIONID cookie, don't re-set it.
        cookie = Cookie.SimpleCookie()
        if ('HTTP_COOKIE' in environ):
            cookie.load(environ['HTTP_COOKIE'])
            if 'SSSESSIONID' in cookie:
                add_cookie = False
    elif ('GUEST_AUTHORIZATION' in environ):
        # a JWT for an anonymous user URL was generated for an unauthenticated request or the JWT claims expired
        session_key = environ['GUEST_AUTHORIZATION']
        add_cookie = True
    else:
        add_cookie = False
    if add_cookie:
        # Fresh cookie object: deliberately discards anything loaded above.
        cookie = Cookie.SimpleCookie()
        cookie['SSSESSIONID'] = session_key # SSSESSIONID is 'Site Server Session ID'
        cookie['SSSESSIONID']['path'] = '/'
        # NOTE(review): signature is not verified here - presumably it was
        # checked upstream when the token was issued; confirm.
        claims = jwt.decode(session_key, verify=False)
        cookie['user'] = claims['user']
        cookie['user']['path'] = '/'
        cookie_headers = map(lambda morsel: ('Set-Cookie', morsel.OutputString()), cookie.values())
        headers.extend(cookie_headers)
def header_set(header, headers):
    """Return True if *header* already appears (case-insensitively) as the
    name of any (name, value) pair in *headers*, else False."""
    wanted = header.lower()
    return any(name.lower() == wanted for name, _ in headers)
class Domain_Logic(base.Domain_Logic):
    """Extends the example logic tier with an optional per-request callback
    that lets callers post-process result documents in compact JSON form."""
    def __init__(self, environ, complete_document_callback=None, change_tracking=False):
        # Callback applied to each non-LDP result document before it is
        # returned (see complete_result_document).
        self.complete_document_callback = complete_document_callback
        super(Domain_Logic, self).__init__(environ, change_tracking)
    def create_document(self, document, document_id):
        """Store a new rdf_json document; return (status, headers, result).

        On 201, headers holds the Location of the new document and result is
        the completed stored document; otherwise result is wrapped as a
        [('', result)] error pair list.
        """
        # TODO: access control checking
        document = rdf_json.RDF_JSON_Document(document, '')
        self.complete_document_for_storage_insertion(document)
        self.preprocess_properties_for_storage_insertion(document)
        status, location, result = operation_primitives.create_document(self.user, document, self.request_hostname, self.tenant, self.namespace, document_id)
        if status == 201:
            if self.change_tracking:
                self.generate_change_event(base.CREATION_EVENT, location)
            # Todo: fix up self.document_id, self.path, self.path_parts to match location url of new document
            self.complete_result_document(result)
            return status, [('Location', str(location))], result
        else:
            return status, [], [('', result)]
    def execute_query(self, query):
        """Run *query* in this request's namespace; return (status, headers, result)."""
        if not self.namespace:
            # no namespace in the request path - delegate error handling
            return self.bad_path()
        status, result = operation_primitives.execute_query(self.user, query, self.request_hostname, self.tenant, self.namespace)
        return status, [], result
    def complete_result_document(self, document):
        """Invoke the completion callback (if any) on non-LDP documents,
        round-tripping through compact JSON, then defer to the base class."""
        rdf_type = document.get_value(RDF+'type')
        if rdf_type and not str(rdf_type).startswith(LDP):
            if self.complete_document_callback is not None:
                # Convert to compact JSON so the callback sees the same shape
                # that API clients receive, then merge any edits back in.
                temp_doc = self.convert_rdf_json_to_compact_json(document)
                self.complete_document_callback(temp_doc)
                #document.clear()
                document.update(self.convert_compact_json_to_rdf_json(temp_doc))
        return super(Domain_Logic, self).complete_result_document(document)
| |
#!/usr/bin/env python
# encoding: utf-8
"""
Author(s): Matthew Loper
See LICENCE.txt for licensing and contact information.
"""
__all__ = ['ProjectPoints3D', 'ProjectPoints', 'RigidTransform']
import chumpy as ch
from chumpy import depends_on, Ch
from cvwrap import cv2
import numpy as np
import scipy.sparse as sp
from chumpy.utils import row, col
from geometry import Rodrigues
def RigidTransformSlow(**kwargs):
    """Build a differentiable rigid transform as a lambda-based Ch object
    with dterms 'v', 'rt' and 't' (slower than the RigidTransform class)."""
    transform = Ch(lambda v, rt, t : v.dot(Rodrigues(rt=rt)) + t)
    if kwargs:
        transform.set(**kwargs)
    return transform
class RigidTransform(Ch):
    """Differentiable rigid-body transform of a point set.

    Computes r = (R(rt) . v^T + t)^T where R(rt) is the rotation matrix
    obtained from the Rodrigues vector rt.
    """
    dterms = 'v', 'rt', 't'

    def compute_r(self):
        # cv2.Rodrigues converts the 3-vector rt into a 3x3 rotation matrix.
        return (cv2.Rodrigues(self.rt.r)[0].dot(self.v.r.T) + col(self.t.r)).T.copy()

    def compute_dr_wrt(self, wrt):
        """Jacobian of the transformed points with respect to v, rt or t."""
        if wrt not in (self.v, self.rt, self.t):
            return

        if wrt is self.t:
            # d(out)/dt is constant; cache it until v changes size.
            if not hasattr(self, '_drt') or self._drt.shape[0] != self.v.r.size:
                IS = np.arange(self.v.r.size)
                JS = IS % 3
                data = np.ones(len(IS))
                self._drt = sp.csc_matrix((data, (IS, JS)))
            return self._drt

        if wrt is self.rt:
            # cv2.Rodrigues also returns the 3x9 derivative of R w.r.t. rt.
            rot, rot_dr = cv2.Rodrigues(self.rt.r)
            rot_dr = rot_dr.reshape((3, 3, 3))
            dr = np.einsum('abc, zc -> zba', rot_dr, self.v.r).reshape((-1, 3))
            return dr

        if wrt is self.v:
            rot = cv2.Rodrigues(self.rt.r)[0]
            IS = np.repeat(np.arange(self.v.r.size), 3)
            JS = np.repeat(np.arange(self.v.r.size).reshape((-1, 3)), 3, axis=0)
            # BUGFIX: use floor division so the point count stays an int on
            # Python 3 (plain '/' yields a float, and range() rejects floats).
            data = np.vstack([rot for i in range(self.v.r.size // 3)])
            result = sp.csc_matrix((data.ravel(), (IS.ravel(), JS.ravel())))
            return result
class ProjectPoints(Ch):
    """Differentiable pinhole camera projection (via cv2.projectPoints).

    dterms: v (Nx3 points), rt (Rodrigues rotation, size 3), t (translation,
    size 3), f (focal lengths, size 2), c (principal point, size 2),
    k (distortion coefficients).
    """
    dterms = 'v', 'rt', 't', 'f', 'c', 'k'

    def is_valid(self):
        """Return (ok, message) validating the shapes/sizes of the dterms."""
        if any([len(v.r.shape) > 1 for v in [self.rt, self.t, self.f, self.c, self.k]]):
            return False, 'rt, t, f, c, and k must be 1D'
        if any([v.r.size != 3 for v in [self.rt, self.t]]):
            return False, 'rt and t must have size=3'
        if any([v.r.size != 2 for v in [self.f, self.c]]):
            return False, 'f and c must have size=2'
        return True, ''

    def compute_r(self):
        # Projected 2D image coordinates, shape (N, 2).
        return self.r_and_derivatives[0].squeeze()

    def compute_dr_wrt(self, wrt):
        """Slice cv2.projectPoints' Jacobian for the requested dterm.

        Jacobian column layout: [rt (3) | t (3) | f (2) | c (2) | k (...)].
        """
        if wrt not in [self.v, self.rt, self.t, self.f, self.c, self.k]:
            return None

        j = self.r_and_derivatives[1]
        if wrt is self.rt:
            return j[:, :3]
        elif wrt is self.t:
            return j[:, 3:6]
        elif wrt is self.f:
            return j[:, 6:8]
        elif wrt is self.c:
            return j[:, 8:10]
        elif wrt is self.k:
            return j[:, 10:10+self.k.size]
        elif wrt is self.v:
            # d(uv)/dv = d(uv)/d(cam-space point) . R; build as block sparse.
            rot = cv2.Rodrigues(self.rt.r)[0]
            data = np.asarray(j[:, 3:6].dot(rot), order='C').ravel()
            # BUGFIX: floor division keeps the row count an int on Python 3
            # (plain '/' would make np.arange produce float indices).
            IS = np.repeat(np.arange(self.v.r.size * 2 // 3), 3)
            JS = np.asarray(np.repeat(np.arange(self.v.r.size).reshape((-1, 3)), 2, axis=0), order='C').ravel()
            result = sp.csc_matrix((data, (IS, JS)))
            return result

    def unproject_points(self, uvd, camera_space=False):
        """Back-project pixel coordinates plus depth (u, v, d) to 3D points.

        Returns camera-space points when ``camera_space`` is True, otherwise
        world-space points.
        """
        cam = ProjectPoints3D(**{k: getattr(self, k) for k in self.dterms if hasattr(self, k)})
        try:
            xy_undistorted_camspace = cv2.undistortPoints(np.asarray(uvd[:,:2].reshape((1,-1,2)).copy()), np.asarray(cam.camera_mtx), cam.k.r)
            xyz_camera_space = np.hstack((xy_undistorted_camspace.squeeze(), col(uvd[:,2])))
            xyz_camera_space[:,:2] *= col(xyz_camera_space[:,2]) # scale x,y by z
            if camera_space:
                return xyz_camera_space
            other_answer = xyz_camera_space - row(cam.view_mtx[:,3]) # translate
            result = other_answer.dot(cam.view_mtx[:,:3]) # rotate
        except Exception: # slow way, probably not so good. But doesn't require cv2.undistortPoints.
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed by the optimization fallback.
            cam.v = np.ones_like(uvd)
            ch.minimize(cam - uvd, x0=[cam.v], method='dogleg', options={'disp': 0})
            result = cam.v.r
        return result

    def unproject_depth_image(self, depth_image, camera_space=False):
        """Back-project an entire depth image to an (H, W, 3) point map."""
        us = np.arange(depth_image.size) % depth_image.shape[1]
        vs = np.arange(depth_image.size) // depth_image.shape[1]
        ds = depth_image.ravel()
        uvd = ch.array(np.vstack((us.ravel(), vs.ravel(), ds.ravel())).T)
        xyz = self.unproject_points(uvd, camera_space=camera_space)
        return xyz.reshape((depth_image.shape[0], depth_image.shape[1], -1))

    @depends_on('f','c')
    def camera_mtx(self):
        # 3x3 intrinsic matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].
        return np.array([[self.f.r[0], 0, self.c.r[0]],[0., self.f.r[1], self.c.r[1]],[0.,0.,1.]], dtype=np.float64)

    @depends_on('t', 'rt')
    def view_mtx(self):
        # 3x4 extrinsic matrix [R | t].
        R = cv2.Rodrigues(self.rt.r)[0]
        return np.hstack((R,col(self.t.r)))

    @depends_on('v', 'rt', 't', 'f', 'c', 'k')
    def r_and_derivatives(self):
        # (projected points, Jacobian) pair from cv2.projectPoints.
        v = self.v.r.reshape((-1,3)).copy()
        return cv2.projectPoints(v, self.rt.r, self.t.r, self.camera_mtx, self.k.r)

    @property
    def view_matrix(self):
        # Plain-property alias of view_mtx (recomputed on every access).
        R = cv2.Rodrigues(self.rt.r)[0]
        return np.hstack((R, col(self.t.r)))
class ProjectPoints3D(ProjectPoints):
    """Like ProjectPoints, but r is (N, 3): the projected (u, v) plus the
    camera-space z coordinate of each point."""
    dterms = 'v', 'rt', 't', 'f', 'c', 'k'
    def compute_r(self):
        result = ProjectPoints.compute_r(self)
        return np.hstack((result, col(self.z_coords.r)))
    @property
    def z_coords(self):
        # Camera-space depth: z column of the rigid transform of v.
        assert(self.v.r.shape[1]==3)
        return RigidTransform(v=self.v, rt=self.rt, t=self.t)[:,2]
    def compute_dr_wrt(self, wrt):
        # Expand the parent's 2-rows-per-point Jacobian to 3 rows per point,
        # splicing d(z)/d(wrt) in as every third row.
        result = ProjectPoints.compute_dr_wrt(self, wrt)
        if result is None:
            return None
        if sp.issparse(result):
            drz = self.z_coords.dr_wrt(wrt).tocoo()
            result = result.tocoo()
            # Remap row 2i -> 3i and 2i+1 -> 3i+1.
            # NOTE(review): `*3/2` relies on Python 2 integer division.
            result.row = result.row*3/2
            IS = np.concatenate((result.row, drz.row*3+2))
            JS = np.concatenate((result.col, drz.col))
            data = np.concatenate((result.data, drz.data))
            result = sp.csc_matrix((data, (IS, JS)), shape=(self.v.r.size, wrt.r.size))
        else:
            # Dense path: interleave (u, v, z) rows per point.
            # NOTE(review): `shape[0]/2` relies on Python 2 integer division.
            bigger = np.zeros((result.shape[0]/2, 3, result.shape[1]))
            bigger[:, :2, :] = result.reshape((-1, 2, result.shape[-1]))
            drz = self.z_coords.dr_wrt(wrt)
            if drz is not None:
                if sp.issparse(drz):
                    drz = drz.todense()
                bigger[:,2,:] = drz.reshape(bigger[:,2,:].shape)
            result = bigger.reshape((-1, bigger.shape[-1]))
        return result
def main():
    """Discover and run the TestCamera suite with verbose output."""
    import unittest
    from test_camera import TestCamera
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestCamera))
# Run the camera test suite when this module is executed directly.
if __name__ == '__main__':
    main()
| |
import warnings
from typing import Union, List, Iterable, Iterator, TYPE_CHECKING, Callable
from typing import Optional
from pathlib import Path
import srsly
from .. import util
from .augment import dont_augment
from .example import Example
from ..errors import Warnings, Errors
from ..tokens import DocBin, Doc
from ..vocab import Vocab
if TYPE_CHECKING:
# This lets us add type hints for mypy etc. without causing circular imports
from ..language import Language # noqa: F401
FILE_TYPE = ".spacy"
@util.registry.readers("spacy.Corpus.v1")
def create_docbin_reader(
    path: Optional[Path],
    gold_preproc: bool,
    max_length: int = 0,
    limit: int = 0,
    augmenter: Optional[Callable] = None,
) -> Callable[["Language"], Iterable[Example]]:
    """Registry factory returning a Corpus reader over DocBin files."""
    if path is None:
        # A missing path is a config error, not an empty corpus.
        raise ValueError(Errors.E913)
    util.logger.debug(f"Loading corpus from path: {path}")
    reader = Corpus(
        path,
        gold_preproc=gold_preproc,
        max_length=max_length,
        limit=limit,
        augmenter=augmenter,
    )
    return reader
@util.registry.readers("spacy.JsonlCorpus.v1")
def create_jsonl_reader(
    path: Path, min_length: int = 0, max_length: int = 0, limit: int = 0
) -> Callable[["Language"], Iterable[Doc]]:
    """Registry factory returning a JsonlCorpus reader over raw-text files."""
    corpus = JsonlCorpus(
        path, min_length=min_length, max_length=max_length, limit=limit
    )
    return corpus
@util.registry.readers("spacy.read_labels.v1")
def read_labels(path: Path, *, require: bool = False):
    """Read a JSON labels file; return None when it's absent and not required."""
    # I decided not to give this a generic name, because I don't want people to
    # use it for arbitrary stuff, as I want this require arg with default False.
    if require or path.exists():
        return srsly.read_json(path)
    return None
def walk_corpus(path: Union[str, Path], file_type) -> List[Path]:
    """Collect all files under *path* whose names end in *file_type*.

    Hidden entries (leading ".") are skipped, directories are walked
    breadth-first, and the result is sorted for cache-friendly ordering.
    A warning is emitted when nothing matches.
    """
    path = util.ensure_path(path)
    # A direct file reference short-circuits the walk entirely.
    if not path.is_dir() and path.parts[-1].endswith(file_type):
        return [path]
    orig_path = path
    queue = [path]
    visited = set()
    matches = []
    while queue:
        node = queue.pop(0)
        key = str(node)
        if key in visited:
            continue
        visited.add(key)
        if node.parts and node.parts[-1].startswith("."):
            continue
        if node.is_dir():
            queue.extend(node.iterdir())
        elif node.parts[-1].endswith(file_type):
            matches.append(node)
    if not matches:
        warnings.warn(Warnings.W090.format(path=orig_path, format=file_type))
    # It's good to sort these, in case the ordering messes up a cache.
    return sorted(matches)
class Corpus:
    """Iterate Example objects from a file or directory of DocBin (.spacy)
    formatted data files.
    path (Path): The directory or filename to read from.
    gold_preproc (bool): Whether to set up the Example object with gold-standard
        sentences and tokens for the predictions. Gold preprocessing helps
        the annotations align to the tokenization, and may result in sequences
        of more consistent length. However, it may reduce run-time accuracy due
        to train/test skew. Defaults to False.
    max_length (int): Maximum document length. Longer documents will be
        split into sentences, if sentence boundaries are available. Defaults to
        0, which indicates no limit.
    limit (int): Limit corpus to a subset of examples, e.g. for debugging.
        Defaults to 0, which indicates no limit.
    augment (Callable[Example, Iterable[Example]]): Optional data augmentation
        function, to extrapolate additional examples from your annotations.
    DOCS: https://spacy.io/api/corpus
    """

    def __init__(
        self,
        path: Union[str, Path],
        *,
        limit: int = 0,
        gold_preproc: bool = False,
        max_length: int = 0,
        augmenter: Optional[Callable] = None,
    ) -> None:
        self.path = util.ensure_path(path)
        self.gold_preproc = gold_preproc
        self.max_length = max_length
        self.limit = limit
        # dont_augment is the identity augmenter, so the call site in
        # __call__ never needs a None check.
        self.augmenter = augmenter if augmenter is not None else dont_augment

    def __call__(self, nlp: "Language") -> Iterator[Example]:
        """Yield examples from the data.
        nlp (Language): The current nlp object.
        YIELDS (Example): The examples.
        DOCS: https://spacy.io/api/corpus#call
        """
        ref_docs = self.read_docbin(nlp.vocab, walk_corpus(self.path, FILE_TYPE))
        if self.gold_preproc:
            examples = self.make_examples_gold_preproc(nlp, ref_docs)
        else:
            examples = self.make_examples(nlp, ref_docs)
        for real_eg in examples:
            for augmented_eg in self.augmenter(nlp, real_eg):
                yield augmented_eg

    def _make_example(
        self, nlp: "Language", reference: Doc, gold_preproc: bool
    ) -> Example:
        """Pair a reference Doc with a predicted Doc.

        With gold preprocessing (or when the reference has unknown spacing,
        where re-tokenizing from text would not round-trip) the predicted
        side copies the reference's tokenization; otherwise it is produced
        by re-tokenizing the raw text.
        """
        if gold_preproc or reference.has_unknown_spaces:
            return Example(
                Doc(
                    nlp.vocab,
                    words=[word.text for word in reference],
                    spaces=[bool(word.whitespace_) for word in reference],
                ),
                reference,
            )
        else:
            return Example(nlp.make_doc(reference.text), reference)

    def make_examples(
        self, nlp: "Language", reference_docs: Iterable[Doc]
    ) -> Iterator[Example]:
        """Yield one Example per doc, splitting over-length docs into
        sentences when boundaries are available.  Empty docs — and
        over-length docs without sentence boundaries — are skipped.
        """
        for reference in reference_docs:
            if len(reference) == 0:
                continue
            elif self.max_length == 0 or len(reference) < self.max_length:
                yield self._make_example(nlp, reference, False)
            elif reference.is_sentenced:
                for ref_sent in reference.sents:
                    if len(ref_sent) == 0:
                        continue
                    elif self.max_length == 0 or len(ref_sent) < self.max_length:
                        yield self._make_example(nlp, ref_sent.as_doc(), False)

    def make_examples_gold_preproc(
        self, nlp: "Language", reference_docs: Iterable[Doc]
    ) -> Iterator[Example]:
        """Yield one gold-preprocessed Example per sentence (or per doc when
        no sentence boundaries are set), skipping empty examples.
        """
        for reference in reference_docs:
            if reference.is_sentenced:
                ref_sents = [sent.as_doc() for sent in reference.sents]
            else:
                ref_sents = [reference]
            for ref_sent in ref_sents:
                eg = self._make_example(nlp, ref_sent, True)
                if len(eg.x):
                    yield eg

    def read_docbin(
        self, vocab: Vocab, locs: Iterable[Union[str, Path]]
    ) -> Iterator[Doc]:
        """Yield non-empty Doc objects from the given .spacy files,
        enforcing self.limit across ALL files."""
        i = 0
        for loc in locs:
            loc = util.ensure_path(loc)
            if loc.parts[-1].endswith(FILE_TYPE):
                doc_bin = DocBin().from_disk(loc)
                docs = doc_bin.get_docs(vocab)
                for doc in docs:
                    if len(doc):
                        yield doc
                        i += 1
                        if self.limit >= 1 and i >= self.limit:
                            # BUG FIX: this was `break`, which only ended the
                            # current file's loop — each additional file then
                            # yielded one extra doc before the limit check
                            # tripped again.  `return` stops the generator
                            # for good once the limit is reached.
                            return
class JsonlCorpus:
    """Iterate Doc objects from a file or directory of jsonl
    formatted raw text files.
    path (Path): The directory or filename to read from.
    min_length (int): Minimum document length (in tokens). Shorter documents
        will be skipped. Defaults to 0, which indicates no limit.
    max_length (int): Maximum document length (in tokens). Longer documents will
        be skipped. Defaults to 0, which indicates no limit.
    limit (int): Limit corpus to a subset of examples, e.g. for debugging.
        Defaults to 0, which indicates no limit.
    DOCS: https://spacy.io/api/corpus#jsonlcorpus
    """

    file_type = "jsonl"

    def __init__(
        self,
        path: Union[str, Path],
        *,
        limit: int = 0,
        min_length: int = 0,
        max_length: int = 0,
    ) -> None:
        self.path = util.ensure_path(path)
        self.min_length = min_length
        self.max_length = max_length
        self.limit = limit

    def __call__(self, nlp: "Language") -> Iterator[Example]:
        """Yield examples from the data.
        nlp (Language): The current nlp object.
        YIELDS (Example): The example objects.
        DOCS: https://spacy.io/api/corpus#jsonlcorpus-call
        """
        for file_path in walk_corpus(self.path, ".jsonl"):
            for record in srsly.read_jsonl(file_path):
                doc = nlp.make_doc(record["text"])
                n_tokens = len(doc)
                # Filter on token count; 0 means "no limit" on either side.
                if self.min_length >= 1 and n_tokens < self.min_length:
                    continue
                if self.max_length >= 1 and n_tokens >= self.max_length:
                    continue
                words = [token.text for token in doc]
                spaces = [bool(token.whitespace_) for token in doc]
                # We don't *need* an example here, but it seems nice to
                # make it match the Corpus signature.
                yield Example(doc, Doc(nlp.vocab, words=words, spaces=spaces))
| |
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from random import choice, randint
import support as sp
from common import DEBUG, CONFIG
class FlagPage(object):
    # Deterministic byte stream backed by a 4096-byte flag page; used as the
    # simulation's source of "randomness" so runs are reproducible.
    def __init__(self, flag_page):
        self.flag_page = flag_page
        self.cur_idx = 0
    def next_byte(self):
        # Return the next byte, wrapping around after 4096 bytes.
        b = self.flag_page[self.cur_idx]
        if DEBUG:
            # NOTE(review): the "0x{1}" label formats ord(b) in decimal, not
            # hex — cosmetic debug-output mismatch.
            print " fp[{0}] = 0x{1}".format(self.cur_idx, ord(b))
        self.cur_idx = (self.cur_idx + 1) % 4096
        return b
class Resort(object):
def __init__(self, r_size=3, min_altitude=5000, max_altitude=15000, flag_page=''):
self.size = r_size
self.min_altitude = min_altitude
self.max_altitude = max_altitude
self.deciders = []
self.rider_count = 0
self.riders = []
self.trails = []
self.lifts = []
self.flag_page = FlagPage(flag_page)
def run_sim(self, steps):
'''
Run the simulation steps iterations.
'''
if DEBUG:
self.route()
min_decider = self.get_decider_min()
min_decider.add_riders(self.riders)
if DEBUG:
print "start sim with {0} riders on D{1}".format(len(self.riders), min_decider.id)
self.riders = []
for count in range(steps):
if DEBUG:
print " step {0}".format(count)
# deciders
if DEBUG:
print "{0} deciders".format(len(self.deciders))
for d in self.deciders:
d.step(self.flag_page, self.trails, self.lifts)
# trails
if DEBUG:
print "{0} trails".format(len(self.trails))
for t in self.trails:
t.step(self.deciders)
# lifts
if DEBUG:
print "{0} lifts".format(len(self.lifts))
for l in self.lifts:
l.step(self.deciders)
def lift_stats_buffer(self):
'''
Examine all lifts and collect their stats into a buffer.
For each lift, get id and rider_total
'''
buf = ''
for l in self.lifts:
if DEBUG:
print "L{0} had {1} riders".format(l.id, l.rider_total)
buf += sp.pack_single_uint32(l.id)
buf += sp.pack_single_uint32(l.rider_total)
return buf
def trail_stats_buffer(self):
'''
Examine all trails and collect their stats into a buffer.
For each trail, get id and rider_total
'''
buf = ''
for t in self.trails:
if DEBUG:
print "T{0} had {1} riders".format(t.id, t.rider_total)
buf += sp.pack_single_uint32(t.id)
buf += sp.pack_single_uint32(t.rider_total)
return buf
def rider_stats_buffer(self):
'''
Examine all riders and collect their stats into a buffer.
For each rider, get id and energy_level
'''
buf = ''
for d in self.deciders:
for r in d.rider_queue:
buf += r.get_stats()
for r in d.quitters:
buf += r.get_stats()
for l in self.lifts:
for r in l.riders:
buf += r.get_stats()
for c in l.chairs:
for r in c.riders:
buf += r.get_stats()
for t in self.trails:
for r in t.riders:
buf += r.get_stats()
for r in self.riders:
buf += r.get_stats()
return buf
def resort_reset(self):
'''
Get all riders from lifts, trails, and deciders, and return them to self.riders.
Reset deciders, riders, trails, lifts to initial state.
'''
for d in self.deciders:
self.riders += d.decider_reset()
for l in self.lifts:
self.riders += l.lift_reset()
for t in self.trails:
self.riders += t.trail_reset()
for r in self.riders:
r.rider_reset()
def generate_load_digraph_buffer(self):
'''
decider count, lift count, trail count
decider tuples (ID, altitude)
lift tuples (ID, start decider, end decider, chair_count, chair_capacity)
trail tuples (ID, start decider, end decider, difficulty, length)
'''
lb = ''
lb += sp.pack_single_uint32(len(self.deciders))
lb += sp.pack_single_uint32(len(self.lifts))
lb += sp.pack_single_uint32(len(self.trails))
for d in self.deciders:
lb += d.generate_load_buffer()
for l in self.lifts:
lb += l.generate_load_buffer()
for t in self.trails:
lb += t.generate_load_buffer()
return lb
def get_decider_min(self):
return min(self.deciders, key=lambda d: d.altitude)
def get_decider_by_id(self, d_id):
for decider in self.deciders:
if decider.id == d_id:
return decider
return None
def generate_random_layout(self, t_len=randint(10, 50), t_diffs=[1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5], c_count=randint(10, 50)):
if DEBUG:
print "generate_random_layout()"
decider_altitudes = set()
for d_id in range(self.size + 1):
d = None
if d_id == 0:
d = Decider(d_id, self.min_altitude)
else:
d = Decider(d_id, randint(self.min_altitude, self.max_altitude))
# no duplicate altitudes
while d.altitude in decider_altitudes:
d = Decider(d_id, randint(self.min_altitude, self.max_altitude))
decider_altitudes.add(d.altitude)
self.deciders.append(d)
if DEBUG:
print " [d{0}] added".format(d.id)
# all deciders sorted by altitude from low to high
alt_sorted_deciders = sorted(self.deciders, key=lambda x: x.altitude)
enum_alt_sorted_deciders = list(enumerate(alt_sorted_deciders))
lift_start_decider_idxs = set()
lift_end_decider_idxs = set()
lift_end_decider_idxs_remaining = [i[0] for i in enum_alt_sorted_deciders[1:]]
for l_id in range(self.size):
l = Lift(l_id)
l.chair_count = c_count
# l.chair_count = 3
l.chair_capacity = choice([2, 4])
l.gen_chairs()
start_decider_tuple = None
if 0 == l_id:
start_decider_tuple = enum_alt_sorted_deciders[0]
else:
start_decider_tuple = choice(enum_alt_sorted_deciders[:-1])
remaining_usable_idxs = [i for i in lift_end_decider_idxs_remaining if i > start_decider_tuple[0]]
if [] != remaining_usable_idxs:
end_decider_tuple_idx = choice(remaining_usable_idxs)
lift_end_decider_idxs_remaining.remove(end_decider_tuple_idx)
else:
end_decider_tuple_idx = choice([i[0] for i in enum_alt_sorted_deciders if i[0] > start_decider_tuple[0]])
end_decider_tuple = enum_alt_sorted_deciders[end_decider_tuple_idx]
l.start_decider = start_decider_tuple[1].id
l.end_decider = end_decider_tuple[1].id
lift_start_decider_idxs.add(start_decider_tuple[0])
lift_end_decider_idxs.add(end_decider_tuple[0])
start_decider = self.get_decider_by_id(l.start_decider)
start_decider.transport_options.append(l)
self.lifts.append(l)
if DEBUG:
print " [l{0}] added".format(l.id)
# lift end is used as the starting point of a trail
# lift start is used as ending point of a trail
trail_end_decider_idxs = sorted(list(lift_start_decider_idxs))
trail_start_decider_idxs = sorted(list(lift_end_decider_idxs))
# want to try to use up one of each of these before randomly selecting to help make closed graph
trail_end_decider_idxs_remaining = list(trail_end_decider_idxs)
for t_id in range(self.size * 2):
t = Trail(t_id)
t.difficulty = choice(t_diffs)
t.length = t_len
# t.length = 3
start_decider_tuple = None
if t_id < len(trail_start_decider_idxs):
d_tuple_idx = trail_start_decider_idxs[t_id]
start_decider_tuple = enum_alt_sorted_deciders[d_tuple_idx]
else:
start_decider_tuple = enum_alt_sorted_deciders[choice(trail_start_decider_idxs)]
remaining_usable_idxs = [i for i in trail_end_decider_idxs_remaining if i < start_decider_tuple[0]]
end_decider_tuple_idx = 0
if [] != remaining_usable_idxs:
end_decider_tuple_idx = choice(remaining_usable_idxs)
trail_end_decider_idxs_remaining.remove(end_decider_tuple_idx)
else:
end_decider_tuple_idx = choice([i for i in trail_end_decider_idxs if i < start_decider_tuple[0]])
end_decider_tuple = enum_alt_sorted_deciders[end_decider_tuple_idx]
t.start_decider = start_decider_tuple[1].id
t.end_decider = end_decider_tuple[1].id
start_decider = self.get_decider_by_id(t.start_decider)
start_decider.transport_options.append(t)
self.trails.append(t)
if DEBUG:
print " [t{0}] added".format(t.id)
def route(self):
for d in self.deciders:
print d.route()
for l in self.lifts:
print l.route()
for t in self.trails:
print t.route()
def __str__(self):
return "Resort(size={0},min_altitude={1},max_altitude={2},\ndeciders={3},\nriders={4},\ntrails={5},\nlifts={6})".format(self.size, self.min_altitude, self.max_altitude, self.deciders, self.riders, self.trails, self.lifts)
def __repr__(self):
return self.__str__()
class Decider(object):
    # A graph node where riders choose their next lift or trail, driven by
    # flag-page bytes.  Riders with too little energy for the chosen option
    # become "quitters" and stay here.
    def __init__(self, d_id=0, altitude=0):
        self.id = d_id
        self.altitude = altitude
        self.transport_options = []  # Lifts/Trails departing from this node
        self.rider_queue = []
        self.quitters = []
    def add_riders(self, riders):
        for r in riders:
            self.rider_queue.append(r)
            if DEBUG:
                print "D{0} gained R{1}".format(self.id, r.id)
    def select_option(self, flag_page):
        # Pick a transport option using the next flag-page byte (mod count).
        idx = ord(flag_page.next_byte()) % len(self.transport_options)
        if DEBUG:
            print "D{0} selected option idx {1} = {2}".format(self.id, idx, self.transport_options[idx])
        return self.transport_options[idx]
    def step(self, flag_page, trails, lifts):
        # Dispatch every queued rider to an option, or to the quitters list
        # if they lack the energy for it.
        if [] == self.transport_options:
            if DEBUG:
                print "D{0} has no transport options".format(self.id)
            return
        riders = len(self.rider_queue)
        if DEBUG:
            print "D{0} rider's in queue {1} quitters {2}".format(self.id, len(self.rider_queue), len(self.quitters))
        for _ in range(riders):
            op = self.select_option(flag_page)
            r = self.rider_queue.pop(0)
            # Trails require energy >= difficulty; lifts require any energy.
            if isinstance(op, Trail) and r.energy_level < op.difficulty:
                self.quitters.append(r)
                if DEBUG:
                    print "D{0} rider {1} quit".format(self.id, r.id)
                continue
            if isinstance(op, Lift) and r.energy_level == 0:
                self.quitters.append(r)
                if DEBUG:
                    print "D{0} rider {1} quit".format(self.id, r.id)
                continue
            op.add_riders([r])
            if DEBUG:
                print "D{0}: moved R{1} to {2}{3}".format(self.id, r.id, type(op), op.id)
    def decider_reset(self):
        # Drain and return all riders (queued and quitters).
        riders = self.rider_queue
        riders += self.quitters
        self.rider_queue = []
        self.quitters = []
        return riders
    def generate_load_buffer(self):
        '''
        decider tuple (ID, altitude)
        '''
        return sp.pack_single_uint32(self.id) + sp.pack_single_uint32(self.altitude)
    def route(self):
        return "Decider(id={0},altitude={1},transport_options={2})".format(self.id, self.altitude, ["{0}{1}".format(type(o), o.id) for o in self.transport_options])
    def __eq__(self, other):
        # NOTE(review): __eq__ compares only id, but __hash__ mixes in
        # altitude — two deciders with equal ids but different altitudes
        # would compare equal yet hash differently, breaking the eq/hash
        # contract.  Confirm deciders are never keyed by object in sets/dicts.
        return self.id == other.id
    def __hash__(self):
        return hash( ("id", self.id, "altitude", self.altitude))
    def __str__(self):
        return "Decider(id={0},altitude={1},transport_options={2},rider_queue={3},quitters={4})".format(self.id, self.altitude, self.transport_options, self.rider_queue, self.quitters)
    def __repr__(self):
        return self.__str__()
class Rider(object):
SKIER = 0
BOARDER = 1
def __init__(self, r_id=0, health_code=0xFFFFFFFF):
self.id = r_id
self.type = 0
self.energy_level = 0
self.initial_energy_level = 0
self.trail_count = 0
self.trail_distance = 0
self.health_code = health_code
def get_stats(self):
if DEBUG:
print self.__str__()
return sp.pack_single_uint32(self.id) + sp.pack_single_uint32(self.energy_level)
def rider_reset(self):
self.energy_level = self.initial_energy_level
self.trail_count = 0
self.trail_distance = 0
if DEBUG:
print "R{0} reset".format(self.id)
def generate_load_buffer(self):
'''
rider tuple (ID, type, energy_level, health_code)
'''
return sp.pack_single_uint32(self.id) + sp.pack_single_uint32(self.type) + \
sp.pack_single_uint32(self.energy_level) + sp.pack_single_uint32(self.health_code)
def gen_stats(self, e_level=randint(50, 100)):
self.type = choice([self.SKIER, self.BOARDER])
self.energy_level = e_level
self.initial_energy_level = self.energy_level
def __str__(self):
return "Rider(id={0},type={1},energy_level={2},trail_count={3})".format(self.id, self.type, self.energy_level, self.trail_count)
def __repr__(self):
return self.__str__()
class Trail(object):
def __init__(self, t_id=0):
self.id = t_id
self.difficulty = 0
self.length = 0
self.rider_total = 0
self.riders = []
self.start_decider = None
self.end_decider = None
def add_riders(self, riders):
self.rider_total += 1
self.riders += riders
if DEBUG:
for r in riders:
print "T{0} gained R{1}".format(self.id, r.id)
def step(self, deciders):
end_decider = deciders[self.end_decider]
# move completed riders to end decider
if [] == self.riders:
return
done = [r for r in self.riders if self.length == r.trail_distance]
self.riders = [r for r in self.riders if self.length != r.trail_distance]
done_cnt = len(done)
for r in done:
r.trail_distance = 0
r.trail_count += 1
r.energy_level -= self.difficulty
end_decider.add_riders([r])
if DEBUG:
if 0 != done_cnt:
print "T{0} moved {1} riders to D{2}".format(self.id, done_cnt, end_decider.id)
# increment trail_distance on rest of riders
for r in self.riders:
r.trail_distance += 1
if DEBUG:
if 0 != len(self.riders):
print "T{0} update {1} riders' distance".format(self.id, len(self.riders))
def trail_reset(self):
riders = self.riders
self.riders = []
self.rider_total = 0
return riders
def generate_load_buffer(self):
'''
trail tuple (ID, start decider, end decider, difficulty, length)
'''
return sp.pack_single_uint32(self.id) + sp.pack_single_uint32(self.start_decider) + \
sp.pack_single_uint32(self.end_decider) + sp.pack_single_uint32(self.difficulty) + \
sp.pack_single_uint32(self.length)
def route(self):
return "Trail(id={0},D{1}->D{2})".format(self.id, self.start_decider, self.end_decider)
def __str__(self):
return "Trail(id={0},difficulty={1},rider_total={2},riders={3},start_decider={4},end_decider={5})".format(self.id, self.difficulty, self.rider_total, self.riders, self.start_decider, self.end_decider)
def __repr__(self):
return self.__str__()
class Chair(object):
def __init__(self, c_id, capacity=2):
self.id = c_id
self.capacity = capacity
self.riders = []
def unload(self):
riders = self.riders
if DEBUG:
for r in self.riders:
print " C{0} unloaded R{1}".format(self.id, r.id)
self.riders = []
return riders
def load(self, r):
self.riders.append(r)
if DEBUG:
print " C{0} loaded R{1}".format(self.id, r.id)
class Lift(object):
    # An uphill lift: riders queue in self.riders, are loaded onto a rotating
    # ring of chairs, and are unloaded at the end decider.  c_embark and
    # c_disembark walk the chair ring one position per step.
    def __init__(self, l_id=0):
        self.id = l_id
        self.chair_count = 0
        self.chair_capacity = 0
        self.rider_total = 0       # lifetime count of riders carried
        self.riders = []           # riders waiting to board
        self.chairs = []
        self.start_decider = None  # decider ids, not objects
        self.end_decider = None
        self.c_embark = 0          # index of the chair currently loading
        self.c_disembark = 0       # index of the chair currently unloading
    def add_riders(self, riders):
        self.riders += riders
        if DEBUG:
            for r in riders:
                print "L{0} gained R{1}".format(self.id, r.id)
    def gen_chairs(self):
        # Create the chair ring; unloading starts halfway around the loop so
        # a chair takes chair_count/2 steps to travel bottom to top.
        for c_id in range(self.chair_count):
            self.chairs.append(Chair(c_id, self.chair_capacity))
        self.c_embark = 0
        # NOTE(review): `/` assumed to be Python 2 integer division here.
        self.c_disembark = len(self.chairs)/2
    def step(self, deciders):
        # NOTE(review): indexes `deciders` by end_decider id — assumes the
        # list is ordered by id.
        if [] == self.chairs:
            return
        if DEBUG:
            print "L{0} c_disembark {1} c_embark {2}".format(self.id, self.c_disembark, self.c_embark)
        ''' move riders from c_disembark chair to end_decider's rider_queue '''
        end_decider = deciders[self.end_decider]
        # move riders from c_disembark to end decider
        d_riders = self.chairs[self.c_disembark].unload()
        if 0 != len(d_riders):
            end_decider.add_riders(d_riders)
            if DEBUG:
                print "L{0} unloaded {1} riders to D{2}".format(self.id, len(d_riders), end_decider.id)
        # increment c_disembark
        self.c_disembark = (self.c_disembark + 1) % len(self.chairs)
        ''' move riders from riders to c_embark chair '''
        loaded = 0
        # Load up to chair_capacity waiting riders onto the embark chair.
        for _ in range(self.chair_capacity):
            if [] == self.riders:
                break;
            r = self.riders.pop(0)
            self.chairs[self.c_embark].load(r)
            self.rider_total += 1
            loaded += 1
        if DEBUG:
            if 0 != loaded:
                print "L{0} loaded {1} riders onto chair {2}".format(self.id, len(self.chairs[self.c_embark].riders), self.chairs[self.c_embark].id)
        # increment c_embark
        self.c_embark = (self.c_embark + 1) % len(self.chairs)
    def lift_reset(self):
        # Drain chairs and the waiting queue; return every rider found.
        all_riders = []
        for c in self.chairs:
            all_riders += c.unload()
        all_riders += self.riders
        self.riders = []
        self.rider_total = 0
        self.c_embark = 0
        self.c_disembark = len(self.chairs)/2
        return all_riders
    def generate_load_buffer(self):
        '''
        lift tuple (ID, start decider, end decider, chair_count, chair_capacity)
        '''
        return sp.pack_single_uint32(self.id) + sp.pack_single_uint32(self.start_decider) + \
            sp.pack_single_uint32(self.end_decider) + sp.pack_single_uint32(self.chair_count) + \
            sp.pack_single_uint32(self.chair_capacity)
    def route(self):
        return "Lift(id={0},D{1}->D{2})".format(self.id, self.start_decider, self.end_decider)
    def __str__(self):
        return "Lift(id={0},chair_count={1},chair_capacity={2},rider_total={3},riders={4},start_decider={5},end_decider={6})".format(self.id, self.chair_count, self.chair_capacity, self.rider_total, self.riders, self.start_decider, self.end_decider)
    def __repr__(self):
        return self.__str__()
# Ad-hoc smoke test: build a random resort and print its routing graph.
if __name__ == '__main__':
    r = Resort()
    r.generate_random_layout()
    r.route()
| |
#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
import sys
import ctypes
import weakref
from multiprocessing import heap, RLock
from multiprocessing.forking import assert_spawning, ForkingPickler
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
#
#
#
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
#
#
#
def _new_value(type_):
    """Allocate a shared-memory block big enough for *type_* and return a
    ctypes instance of that type mapped onto it."""
    wrapper = heap.BufferWrapper(ctypes.sizeof(type_))
    return rebuild_ctype(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
    '''
    Returns a ctypes object allocated from shared memory
    '''
    ctype = typecode_to_type.get(typecode_or_type, typecode_or_type)
    value = _new_value(ctype)
    # Shared memory is not guaranteed to be zeroed; clear it before init.
    ctypes.memset(ctypes.addressof(value), 0, ctypes.sizeof(value))
    value.__init__(*args)
    return value
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a ctypes array allocated from shared memory
    '''
    ctype = typecode_to_type.get(typecode_or_type, typecode_or_type)
    # An int means "this many zero elements"; anything else is treated as
    # an initializer sequence whose length fixes the array size.
    if isinstance(size_or_initializer, int):
        return _new_value(ctype * size_or_initializer)
    arr = _new_value(ctype * len(size_or_initializer))
    arr.__init__(*size_or_initializer)
    return arr
def Value(typecode_or_type, *args, lock=None):
    '''
    Return a synchronization wrapper for a Value
    '''
    raw = RawValue(typecode_or_type, *args)
    if lock is False:
        # Caller explicitly opted out of synchronization.
        return raw
    if lock in (True, None):
        lock = RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("'%r' has no method 'acquire'" % lock)
    return synchronized(raw, lock)
def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Return a synchronization wrapper for a RawArray
    '''
    lock = kwds.pop('lock', None)
    if kwds:
        # Reject anything other than the single supported keyword.
        raise ValueError('unrecognized keyword argument(s): %s' % list(kwds.keys()))
    arr = RawArray(typecode_or_type, size_or_initializer)
    if lock is False:
        # Caller explicitly opted out of synchronization.
        return arr
    if lock in (True, None):
        lock = RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("'%r' has no method 'acquire'" % lock)
    return synchronized(arr, lock)
def copy(obj):
    """Return a new shared-memory ctypes object holding a copy of *obj*."""
    duplicate = _new_value(type(obj))
    # Pointer-item assignment performs a struct-level copy of the contents.
    ctypes.pointer(duplicate)[0] = obj
    return duplicate
def synchronized(obj, lock=None):
    # Wrap a raw shared ctypes object in the appropriate Synchronized* proxy.
    assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
    if isinstance(obj, ctypes._SimpleCData):
        return Synchronized(obj, lock)
    elif isinstance(obj, ctypes.Array):
        # char arrays additionally expose locked .value/.raw accessors.
        if obj._type_ is ctypes.c_char:
            return SynchronizedString(obj, lock)
        return SynchronizedArray(obj, lock)
    else:
        # Assume a ctypes Structure: build (and cache per structure class)
        # a wrapper class with one synchronized property per field.
        cls = type(obj)
        try:
            scls = class_cache[cls]
        except KeyError:
            names = [field[0] for field in cls._fields_]
            d = dict((name, make_property(name)) for name in names)
            classname = 'Synchronized' + cls.__name__
            scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
        return scls(obj, lock)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
    """Pickle helper: reduce a shared ctypes object to (rebuild_ctype, args).

    Only valid while spawning a child process (enforced by assert_spawning);
    the shared-memory wrapper travels with the reduction.
    """
    assert_spawning(obj)
    if isinstance(obj, ctypes.Array):
        args = (obj._type_, obj._wrapper, obj._length_)
    else:
        args = (type(obj), obj._wrapper, None)
    return rebuild_ctype, args
def rebuild_ctype(type_, wrapper, length):
    # Recreate a shared ctypes object on top of an existing memory block
    # (also used for fresh allocations via _new_value, with length=None).
    if length is not None:
        type_ = type_ * length
    # Make sure this (possibly newly created array) type pickles through
    # reduce_ctype when sent to a child process.
    ForkingPickler.register(type_, reduce_ctype)
    obj = type_.from_address(wrapper.get_address())
    # Keep the buffer wrapper alive as long as the ctypes object exists.
    obj._wrapper = wrapper
    return obj
#
# Function to create properties
#
def make_property(name):
    # Build (and memoize in prop_cache) a property that wraps access to
    # self._obj.<name> in acquire()/release() calls, by exec-ing `template`
    # with the attribute name substituted in all 7 slots.
    try:
        return prop_cache[name]
    except KeyError:
        d = {}
        exec(template % ((name,)*7), d)
        prop_cache[name] = d[name]
        return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
    """Base wrapper pairing a shared ctypes object with a process-safe lock.

    The lock's acquire/release are re-exported as bound attributes so the
    wrapper itself can be used like a lock.
    """
    def __init__(self, obj, lock=None):
        self._obj = obj
        self._lock = lock or RLock()
        # Expose the lock primitives directly on the wrapper.
        self.acquire = self._lock.acquire
        self.release = self._lock.release
    def __reduce__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        return synchronized, (self._obj, self._lock)
    def get_obj(self):
        """Return the wrapped (unsynchronized) ctypes object."""
        return self._obj
    def get_lock(self):
        """Return the lock guarding access to the object."""
        return self._lock
    def __repr__(self):
        return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
    # Wrapper for simple ctypes values; .value access holds the lock.
    value = make_property('value')
class SynchronizedArray(SynchronizedBase):
    # Lock-guarded wrapper around a shared ctypes array.  Each item or slice
    # access holds the lock; whole-array iteration is NOT atomic.
    def __len__(self):
        return len(self._obj)
    def __getitem__(self, i):
        self.acquire()
        try:
            return self._obj[i]
        finally:
            self.release()
    def __setitem__(self, i, value):
        self.acquire()
        try:
            self._obj[i] = value
        finally:
            self.release()
    # NOTE(review): __getslice__/__setslice__ are Python 2 protocol methods;
    # on Python 3 slice access goes through __getitem__/__setitem__ instead.
    def __getslice__(self, start, stop):
        self.acquire()
        try:
            return self._obj[start:stop]
        finally:
            self.release()
    def __setslice__(self, start, stop, values):
        self.acquire()
        try:
            self._obj[start:stop] = values
        finally:
            self.release()
class SynchronizedString(SynchronizedArray):
    # Character-array wrapper: adds lock-guarded access to .value and .raw.
    value = make_property('value')
    raw = make_property('raw')
| |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AuthenticationV1beta1Api(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to the shared client held by the global Configuration when
        # no client is supplied, creating one lazily on first use.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    def create_token_review(self, body, **kwargs):
        """
        create a TokenReview
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_token_review(body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param V1beta1TokenReview body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1beta1TokenReview
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): this flag is force-set before delegating; presumably it
        # makes call_api return only the deserialized body — confirm in ApiClient.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            # Asynchronous path: the *_with_http_info variant returns the thread.
            return self.create_token_review_with_http_info(body, **kwargs)
        else:
            (data) = self.create_token_review_with_http_info(body, **kwargs)
            return data

    def create_token_review_with_http_info(self, body, **kwargs):
        """
        create a TokenReview
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_token_review_with_http_info(body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param V1beta1TokenReview body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1beta1TokenReview
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Names accepted through **kwargs; anything else is rejected below.
        all_params = ['body', 'pretty']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the call's arguments (including kwargs) for validation.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_token_review" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_token_review`")

        collection_formats = {}

        resource_path = '/apis/authentication.k8s.io/v1beta1/tokenreviews'.replace('{format}', 'json')
        path_params = {}

        query_params = {}
        if 'pretty' in params:
            query_params['pretty'] = params['pretty']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1beta1TokenReview',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)

    def get_api_resources(self, **kwargs):
        """
        get available resources
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_api_resources(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: V1APIResourceList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Same sync/async dispatch shape as create_token_review above.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.get_api_resources_with_http_info(**kwargs)
        else:
            (data) = self.get_api_resources_with_http_info(**kwargs)
            return data

    def get_api_resources_with_http_info(self, **kwargs):
        """
        get available resources
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_api_resources_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: V1APIResourceList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # This endpoint takes no API parameters; only the framework kwargs.
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        resource_path = '/apis/authentication.k8s.io/v1beta1/'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1APIResourceList',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
| |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EnvelopeDocument(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> swagger type string (consumed by to_dict()).
    swagger_types = {
        'added_recipient_ids': 'list[str]',
        'attachment_tab_id': 'str',
        'authoritative_copy': 'str',
        'authoritative_copy_metadata': 'PropertyMetadata',
        'available_document_types': 'list[SignatureType]',
        'contains_pdf_form_fields': 'str',
        'display': 'str',
        'display_metadata': 'PropertyMetadata',
        'document_fields': 'list[NameValue]',
        'document_id': 'str',
        'document_id_guid': 'str',
        'error_details': 'ErrorDetails',
        'include_in_download': 'str',
        'include_in_download_metadata': 'PropertyMetadata',
        'name': 'str',
        'name_metadata': 'PropertyMetadata',
        'order': 'str',
        'pages': 'list[Page]',
        'signer_must_acknowledge': 'str',
        'signer_must_acknowledge_metadata': 'PropertyMetadata',
        'size_bytes': 'str',
        'template_locked': 'str',
        'template_required': 'str',
        'type': 'str',
        'uri': 'str'
    }

    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'added_recipient_ids': 'addedRecipientIds',
        'attachment_tab_id': 'attachmentTabId',
        'authoritative_copy': 'authoritativeCopy',
        'authoritative_copy_metadata': 'authoritativeCopyMetadata',
        'available_document_types': 'availableDocumentTypes',
        'contains_pdf_form_fields': 'containsPdfFormFields',
        'display': 'display',
        'display_metadata': 'displayMetadata',
        'document_fields': 'documentFields',
        'document_id': 'documentId',
        'document_id_guid': 'documentIdGuid',
        'error_details': 'errorDetails',
        'include_in_download': 'includeInDownload',
        'include_in_download_metadata': 'includeInDownloadMetadata',
        'name': 'name',
        'name_metadata': 'nameMetadata',
        'order': 'order',
        'pages': 'pages',
        'signer_must_acknowledge': 'signerMustAcknowledge',
        'signer_must_acknowledge_metadata': 'signerMustAcknowledgeMetadata',
        'size_bytes': 'sizeBytes',
        'template_locked': 'templateLocked',
        'template_required': 'templateRequired',
        'type': 'type',
        'uri': 'uri'
    }

    def __init__(self, added_recipient_ids=None, attachment_tab_id=None, authoritative_copy=None, authoritative_copy_metadata=None, available_document_types=None, contains_pdf_form_fields=None, display=None, display_metadata=None, document_fields=None, document_id=None, document_id_guid=None, error_details=None, include_in_download=None, include_in_download_metadata=None, name=None, name_metadata=None, order=None, pages=None, signer_must_acknowledge=None, signer_must_acknowledge_metadata=None, size_bytes=None, template_locked=None, template_required=None, type=None, uri=None):  # noqa: E501
        """EnvelopeDocument - a model defined in Swagger"""  # noqa: E501
        # All backing fields start as None; only explicitly-passed (non-None)
        # arguments are assigned through the property setters below.
        self._added_recipient_ids = None
        self._attachment_tab_id = None
        self._authoritative_copy = None
        self._authoritative_copy_metadata = None
        self._available_document_types = None
        self._contains_pdf_form_fields = None
        self._display = None
        self._display_metadata = None
        self._document_fields = None
        self._document_id = None
        self._document_id_guid = None
        self._error_details = None
        self._include_in_download = None
        self._include_in_download_metadata = None
        self._name = None
        self._name_metadata = None
        self._order = None
        self._pages = None
        self._signer_must_acknowledge = None
        self._signer_must_acknowledge_metadata = None
        self._size_bytes = None
        self._template_locked = None
        self._template_required = None
        self._type = None
        self._uri = None
        self.discriminator = None
        if added_recipient_ids is not None:
            self.added_recipient_ids = added_recipient_ids
        if attachment_tab_id is not None:
            self.attachment_tab_id = attachment_tab_id
        if authoritative_copy is not None:
            self.authoritative_copy = authoritative_copy
        if authoritative_copy_metadata is not None:
            self.authoritative_copy_metadata = authoritative_copy_metadata
        if available_document_types is not None:
            self.available_document_types = available_document_types
        if contains_pdf_form_fields is not None:
            self.contains_pdf_form_fields = contains_pdf_form_fields
        if display is not None:
            self.display = display
        if display_metadata is not None:
            self.display_metadata = display_metadata
        if document_fields is not None:
            self.document_fields = document_fields
        if document_id is not None:
            self.document_id = document_id
        if document_id_guid is not None:
            self.document_id_guid = document_id_guid
        if error_details is not None:
            self.error_details = error_details
        if include_in_download is not None:
            self.include_in_download = include_in_download
        if include_in_download_metadata is not None:
            self.include_in_download_metadata = include_in_download_metadata
        if name is not None:
            self.name = name
        if name_metadata is not None:
            self.name_metadata = name_metadata
        if order is not None:
            self.order = order
        if pages is not None:
            self.pages = pages
        if signer_must_acknowledge is not None:
            self.signer_must_acknowledge = signer_must_acknowledge
        if signer_must_acknowledge_metadata is not None:
            self.signer_must_acknowledge_metadata = signer_must_acknowledge_metadata
        if size_bytes is not None:
            self.size_bytes = size_bytes
        if template_locked is not None:
            self.template_locked = template_locked
        if template_required is not None:
            self.template_required = template_required
        if type is not None:
            self.type = type
        if uri is not None:
            self.uri = uri

    # ---- Generated pass-through accessors: each property is a thin wrapper
    # ---- over its backing field; the setters perform no validation.
    @property
    def added_recipient_ids(self):
        """Gets the added_recipient_ids of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The added_recipient_ids of this EnvelopeDocument.  # noqa: E501
        :rtype: list[str]
        """
        return self._added_recipient_ids

    @added_recipient_ids.setter
    def added_recipient_ids(self, added_recipient_ids):
        """Sets the added_recipient_ids of this EnvelopeDocument.

        # noqa: E501

        :param added_recipient_ids: The added_recipient_ids of this EnvelopeDocument.  # noqa: E501
        :type: list[str]
        """
        self._added_recipient_ids = added_recipient_ids

    @property
    def attachment_tab_id(self):
        """Gets the attachment_tab_id of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The attachment_tab_id of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._attachment_tab_id

    @attachment_tab_id.setter
    def attachment_tab_id(self, attachment_tab_id):
        """Sets the attachment_tab_id of this EnvelopeDocument.

        # noqa: E501

        :param attachment_tab_id: The attachment_tab_id of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._attachment_tab_id = attachment_tab_id

    @property
    def authoritative_copy(self):
        """Gets the authoritative_copy of this EnvelopeDocument.  # noqa: E501

        Specifies the Authoritative copy feature. If set to true the Authoritative copy feature is enabled.  # noqa: E501

        :return: The authoritative_copy of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._authoritative_copy

    @authoritative_copy.setter
    def authoritative_copy(self, authoritative_copy):
        """Sets the authoritative_copy of this EnvelopeDocument.

        Specifies the Authoritative copy feature. If set to true the Authoritative copy feature is enabled.  # noqa: E501

        :param authoritative_copy: The authoritative_copy of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._authoritative_copy = authoritative_copy

    @property
    def authoritative_copy_metadata(self):
        """Gets the authoritative_copy_metadata of this EnvelopeDocument.  # noqa: E501

        :return: The authoritative_copy_metadata of this EnvelopeDocument.  # noqa: E501
        :rtype: PropertyMetadata
        """
        return self._authoritative_copy_metadata

    @authoritative_copy_metadata.setter
    def authoritative_copy_metadata(self, authoritative_copy_metadata):
        """Sets the authoritative_copy_metadata of this EnvelopeDocument.

        :param authoritative_copy_metadata: The authoritative_copy_metadata of this EnvelopeDocument.  # noqa: E501
        :type: PropertyMetadata
        """
        self._authoritative_copy_metadata = authoritative_copy_metadata

    @property
    def available_document_types(self):
        """Gets the available_document_types of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The available_document_types of this EnvelopeDocument.  # noqa: E501
        :rtype: list[SignatureType]
        """
        return self._available_document_types

    @available_document_types.setter
    def available_document_types(self, available_document_types):
        """Sets the available_document_types of this EnvelopeDocument.

        # noqa: E501

        :param available_document_types: The available_document_types of this EnvelopeDocument.  # noqa: E501
        :type: list[SignatureType]
        """
        self._available_document_types = available_document_types

    @property
    def contains_pdf_form_fields(self):
        """Gets the contains_pdf_form_fields of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The contains_pdf_form_fields of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._contains_pdf_form_fields

    @contains_pdf_form_fields.setter
    def contains_pdf_form_fields(self, contains_pdf_form_fields):
        """Sets the contains_pdf_form_fields of this EnvelopeDocument.

        # noqa: E501

        :param contains_pdf_form_fields: The contains_pdf_form_fields of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._contains_pdf_form_fields = contains_pdf_form_fields

    @property
    def display(self):
        """Gets the display of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The display of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._display

    @display.setter
    def display(self, display):
        """Sets the display of this EnvelopeDocument.

        # noqa: E501

        :param display: The display of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._display = display

    @property
    def display_metadata(self):
        """Gets the display_metadata of this EnvelopeDocument.  # noqa: E501

        :return: The display_metadata of this EnvelopeDocument.  # noqa: E501
        :rtype: PropertyMetadata
        """
        return self._display_metadata

    @display_metadata.setter
    def display_metadata(self, display_metadata):
        """Sets the display_metadata of this EnvelopeDocument.

        :param display_metadata: The display_metadata of this EnvelopeDocument.  # noqa: E501
        :type: PropertyMetadata
        """
        self._display_metadata = display_metadata

    @property
    def document_fields(self):
        """Gets the document_fields of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The document_fields of this EnvelopeDocument.  # noqa: E501
        :rtype: list[NameValue]
        """
        return self._document_fields

    @document_fields.setter
    def document_fields(self, document_fields):
        """Sets the document_fields of this EnvelopeDocument.

        # noqa: E501

        :param document_fields: The document_fields of this EnvelopeDocument.  # noqa: E501
        :type: list[NameValue]
        """
        self._document_fields = document_fields

    @property
    def document_id(self):
        """Gets the document_id of this EnvelopeDocument.  # noqa: E501

        Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.  # noqa: E501

        :return: The document_id of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._document_id

    @document_id.setter
    def document_id(self, document_id):
        """Sets the document_id of this EnvelopeDocument.

        Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.  # noqa: E501

        :param document_id: The document_id of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._document_id = document_id

    @property
    def document_id_guid(self):
        """Gets the document_id_guid of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The document_id_guid of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._document_id_guid

    @document_id_guid.setter
    def document_id_guid(self, document_id_guid):
        """Sets the document_id_guid of this EnvelopeDocument.

        # noqa: E501

        :param document_id_guid: The document_id_guid of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._document_id_guid = document_id_guid

    @property
    def error_details(self):
        """Gets the error_details of this EnvelopeDocument.  # noqa: E501

        :return: The error_details of this EnvelopeDocument.  # noqa: E501
        :rtype: ErrorDetails
        """
        return self._error_details

    @error_details.setter
    def error_details(self, error_details):
        """Sets the error_details of this EnvelopeDocument.

        :param error_details: The error_details of this EnvelopeDocument.  # noqa: E501
        :type: ErrorDetails
        """
        self._error_details = error_details

    @property
    def include_in_download(self):
        """Gets the include_in_download of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The include_in_download of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._include_in_download

    @include_in_download.setter
    def include_in_download(self, include_in_download):
        """Sets the include_in_download of this EnvelopeDocument.

        # noqa: E501

        :param include_in_download: The include_in_download of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._include_in_download = include_in_download

    @property
    def include_in_download_metadata(self):
        """Gets the include_in_download_metadata of this EnvelopeDocument.  # noqa: E501

        :return: The include_in_download_metadata of this EnvelopeDocument.  # noqa: E501
        :rtype: PropertyMetadata
        """
        return self._include_in_download_metadata

    @include_in_download_metadata.setter
    def include_in_download_metadata(self, include_in_download_metadata):
        """Sets the include_in_download_metadata of this EnvelopeDocument.

        :param include_in_download_metadata: The include_in_download_metadata of this EnvelopeDocument.  # noqa: E501
        :type: PropertyMetadata
        """
        self._include_in_download_metadata = include_in_download_metadata

    @property
    def name(self):
        """Gets the name of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The name of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this EnvelopeDocument.

        # noqa: E501

        :param name: The name of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def name_metadata(self):
        """Gets the name_metadata of this EnvelopeDocument.  # noqa: E501

        :return: The name_metadata of this EnvelopeDocument.  # noqa: E501
        :rtype: PropertyMetadata
        """
        return self._name_metadata

    @name_metadata.setter
    def name_metadata(self, name_metadata):
        """Sets the name_metadata of this EnvelopeDocument.

        :param name_metadata: The name_metadata of this EnvelopeDocument.  # noqa: E501
        :type: PropertyMetadata
        """
        self._name_metadata = name_metadata

    @property
    def order(self):
        """Gets the order of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The order of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._order

    @order.setter
    def order(self, order):
        """Sets the order of this EnvelopeDocument.

        # noqa: E501

        :param order: The order of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._order = order

    @property
    def pages(self):
        """Gets the pages of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The pages of this EnvelopeDocument.  # noqa: E501
        :rtype: list[Page]
        """
        return self._pages

    @pages.setter
    def pages(self, pages):
        """Sets the pages of this EnvelopeDocument.

        # noqa: E501

        :param pages: The pages of this EnvelopeDocument.  # noqa: E501
        :type: list[Page]
        """
        self._pages = pages

    @property
    def signer_must_acknowledge(self):
        """Gets the signer_must_acknowledge of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The signer_must_acknowledge of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._signer_must_acknowledge

    @signer_must_acknowledge.setter
    def signer_must_acknowledge(self, signer_must_acknowledge):
        """Sets the signer_must_acknowledge of this EnvelopeDocument.

        # noqa: E501

        :param signer_must_acknowledge: The signer_must_acknowledge of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._signer_must_acknowledge = signer_must_acknowledge

    @property
    def signer_must_acknowledge_metadata(self):
        """Gets the signer_must_acknowledge_metadata of this EnvelopeDocument.  # noqa: E501

        :return: The signer_must_acknowledge_metadata of this EnvelopeDocument.  # noqa: E501
        :rtype: PropertyMetadata
        """
        return self._signer_must_acknowledge_metadata

    @signer_must_acknowledge_metadata.setter
    def signer_must_acknowledge_metadata(self, signer_must_acknowledge_metadata):
        """Sets the signer_must_acknowledge_metadata of this EnvelopeDocument.

        :param signer_must_acknowledge_metadata: The signer_must_acknowledge_metadata of this EnvelopeDocument.  # noqa: E501
        :type: PropertyMetadata
        """
        self._signer_must_acknowledge_metadata = signer_must_acknowledge_metadata

    @property
    def size_bytes(self):
        """Gets the size_bytes of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The size_bytes of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._size_bytes

    @size_bytes.setter
    def size_bytes(self, size_bytes):
        """Sets the size_bytes of this EnvelopeDocument.

        # noqa: E501

        :param size_bytes: The size_bytes of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._size_bytes = size_bytes

    @property
    def template_locked(self):
        """Gets the template_locked of this EnvelopeDocument.  # noqa: E501

        When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients.  # noqa: E501

        :return: The template_locked of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._template_locked

    @template_locked.setter
    def template_locked(self, template_locked):
        """Sets the template_locked of this EnvelopeDocument.

        When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients.  # noqa: E501

        :param template_locked: The template_locked of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._template_locked = template_locked

    @property
    def template_required(self):
        """Gets the template_required of this EnvelopeDocument.  # noqa: E501

        When set to **true**, the sender may not remove the recipient. Used only when working with template recipients.  # noqa: E501

        :return: The template_required of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._template_required

    @template_required.setter
    def template_required(self, template_required):
        """Sets the template_required of this EnvelopeDocument.

        When set to **true**, the sender may not remove the recipient. Used only when working with template recipients.  # noqa: E501

        :param template_required: The template_required of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._template_required = template_required

    @property
    def type(self):
        """Gets the type of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The type of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this EnvelopeDocument.

        # noqa: E501

        :param type: The type of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._type = type

    @property
    def uri(self):
        """Gets the uri of this EnvelopeDocument.  # noqa: E501

        # noqa: E501

        :return: The uri of this EnvelopeDocument.  # noqa: E501
        :rtype: str
        """
        return self._uri

    @uri.setter
    def uri(self, uri):
        """Sets the uri of this EnvelopeDocument.

        # noqa: E501

        :param uri: The uri of this EnvelopeDocument.  # noqa: E501
        :type: str
        """
        self._uri = uri

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything exposing to_dict) and
        # containers of models; plain values pass through unchanged.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(EnvelopeDocument, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EnvelopeDocument):
            return False

        # Field-by-field comparison via the instance __dict__.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import os
import tempfile
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.tools import freeze_graph # pylint: disable=g-direct-tensorflow-import
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.utils import config_util
from object_detection.utils import shape_utils
# pylint: disable=g-import-not-at-top
try:
from tensorflow.contrib import tfprof as contrib_tfprof
from tensorflow.contrib.quantize.python import graph_matcher
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
# Module-level alias so callers can import this symbol from here rather than
# from tensorflow.python.tools.freeze_graph directly.
freeze_graph_with_def_protos = freeze_graph.freeze_graph_with_def_protos
def parse_side_inputs(side_input_shapes_string, side_input_names_string,
                      side_input_types_string):
  """Parses side input flags.

  Args:
    side_input_shapes_string: The shape of the side input tensors, provided as a
      comma-separated list of integers. A value of -1 is used for unknown
      dimensions. A `/` denotes a break, starting the shape of the next side
      input tensor.
    side_input_names_string: The names of the side input tensors, provided as a
      comma-separated list of strings.
    side_input_types_string: The type of the side input tensors, provided as a
      comma-separated list of types, each of `string`, `int`, or `float`.

  Returns:
    side_input_shapes: A list of shapes.
    side_input_names: A list of strings.
    side_input_types: A list of tensorflow dtypes.

  Raises:
    ValueError: If any of the three flag strings is empty.
    KeyError: If a type token is not one of `string`, `int`, `float`.
  """
  if side_input_shapes_string:
    side_input_shapes = []
    for side_input_shape_list in side_input_shapes_string.split('/'):
      # A '-1' dimension becomes None, matching TF's unknown-dimension
      # convention.
      side_input_shape = [
          int(dim) if dim != '-1' else None
          for dim in side_input_shape_list.split(',')
      ]
      side_input_shapes.append(side_input_shape)
  else:
    raise ValueError('When using side_inputs, side_input_shapes must be '
                     'specified in the input flags.')
  if side_input_names_string:
    side_input_names = list(side_input_names_string.split(','))
  else:
    raise ValueError('When using side_inputs, side_input_names must be '
                     'specified in the input flags.')
  if side_input_types_string:
    # NOTE: the accepted tokens are exactly these keys; the docstring
    # previously said `integer`, which would have raised KeyError here.
    typelookup = {'float': tf.float32, 'int': tf.int32, 'string': tf.string}
    side_input_types = [
        typelookup[side_input_type]
        for side_input_type in side_input_types_string.split(',')
    ]
  else:
    raise ValueError('When using side_inputs, side_input_types must be '
                     'specified in the input flags.')
  return side_input_shapes, side_input_names, side_input_types
def rewrite_nn_resize_op(is_quantized=False):
  """Replaces a custom nearest-neighbor resize op with the Tensorflow version.

  Some graphs use this custom version for TPU-compatibility.

  Args:
    is_quantized: True if the default graph is quantized.

  Raises:
    ValueError: If more than 4 rewrite passes are needed (treated as a
      non-converging rewrite).
  """
  def remove_nn():
    """Remove nearest neighbor upsampling structures and replace with TF op."""
    # The custom upsample appears in the graph as two stacked Pack ops
    # feeding a Reshape; we also match the op that consumes the Reshape so
    # its input can be redirected to the replacement resize op.
    input_pattern = graph_matcher.OpTypePattern(
        'FakeQuantWithMinMaxVars' if is_quantized else '*')
    stack_1_pattern = graph_matcher.OpTypePattern(
        'Pack', inputs=[input_pattern, input_pattern], ordered_inputs=False)
    stack_2_pattern = graph_matcher.OpTypePattern(
        'Pack', inputs=[stack_1_pattern, stack_1_pattern], ordered_inputs=False)
    reshape_pattern = graph_matcher.OpTypePattern(
        'Reshape', inputs=[stack_2_pattern, 'Const'], ordered_inputs=False)
    consumer_pattern1 = graph_matcher.OpTypePattern(
        'Add|AddV2|Max|Mul', inputs=[reshape_pattern, '*'],
        ordered_inputs=False)
    consumer_pattern2 = graph_matcher.OpTypePattern(
        'StridedSlice', inputs=[reshape_pattern, '*', '*', '*'],
        ordered_inputs=False)

    def replace_matches(consumer_pattern):
      """Search for nearest neighbor pattern and replace with TF op."""
      match_counter = 0
      matcher = graph_matcher.GraphMatcher(consumer_pattern)
      for match in matcher.match_graph(tf.get_default_graph()):
        match_counter += 1
        projection_op = match.get_op(input_pattern)
        reshape_op = match.get_op(reshape_pattern)
        consumer_op = match.get_op(consumer_pattern)
        # Target spatial size is taken from the Reshape output (dims 1:3,
        # i.e. height and width of an NHWC tensor).
        nn_resize = tf.image.resize_nearest_neighbor(
            projection_op.outputs[0],
            reshape_op.outputs[0].shape.dims[1:3],
            align_corners=False,
            name=os.path.split(reshape_op.name)[0] + '/resize_nearest_neighbor')

        # Rewire the consumer's matching input from the Reshape output to the
        # new resize op.
        for index, op_input in enumerate(consumer_op.inputs):
          if op_input == reshape_op.outputs[0]:
            consumer_op._update_input(index, nn_resize)  # pylint: disable=protected-access
            break

      return match_counter

    match_counter = replace_matches(consumer_pattern1)
    match_counter += replace_matches(consumer_pattern2)

    tf.logging.info('Found and fixed {} matches'.format(match_counter))
    return match_counter

  # Applying twice because both inputs to Add could be NN pattern
  total_removals = 0
  while remove_nn():
    total_removals += 1
    # This number is chosen based on the nas-fpn architecture.
    if total_removals > 4:
      # Fixed grammar in the error message ('a infinite' -> 'an infinite').
      raise ValueError('Graph removal encountered an infinite loop.')
def replace_variable_values_with_moving_averages(graph,
                                                 current_checkpoint_file,
                                                 new_checkpoint_file,
                                                 no_ema_collection=None):
  """Replaces variable values in the checkpoint with their moving averages.

  If the current checkpoint has shadow variables maintaining moving averages
  of the variables defined in the graph, this function generates a new
  checkpoint where the variables contain the values of their moving averages.

  Args:
    graph: a tf.Graph object.
    current_checkpoint_file: a checkpoint containing both original variables
      and their moving averages.
    new_checkpoint_file: file path to write a new checkpoint.
    no_ema_collection: A list of namescope substrings to match the variables
      to eliminate EMA.
  """
  with graph.as_default():
    # Decay value is irrelevant here; the object is only used to build the
    # variable -> EMA-shadow-variable restore mapping.
    ema = tf.train.ExponentialMovingAverage(0.0)
    restore_map = ema.variables_to_restore()
    restore_map = config_util.remove_unecessary_ema(restore_map,
                                                    no_ema_collection)
    with tf.Session() as session:
      # Restore each variable from its shadow (EMA) value, then write a
      # plain checkpoint containing those averaged values.
      reader = tf.train.Saver(restore_map)
      reader.restore(session, current_checkpoint_file)
      writer = tf.train.Saver()
      writer.save(session, new_checkpoint_file)
def _image_tensor_input_placeholder(input_shape=None):
  """Returns input placeholder and a 4-D uint8 image tensor."""
  shape = (None, None, None, 3) if input_shape is None else input_shape
  placeholder = tf.placeholder(dtype=tf.uint8, shape=shape,
                               name='image_tensor')
  # The placeholder already is the batched uint8 image tensor, so it serves
  # as both the feed point and the decoded-image output.
  return placeholder, placeholder
def _side_input_tensor_placeholder(side_input_shape, side_input_name,
                                   side_input_type):
  """Returns side input placeholder and side input tensor."""
  placeholder = tf.placeholder(dtype=side_input_type,
                               shape=side_input_shape,
                               name=side_input_name)
  # Side inputs are fed directly, so the placeholder doubles as the tensor.
  return placeholder, placeholder
def _tf_example_input_placeholder(input_shape=None):
  """Returns input that accepts a batch of strings with tf examples.

  Args:
    input_shape: the shape to resize the output decoded images to (optional).

  Returns:
    a tuple of input placeholder and the output decoded images.
  """
  tf_example_placeholder = tf.placeholder(
      tf.string, shape=[None], name='tf_example')

  def _decode_single_example(serialized_example):
    """Decodes one serialized tf.Example string into its image tensor."""
    decoded = tf_example_decoder.TfExampleDecoder().decode(serialized_example)
    image = decoded[fields.InputDataFields.image]
    if input_shape is not None:
      image = tf.image.resize(image, input_shape[1:3])
    return image

  decoded_images = shape_utils.static_or_dynamic_map_fn(
      _decode_single_example,
      elems=tf_example_placeholder,
      dtype=tf.uint8,
      parallel_iterations=32,
      back_prop=False)
  return tf_example_placeholder, decoded_images
def _encoded_image_string_tensor_input_placeholder(input_shape=None):
  """Returns input that accepts a batch of PNG or JPEG strings.

  Args:
    input_shape: the shape to resize the output decoded images to (optional).

  Returns:
    a tuple of input placeholder and the output decoded images.
  """
  image_str_placeholder = tf.placeholder(
      dtype=tf.string,
      shape=[None],
      name='encoded_image_string_tensor')

  def _decode_single_image(encoded_image):
    """Decodes one encoded PNG/JPEG string into a 3-channel uint8 image."""
    image = tf.image.decode_image(encoded_image, channels=3)
    image.set_shape((None, None, 3))
    if input_shape is not None:
      image = tf.image.resize(image, input_shape[1:3])
    return image

  decoded_images = tf.map_fn(
      _decode_single_image,
      elems=image_str_placeholder,
      dtype=tf.uint8,
      parallel_iterations=32,
      back_prop=False)
  return image_str_placeholder, decoded_images
# Maps an exporter input-type name to the function that builds the matching
# input placeholder and decoded uint8 image batch.
input_placeholder_fn_map = {
    'image_tensor': _image_tensor_input_placeholder,
    'encoded_image_string_tensor':
        _encoded_image_string_tensor_input_placeholder,
    'tf_example': _tf_example_input_placeholder
}
def add_output_tensor_nodes(postprocessed_tensors,
                            output_collection_name='inference_op'):
  """Adds named output nodes for detection boxes, scores, and related fields.

  Wraps each postprocessed tensor in a `tf.identity` op whose name matches
  the standard detection output field, then registers every output tensor in
  the given graph collection. Required outputs are 'detection_boxes',
  'detection_scores', 'detection_classes' and 'num_detections'; the
  remaining detection fields are emitted only when present in the input.

  Args:
    postprocessed_tensors: a dictionary containing the following fields
      'detection_boxes': [batch, max_detections, 4]
      'detection_scores': [batch, max_detections]
      'detection_multiclass_scores': [batch, max_detections,
        num_classes_with_background] (optional).
      'detection_features': [batch, num_boxes, roi_height, roi_width, depth]
        (optional).
      'detection_classes': [batch, max_detections]
      'detection_masks': [batch, max_detections, mask_height, mask_width]
        (optional).
      'detection_keypoints': [batch, max_detections, num_keypoints, 2]
        (optional).
      'num_detections': [batch]
    output_collection_name: Name of collection to add output tensors to.

  Returns:
    A tensor dict containing the added output tensor nodes.
  """
  detection_fields = fields.DetectionResultFields
  # Model-internal class indices are 0-based; exported classes are 1-based.
  label_id_offset = 1

  def _named_identity(field_name, tensor):
    """Wraps `tensor` in an identity op named after its output field."""
    return tf.identity(tensor, name=field_name)

  outputs = {}
  outputs[detection_fields.detection_boxes] = _named_identity(
      detection_fields.detection_boxes,
      postprocessed_tensors.get(detection_fields.detection_boxes))
  outputs[detection_fields.detection_scores] = _named_identity(
      detection_fields.detection_scores,
      postprocessed_tensors.get(detection_fields.detection_scores))
  for optional_field in (detection_fields.detection_multiclass_scores,
                         detection_fields.detection_features):
    tensor = postprocessed_tensors.get(optional_field)
    if tensor is not None:
      outputs[optional_field] = _named_identity(optional_field, tensor)
  outputs[detection_fields.detection_classes] = _named_identity(
      detection_fields.detection_classes,
      postprocessed_tensors.get(detection_fields.detection_classes) +
      label_id_offset)
  outputs[detection_fields.num_detections] = _named_identity(
      detection_fields.num_detections,
      postprocessed_tensors.get(detection_fields.num_detections))
  for optional_field in (detection_fields.raw_detection_boxes,
                         detection_fields.raw_detection_scores,
                         detection_fields.detection_keypoints,
                         detection_fields.detection_masks):
    tensor = postprocessed_tensors.get(optional_field)
    if tensor is not None:
      outputs[optional_field] = _named_identity(optional_field, tensor)

  for output_tensor in outputs.values():
    tf.add_to_collection(output_collection_name, output_tensor)
  return outputs
def write_saved_model(saved_model_path,
                      frozen_graph_def,
                      inputs,
                      outputs):
  """Writes SavedModel to disk.

  The provided graph def is expected to be frozen (weights baked into the
  graph as constants), so the SavedModel is written without separate
  variables and no checkpoint files are needed during inference.

  Args:
    saved_model_path: Path to write SavedModel.
    frozen_graph_def: tf.GraphDef holding frozen graph.
    inputs: The input placeholder tensor of a DetectionModel, or a dict
      mapping input names to placeholder tensors.
    outputs: A tensor dictionary containing the outputs of a DetectionModel.
  """
  with tf.Graph().as_default():
    with tf.Session() as sess:
      tf.import_graph_def(frozen_graph_def, name='')

      builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path)

      # Build TensorInfo protos for the serving signature. A non-dict input
      # is exposed under the single name 'inputs'.
      tensor_info_inputs = {}
      if isinstance(inputs, dict):
        for k, v in inputs.items():
          tensor_info_inputs[k] = tf.saved_model.utils.build_tensor_info(v)
      else:
        tensor_info_inputs['inputs'] = tf.saved_model.utils.build_tensor_info(
            inputs)
      tensor_info_outputs = {}
      for k, v in outputs.items():
        tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)

      detection_signature = (
          tf.saved_model.signature_def_utils.build_signature_def(
              inputs=tensor_info_inputs,
              outputs=tensor_info_outputs,
              method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
          ))

      builder.add_meta_graph_and_variables(
          sess,
          [tf.saved_model.tag_constants.SERVING],
          signature_def_map={
              tf.saved_model.signature_constants
              .DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                  detection_signature,
          },
      )
      builder.save()
def write_graph_and_checkpoint(inference_graph_def,
                               model_path,
                               input_saver_def,
                               trained_checkpoint_prefix):
  """Writes the graph and the checkpoint into disk."""
  # Strip device annotations so the exported graph is not pinned to the
  # devices used during training.
  for graph_node in inference_graph_def.node:
    graph_node.device = ''
  export_graph = tf.Graph()
  with export_graph.as_default():
    tf.import_graph_def(inference_graph_def, name='')
    with tf.Session() as session:
      checkpoint_saver = tf.train.Saver(saver_def=input_saver_def,
                                        save_relative_paths=True)
      checkpoint_saver.restore(session, trained_checkpoint_prefix)
      checkpoint_saver.save(session, model_path)
def _get_outputs_from_inputs(input_tensors, detection_model,
                             output_collection_name, **side_inputs):
  """Runs preprocess/predict/postprocess and registers the output tensors."""
  # The detection model expects float inputs; placeholders feed uint8.
  float_inputs = tf.cast(input_tensors, dtype=tf.float32)
  preprocessed_inputs, true_image_shapes = detection_model.preprocess(
      float_inputs)
  prediction_dict = detection_model.predict(
      preprocessed_inputs, true_image_shapes, **side_inputs)
  detections = detection_model.postprocess(prediction_dict, true_image_shapes)
  return add_output_tensor_nodes(detections, output_collection_name)
def build_detection_graph(input_type, detection_model, input_shape,
                          output_collection_name, graph_hook_fn,
                          use_side_inputs=False, side_input_shapes=None,
                          side_input_names=None, side_input_types=None):
  """Build the detection graph.

  Args:
    input_type: Key into `input_placeholder_fn_map` selecting the input
      placeholder type ('image_tensor', 'encoded_image_string_tensor', or
      'tf_example').
    detection_model: The DetectionModel to build the graph for.
    input_shape: Optional fixed shape for the input placeholder.
    output_collection_name: Name of collection to add output tensors to.
    graph_hook_fn: Optional function called after graph construction (e.g.
      a graph rewriter); may be None.
    use_side_inputs: If True, build a placeholder per model side input.
    side_input_shapes: List of side-input shapes (used with side inputs).
    side_input_names: List of side-input names (used with side inputs).
    side_input_types: List of side-input dtypes (used with side inputs).

  Returns:
    A tuple of (output tensor dict, placeholder tensor dict).

  Raises:
    ValueError: If `input_type` is unknown, or `input_shape` is specified
      for an unsupported input type.
  """
  if input_type not in input_placeholder_fn_map:
    raise ValueError('Unknown input type: {}'.format(input_type))
  placeholder_args = {}
  side_inputs = {}
  if input_shape is not None:
    # NOTE(review): 'tf_sequence_example' is accepted here for parity with
    # other exporters although it is not in input_placeholder_fn_map, so it
    # would already have raised above.
    if input_type not in ('image_tensor', 'encoded_image_string_tensor',
                          'tf_example', 'tf_sequence_example'):
      raise ValueError('Can only specify input shape for `image_tensor`, '
                       '`encoded_image_string_tensor`, `tf_example`, '
                       ' or `tf_sequence_example` inputs.')
    placeholder_args['input_shape'] = input_shape
  placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type](
      **placeholder_args)
  placeholder_tensors = {'inputs': placeholder_tensor}
  if use_side_inputs:
    # Fixed a stray debug `print(side_input)` that leaked to stdout here.
    for idx, side_input_name in enumerate(side_input_names):
      side_input_placeholder, side_input = _side_input_tensor_placeholder(
          side_input_shapes[idx], side_input_name, side_input_types[idx])
      side_inputs[side_input_name] = side_input
      placeholder_tensors[side_input_name] = side_input_placeholder
  outputs = _get_outputs_from_inputs(
      input_tensors=input_tensors,
      detection_model=detection_model,
      output_collection_name=output_collection_name,
      **side_inputs)

  # Add global step to the graph.
  slim.get_or_create_global_step()
  if graph_hook_fn:
    graph_hook_fn()
  return outputs, placeholder_tensors
def _export_inference_graph(input_type,
                            detection_model,
                            use_moving_averages,
                            trained_checkpoint_prefix,
                            output_directory,
                            additional_output_tensor_names=None,
                            input_shape=None,
                            output_collection_name='inference_op',
                            graph_hook_fn=None,
                            write_inference_graph=False,
                            temp_checkpoint_prefix='',
                            use_side_inputs=False,
                            side_input_shapes=None,
                            side_input_names=None,
                            side_input_types=None):
  """Export helper.

  Builds the detection graph, optionally folds exponential-moving-average
  weights into a temporary checkpoint, then writes to `output_directory`:
  a checkpoint ('model.ckpt'), a frozen GraphDef
  ('frozen_inference_graph.pb'), a SavedModel ('saved_model/'), and
  optionally a text inference graph ('inference_graph.pbtxt').

  Args:
    input_type: Input placeholder type; a key of `input_placeholder_fn_map`.
    detection_model: A DetectionModel to export.
    use_moving_averages: If True, replace variable values with their moving
      averages before freezing.
    trained_checkpoint_prefix: Path prefix of the trained checkpoint.
    output_directory: Directory to write all outputs into (created if
      needed).
    additional_output_tensor_names: Optional list of extra output tensor
      names to keep in the frozen graph.
    input_shape: Optional fixed input shape.
    output_collection_name: Name of collection to add output tensors to.
    graph_hook_fn: Optional function run after graph construction.
    write_inference_graph: If True, also write 'inference_graph.pbtxt'.
    temp_checkpoint_prefix: Optional path for the temporary EMA checkpoint;
      auto-generated when empty.
    use_side_inputs: If True, the model requires side inputs.
    side_input_shapes: Shapes of the side input tensors.
    side_input_names: Names of the side input tensors.
    side_input_types: Types of the side input tensors.
  """
  tf.gfile.MakeDirs(output_directory)
  frozen_graph_path = os.path.join(output_directory,
                                   'frozen_inference_graph.pb')
  saved_model_path = os.path.join(output_directory, 'saved_model')
  model_path = os.path.join(output_directory, 'model.ckpt')

  outputs, placeholder_tensor_dict = build_detection_graph(
      input_type=input_type,
      detection_model=detection_model,
      input_shape=input_shape,
      output_collection_name=output_collection_name,
      graph_hook_fn=graph_hook_fn,
      use_side_inputs=use_side_inputs,
      side_input_shapes=side_input_shapes,
      side_input_names=side_input_names,
      side_input_types=side_input_types)

  profile_inference_graph(tf.get_default_graph())
  saver_kwargs = {}
  if use_moving_averages:
    if not temp_checkpoint_prefix:
      # This check is to be compatible with both version of SaverDef.
      if os.path.isfile(trained_checkpoint_prefix):
        saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
        temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name
      else:
        temp_checkpoint_prefix = tempfile.mkdtemp()
    # Fold EMA values into a temporary checkpoint so freezing picks up the
    # averaged weights instead of the raw trained variables.
    replace_variable_values_with_moving_averages(
        tf.get_default_graph(), trained_checkpoint_prefix,
        temp_checkpoint_prefix)
    checkpoint_to_use = temp_checkpoint_prefix
  else:
    checkpoint_to_use = trained_checkpoint_prefix

  saver = tf.train.Saver(**saver_kwargs)
  input_saver_def = saver.as_saver_def()

  write_graph_and_checkpoint(
      inference_graph_def=tf.get_default_graph().as_graph_def(),
      model_path=model_path,
      input_saver_def=input_saver_def,
      trained_checkpoint_prefix=checkpoint_to_use)
  if write_inference_graph:
    inference_graph_def = tf.get_default_graph().as_graph_def()
    inference_graph_path = os.path.join(output_directory,
                                        'inference_graph.pbtxt')
    for node in inference_graph_def.node:
      node.device = ''
    with tf.gfile.GFile(inference_graph_path, 'wb') as f:
      f.write(str(inference_graph_def))

  if additional_output_tensor_names is not None:
    output_node_names = ','.join(list(outputs.keys())+(
        additional_output_tensor_names))
  else:
    output_node_names = ','.join(outputs.keys())

  frozen_graph_def = freeze_graph.freeze_graph_with_def_protos(
      input_graph_def=tf.get_default_graph().as_graph_def(),
      input_saver_def=input_saver_def,
      input_checkpoint=checkpoint_to_use,
      output_node_names=output_node_names,
      restore_op_name='save/restore_all',
      filename_tensor_name='save/Const:0',
      output_graph=frozen_graph_path,
      clear_devices=True,
      initializer_nodes='')

  write_saved_model(saved_model_path, frozen_graph_def,
                    placeholder_tensor_dict, outputs)
def export_inference_graph(input_type,
                           pipeline_config,
                           trained_checkpoint_prefix,
                           output_directory,
                           input_shape=None,
                           output_collection_name='inference_op',
                           additional_output_tensor_names=None,
                           write_inference_graph=False,
                           use_side_inputs=False,
                           side_input_shapes=None,
                           side_input_names=None,
                           side_input_types=None):
  """Exports inference graph for the model specified in the pipeline config.

  Note: this function mutates `pipeline_config` — it sets
  `eval_config.use_moving_averages` to False before saving the config
  alongside the exported model, since averaging is already applied during
  export.

  Args:
    input_type: Type of input for the graph. Can be one of ['image_tensor',
      'encoded_image_string_tensor', 'tf_example'].
    pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
    trained_checkpoint_prefix: Path to the trained checkpoint file.
    output_directory: Path to write outputs.
    input_shape: Sets a fixed shape for an `image_tensor` input. If not
      specified, will default to [None, None, None, 3].
    output_collection_name: Name of collection to add output tensors to.
      If None, does not add output tensors to a collection.
    additional_output_tensor_names: list of additional output
      tensors to include in the frozen graph.
    write_inference_graph: If true, writes inference graph to disk.
    use_side_inputs: If True, the model requires side_inputs.
    side_input_shapes: List of shapes of the side input tensors,
      required if use_side_inputs is True.
    side_input_names: List of names of the side input tensors,
      required if use_side_inputs is True.
    side_input_types: List of types of the side input tensors,
      required if use_side_inputs is True.
  """
  detection_model = model_builder.build(pipeline_config.model,
                                        is_training=False)
  graph_rewriter_fn = None
  if pipeline_config.HasField('graph_rewriter'):
    graph_rewriter_config = pipeline_config.graph_rewriter
    graph_rewriter_fn = graph_rewriter_builder.build(graph_rewriter_config,
                                                     is_training=False)
  _export_inference_graph(
      input_type,
      detection_model,
      pipeline_config.eval_config.use_moving_averages,
      trained_checkpoint_prefix,
      output_directory,
      additional_output_tensor_names,
      input_shape,
      output_collection_name,
      graph_hook_fn=graph_rewriter_fn,
      write_inference_graph=write_inference_graph,
      use_side_inputs=use_side_inputs,
      side_input_shapes=side_input_shapes,
      side_input_names=side_input_names,
      side_input_types=side_input_types)
  # Moving averages have already been folded into the exported weights, so
  # the config saved with the model must not re-apply them.
  pipeline_config.eval_config.use_moving_averages = False
  config_util.save_pipeline_config(pipeline_config, output_directory)
def profile_inference_graph(graph):
  """Profiles the inference graph.

  Prints model parameters and computation FLOPs given an inference graph.
  BatchNorms are excluded from the parameter count due to the fact that
  BatchNorms are usually folded. BatchNorm, Initializer, Regularizer
  and BiasAdd are not considered in FLOP count.

  Args:
    graph: the inference graph.
  """
  analyzer = contrib_tfprof.model_analyzer
  vars_options = analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
  flops_options = analyzer.FLOAT_OPS_OPTIONS

  # Batchnorm is usually folded during inference.
  vars_options['trim_name_regexes'] = ['.*BatchNorm.*']
  # Initializer and Regularizer are only used in training.
  flops_options['trim_name_regexes'] = [
      '.*BatchNorm.*', '.*Initializer.*', '.*Regularizer.*', '.*BiasAdd.*'
  ]

  for options in (vars_options, flops_options):
    analyzer.print_model_analysis(graph, tfprof_options=options)
# ===== End of export utilities; Magenta event sequence RNN model follows. =====
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event sequence RNN model."""
import collections
import copy
import functools
from magenta.common import beam_search
from magenta.common import state_util
from magenta.contrib import training as contrib_training
from magenta.models.shared import events_rnn_graph
from magenta.models.shared import model
import note_seq
import numpy as np
import tensorflow.compat.v1 as tf
# Model state when generating event sequences, consisting of the next inputs to
# feed the model, the current RNN state, the current control sequence (if
# applicable), and state for the current control sequence (if applicable).
#
# Fields:
#   inputs: Next encoded model inputs to feed.
#   rnn_state: Current RNN state fed back into the graph each step.
#   control_events: Control event sequence being conditioned on, or None.
#   control_state: State used when extending the control sequence, or None.
ModelState = collections.namedtuple(
    'ModelState', ['inputs', 'rnn_state', 'control_events', 'control_state'])
class EventSequenceRnnModelError(Exception):
  """Raised when an event sequence RNN model is configured or used badly."""
def _extend_control_events_default(control_events, events, state):
"""Default function for extending control event sequence.
This function extends a control event sequence by duplicating the final event
in the sequence. The control event sequence will be extended to have length
one longer than the generated event sequence.
Args:
control_events: The control event sequence to extend.
events: The list of generated events.
state: State maintained while generating, unused.
Returns:
The resulting state after extending the control sequence (in this case the
state will be returned unmodified).
"""
while len(control_events) <= len(events):
control_events.append(control_events[-1])
return state
class EventSequenceRnnModel(model.BaseModel):
  """Class for RNN event sequence generation models.

  Currently this class only supports generation, of both event sequences and
  note sequences (via event sequences). Support for model training will be
  added at a later time.
  """

  def __init__(self, config):
    """Initialize the EventSequenceRnnModel.

    Args:
      config: An EventSequenceRnnConfig containing the encoder/decoder and
        HParams to use.
    """
    super(EventSequenceRnnModel, self).__init__()
    # The config supplies the encoder/decoder used throughout generation
    # and likelihood evaluation.
    self._config = config
def _build_graph_for_generation(self):
  """Builds the generation ('generate' mode) graph for this model's config."""
  events_rnn_graph.get_build_graph_fn('generate', self._config)()
def _batch_size(self):
  """Extracts the batch size from the graph."""
  # The batch size is the leading dimension of the graph's inputs tensor,
  # registered under the 'inputs' collection at graph build time.
  inputs_tensor = self._session.graph.get_collection('inputs')[0]
  return int(inputs_tensor.shape[0])
def _generate_step_for_batch(self, event_sequences, inputs, initial_state,
                             temperature):
  """Extends a batch of event sequences by a single step each.

  This method modifies the event sequences in place.

  Args:
    event_sequences: A list of event sequences, each of which is a Python
      list-like object. The list of event sequences should have length equal
      to `self._batch_size()`. These are extended by this method.
    inputs: A Python list of model inputs, with length equal to
      `self._batch_size()`.
    initial_state: A numpy array containing the initial RNN state, where
      `initial_state.shape[0]` is equal to `self._batch_size()`.
    temperature: The softmax temperature.

  Returns:
    final_state: The final RNN state, a numpy array the same size as
      `initial_state`.
    loglik: The log-likelihood of the chosen softmax value for each event
      sequence, a 1-D numpy array of length
      `self._batch_size()`. If `inputs` is a full-length inputs batch, the
      log-likelihood of each entire sequence up to and including the
      generated step will be computed and returned.
  """
  assert len(event_sequences) == self._batch_size()

  # Relevant tensors are looked up via the collections populated at graph
  # build time.
  graph_inputs = self._session.graph.get_collection('inputs')[0]
  graph_initial_state = self._session.graph.get_collection('initial_state')
  graph_final_state = self._session.graph.get_collection('final_state')
  graph_softmax = self._session.graph.get_collection('softmax')[0]
  graph_temperature = self._session.graph.get_collection('temperature')

  feed_dict = {graph_inputs: inputs,
               tuple(graph_initial_state): initial_state}
  # For backwards compatibility, we only try to pass temperature if the
  # placeholder exists in the graph.
  if graph_temperature:
    feed_dict[graph_temperature[0]] = temperature
  final_state, softmax = self._session.run(
      [graph_final_state, graph_softmax], feed_dict)

  # `softmax` is a list when the graph exposes multiple softmax outputs
  # (presumably one per event sub-sequence — TODO confirm against
  # events_rnn_graph); otherwise it is a single [batch, steps, classes]
  # array.
  if isinstance(softmax, list):
    if softmax[0].shape[1] > 1:
      # The inputs batch spans multiple steps, so also score the events
      # leading up to the step being generated (all but the last step).
      softmaxes = []
      for beam in range(softmax[0].shape[0]):
        beam_softmaxes = []
        for event in range(softmax[0].shape[1] - 1):
          beam_softmaxes.append(
              [softmax[s][beam, event] for s in range(len(softmax))])
        softmaxes.append(beam_softmaxes)
      loglik = self._config.encoder_decoder.evaluate_log_likelihood(
          event_sequences, softmaxes)
    else:
      loglik = np.zeros(len(event_sequences))
  else:
    if softmax.shape[1] > 1:
      # The inputs batch is longer than a single step, so we also want to
      # compute the log-likelihood of the event sequences up until the step
      # we're generating.
      loglik = self._config.encoder_decoder.evaluate_log_likelihood(
          event_sequences, softmax[:, :-1, :])
    else:
      loglik = np.zeros(len(event_sequences))

  # Sample the next event for each sequence (extends them in place) and
  # remember which class index was chosen so its probability can be added
  # to the log-likelihood below.
  indices = np.array(self._config.encoder_decoder.extend_event_sequences(
      event_sequences, softmax))
  if isinstance(softmax, list):
    p = 1.0
    for i in range(len(softmax)):
      p *= softmax[i][range(len(event_sequences)), -1, indices[:, i]]
  else:
    p = softmax[range(len(event_sequences)), -1, indices]

  return final_state, loglik + np.log(p)
def _generate_step(self, event_sequences, model_states, logliks, temperature,
                   extend_control_events_callback=None,
                   modify_events_callback=None):
  """Extends a list of event sequences by a single step each.

  This method modifies the event sequences in place. It also returns the
  modified event sequences and updated model states and log-likelihoods.

  Args:
    event_sequences: A list of event sequence objects, which are extended by
      this method.
    model_states: A list of model states, each of which contains model inputs
      and initial RNN states.
    logliks: A list containing the current log-likelihood for each event
      sequence.
    temperature: The softmax temperature.
    extend_control_events_callback: A function that takes three arguments: a
      current control event sequence, a current generated event sequence,
      and the control state. The function should a) extend the control event
      sequence to be one longer than the generated event sequence (or do
      nothing if it is already at least this long), and b) return the
      resulting control state.
    modify_events_callback: An optional callback for modifying the event list.
      Can be used to inject events rather than having them generated. If not
      None, will be called with 3 arguments after every event: the current
      EventSequenceEncoderDecoder, a list of current EventSequences, and a
      list of current encoded event inputs.

  Returns:
    event_sequences: A list of extended event sequences. These are modified in
      place but also returned.
    final_states: A list of resulting model states, containing model inputs
      for the next step along with RNN states for each event sequence.
    logliks: A list containing the updated log-likelihood for each event
      sequence.
  """
  # Split the sequences to extend into batches matching the model batch size.
  batch_size = self._batch_size()
  num_seqs = len(event_sequences)
  num_batches = int(np.ceil(num_seqs / float(batch_size)))

  # Extract inputs and RNN states from the model states.
  inputs = [model_state.inputs for model_state in model_states]
  initial_states = [model_state.rnn_state for model_state in model_states]

  # Also extract control sequences and states.
  control_sequences = [
      model_state.control_events for model_state in model_states]
  control_states = [
      model_state.control_state for model_state in model_states]

  final_states = []
  logliks = np.array(logliks, dtype=np.float32)

  # Add padding to fill the final batch.
  # Padded entries duplicate the last sequence/input/state; their results
  # are sliced off again after each batch below.
  pad_amt = -len(event_sequences) % batch_size
  padded_event_sequences = event_sequences + [
      copy.deepcopy(event_sequences[-1]) for _ in range(pad_amt)]
  padded_inputs = inputs + [inputs[-1]] * pad_amt
  padded_initial_states = initial_states + [initial_states[-1]] * pad_amt

  for b in range(num_batches):
    i, j = b * batch_size, (b + 1) * batch_size
    pad_amt = max(0, j - num_seqs)
    # Generate a single step for one batch of event sequences.
    batch_final_state, batch_loglik = self._generate_step_for_batch(
        padded_event_sequences[i:j],
        padded_inputs[i:j],
        state_util.batch(padded_initial_states[i:j], batch_size),
        temperature)
    # Keep only the results for real (non-padding) sequences.
    final_states += state_util.unbatch(
        batch_final_state, batch_size)[:j - i - pad_amt]
    logliks[i:j - pad_amt] += batch_loglik[:j - i - pad_amt]

  # Construct inputs for next step.
  if extend_control_events_callback is not None:
    # We are conditioning on control sequences.
    for idx in range(len(control_sequences)):
      # Extend each control sequence to ensure that it is longer than the
      # corresponding event sequence.
      control_states[idx] = extend_control_events_callback(
          control_sequences[idx], event_sequences[idx], control_states[idx])
    next_inputs = self._config.encoder_decoder.get_inputs_batch(
        control_sequences, event_sequences)
  else:
    next_inputs = self._config.encoder_decoder.get_inputs_batch(
        event_sequences)

  if modify_events_callback:
    # Modify event sequences and inputs for next step.
    modify_events_callback(
        self._config.encoder_decoder, event_sequences, next_inputs)

  model_states = [ModelState(inputs=inputs, rnn_state=final_state,
                             control_events=control_events,
                             control_state=control_state)
                  for inputs, final_state, control_events, control_state
                  in zip(next_inputs, final_states,
                         control_sequences, control_states)]

  return event_sequences, model_states, logliks
def _generate_events(self, num_steps, primer_events, temperature=1.0,
                     beam_size=1, branch_factor=1, steps_per_iteration=1,
                     control_events=None, control_state=None,
                     extend_control_events_callback=(
                         _extend_control_events_default),
                     modify_events_callback=None):
  """Generate an event sequence from a primer sequence.

  Args:
    num_steps: The integer length in steps of the final event sequence, after
      generation. Includes the primer.
    primer_events: The primer event sequence, a Python list-like object.
    temperature: A float specifying how much to divide the logits by
      before computing the softmax. Greater than 1.0 makes events more
      random, less than 1.0 makes events less random.
    beam_size: An integer, beam size to use when generating event sequences
      via beam search.
    branch_factor: An integer, beam search branch factor to use.
    steps_per_iteration: An integer, number of steps to take per beam search
      iteration.
    control_events: A sequence of control events upon which to condition the
      generation. If not None, the encoder/decoder should be a
      ConditionalEventSequenceEncoderDecoder, and the control events will be
      used along with the target sequence to generate model inputs. In some
      cases, the control event sequence cannot be fully-determined as later
      control events depend on earlier generated events; use the
      `extend_control_events_callback` argument to provide a function that
      extends the control event sequence.
    control_state: Initial state used by `extend_control_events_callback`.
    extend_control_events_callback: A function that takes three arguments: a
      current control event sequence, a current generated event sequence,
      and the control state. The function should a) extend the control event
      sequence to be one longer than the generated event sequence (or do
      nothing if it is already at least this long), and b) return the
      resulting control state.
    modify_events_callback: An optional callback for modifying the event list.
      Can be used to inject events rather than having them generated. If not
      None, will be called with 3 arguments after every event: the current
      EventSequenceEncoderDecoder, a list of current EventSequences, and a
      list of current encoded event inputs.

  Returns:
    The generated event sequence (which begins with the provided primer).

  Raises:
    EventSequenceRnnModelError: If the primer sequence has zero length or
      is not shorter than num_steps, or if a control sequence is provided
      with an incompatible encoder/decoder or without an extension callback.
  """
  if (control_events is not None and
      not isinstance(self._config.encoder_decoder,
                     note_seq.ConditionalEventSequenceEncoderDecoder)):
    raise EventSequenceRnnModelError(
        'control sequence provided but encoder/decoder is not a '
        'ConditionalEventSequenceEncoderDecoder')
  if control_events is not None and extend_control_events_callback is None:
    raise EventSequenceRnnModelError(
        'must provide callback for extending control sequence (or use'
        'default)')

  if not primer_events:
    raise EventSequenceRnnModelError(
        'primer sequence must have non-zero length')
  if len(primer_events) >= num_steps:
    raise EventSequenceRnnModelError(
        'primer sequence must be shorter than `num_steps`')
  # NOTE: a second identical `len(primer_events) >= num_steps` check that
  # returned `primer_events` used to follow here; it was unreachable
  # because the check above already raises, so it has been removed.

  event_sequences = [copy.deepcopy(primer_events)]

  # Construct inputs for first step after primer.
  if control_events is not None:
    # We are conditioning on a control sequence. Make sure it is longer than
    # the primer sequence.
    control_state = extend_control_events_callback(
        control_events, primer_events, control_state)
    inputs = self._config.encoder_decoder.get_inputs_batch(
        [control_events], event_sequences, full_length=True)
  else:
    inputs = self._config.encoder_decoder.get_inputs_batch(
        event_sequences, full_length=True)

  if modify_events_callback:
    # Modify event sequences and inputs for first step after primer.
    modify_events_callback(
        self._config.encoder_decoder, event_sequences, inputs)

  graph_initial_state = self._session.graph.get_collection('initial_state')
  initial_states = state_util.unbatch(self._session.run(graph_initial_state))

  # Beam search will maintain a state for each sequence consisting of the next
  # inputs to feed the model, and the current RNN state. We start out with the
  # initial full inputs batch and the zero state.
  initial_state = ModelState(
      inputs=inputs[0], rnn_state=initial_states[0],
      control_events=control_events, control_state=control_state)

  generate_step_fn = functools.partial(
      self._generate_step,
      temperature=temperature,
      extend_control_events_callback=
      extend_control_events_callback if control_events is not None else None,
      modify_events_callback=modify_events_callback)

  events, _, loglik = beam_search(
      initial_sequence=event_sequences[0],
      initial_state=initial_state,
      generate_step_fn=generate_step_fn,
      num_steps=num_steps - len(primer_events),
      beam_size=beam_size,
      branch_factor=branch_factor,
      steps_per_iteration=steps_per_iteration)

  tf.logging.info('Beam search yields sequence with log-likelihood: %f ',
                  loglik)

  return events
  def _evaluate_batch_log_likelihood(self, event_sequences, inputs,
                                     initial_state):
    """Evaluates the log likelihood of a batch of event sequences.

    Runs a single softmax evaluation through the underlying TF1 session and
    lets the encoder/decoder score each sequence against the resulting
    distribution.

    Args:
      event_sequences: A list of event sequences, each of which is a Python
        list-like object. The list of event sequences should have length equal
        to `self._batch_size()`.
      inputs: A Python list of model inputs, with length equal to
        `self._batch_size()`.
      initial_state: A numpy array containing the initial RNN state, where
        `initial_state.shape[0]` is equal to `self._batch_size()`.

    Returns:
      A Python list containing the log likelihood of each sequence in
      `event_sequences`.
    """
    # Placeholders/ops were stashed into graph collections at build time;
    # fetch them back out here rather than holding direct references.
    graph_inputs = self._session.graph.get_collection('inputs')[0]
    graph_initial_state = self._session.graph.get_collection('initial_state')
    graph_softmax = self._session.graph.get_collection('softmax')[0]
    graph_temperature = self._session.graph.get_collection('temperature')
    # Using a tuple of state tensors as a single feed_dict key feeds the
    # whole (possibly nested) RNN state structure at once.
    feed_dict = {graph_inputs: inputs,
                 tuple(graph_initial_state): initial_state}
    # For backwards compatibility, we only try to pass temperature if the
    # placeholder exists in the graph.
    if graph_temperature:
      feed_dict[graph_temperature[0]] = 1.0
    softmax = self._session.run(graph_softmax, feed_dict)
    return self._config.encoder_decoder.evaluate_log_likelihood(
        event_sequences, softmax)
def _evaluate_log_likelihood(self, event_sequences, control_events=None):
"""Evaluate log likelihood for a list of event sequences of the same length.
Args:
event_sequences: A list of event sequences for which to evaluate the log
likelihood.
control_events: A sequence of control events upon which to condition the
event sequences. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the log likelihood of each
event sequence will be computed conditional on the control sequence.
Returns:
The log likelihood of each sequence in `event_sequences`.
Raises:
EventSequenceRnnModelError: If the event sequences are not all the
same length, or if the control sequence is shorter than the event
sequences.
"""
num_steps = len(event_sequences[0])
for events in event_sequences[1:]:
if len(events) != num_steps:
raise EventSequenceRnnModelError(
'log likelihood evaluation requires all event sequences to have '
'the same length')
if control_events is not None and len(control_events) < num_steps:
raise EventSequenceRnnModelError(
'control sequence must be at least as long as the event sequences')
batch_size = self._batch_size()
num_full_batches = len(event_sequences) // batch_size
loglik = np.empty(len(event_sequences))
# Since we're computing log-likelihood and not generating, the inputs batch
# doesn't need to include the final event in each sequence.
if control_events is not None:
# We are conditioning on a control sequence.
inputs = self._config.encoder_decoder.get_inputs_batch(
[control_events] * len(event_sequences),
[events[:-1] for events in event_sequences],
full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch(
[events[:-1] for events in event_sequences], full_length=True)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_state = self._session.run(graph_initial_state)
offset = 0
for _ in range(num_full_batches):
# Evaluate a single step for one batch of event sequences.
batch_indices = range(offset, offset + batch_size)
batch_loglik = self._evaluate_batch_log_likelihood(
[event_sequences[i] for i in batch_indices],
[inputs[i] for i in batch_indices],
[initial_state] * len(batch_indices))
loglik[batch_indices] = batch_loglik
offset += batch_size
if offset < len(event_sequences):
# There's an extra non-full batch. Pad it with a bunch of copies of the
# final sequence.
num_extra = len(event_sequences) - offset
pad_size = batch_size - num_extra
batch_indices = range(offset, len(event_sequences))
batch_loglik = self._evaluate_batch_log_likelihood(
[event_sequences[i] for i in batch_indices] + [
copy.deepcopy(event_sequences[-1]) for _ in range(pad_size)],
[inputs[i] for i in batch_indices] + inputs[-1] * pad_size,
np.append([initial_state] * len(batch_indices),
np.tile(inputs[-1, :], (pad_size, 1)),
axis=0))
loglik[batch_indices] = batch_loglik[0:num_extra]
return loglik
class EventSequenceRnnConfig(object):
  """Configuration bundle for an event sequence RNN.

  Only one of `steps_per_quarter` or `steps_per_second` will be applicable for
  any particular model.

  Attributes:
    details: The GeneratorDetails message describing the config.
    encoder_decoder: The EventSequenceEncoderDecoder or
        ConditionalEventSequenceEncoderDecoder object to use.
    hparams: The HParams containing hyperparameters to use. Will be merged
        with default hyperparameter values.
    steps_per_quarter: The integer number of quantized time steps per quarter
        note to use.
    steps_per_second: The integer number of quantized time steps per second to
        use.
  """

  def __init__(self, details, encoder_decoder, hparams,
               steps_per_quarter=4, steps_per_second=100):
    # Start from the default hyperparameters, then overlay whatever the
    # caller supplied so explicit values always win.
    merged_hparams = dict(
        batch_size=64,
        rnn_layer_sizes=[128, 128],
        dropout_keep_prob=1.0,
        attn_length=0,
        clip_norm=3,
        learning_rate=0.001,
        residual_connections=False,
        use_cudnn=False)
    merged_hparams.update(hparams.values())
    self.details = details
    self.encoder_decoder = encoder_decoder
    self.hparams = contrib_training.HParams(**merged_hparams)
    self.steps_per_quarter = steps_per_quarter
    self.steps_per_second = steps_per_second
| |
# Copyright (c) 2008 Divmod. See LICENSE for details.
"""
Tests for the Axiom upgrade system.
"""
import sys, io
from zope.interface import Interface
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.python import filepath
from twisted.application.service import IService
from twisted.internet.defer import maybeDeferred, succeed
from twisted.python.reflect import namedModule
from twisted.python import log
from axiom.iaxiom import IAxiomaticCommand
from axiom import store, upgrade, item, errors, attributes
from axiom.upgrade import _StoreUpgrade
from axiom.item import declareLegacyItem
from axiom.scripts import axiomatic
from axiom.store import Store
from axiom.substore import SubStore
from axiom.plugins.axiom_plugins import Upgrade
from axiom.test.util import CommandStub, callWithStdoutRedirect
import six
def axiomInvalidate(itemClass):
    """
    Remove the registered item class from the Axiom module system's memory,
    including: the item's current schema, legacy schema declarations, and
    upgraders.

    This makes it possible, for example, to reload a module without Axiom
    complaining about it.

    This API is still in a test module because it is _NOT YET SAFE_ for using
    while databases are open; it does not interact with open databases' caches,
    for example.

    @param itemClass: an Item subclass that you no longer wish to use.
    """
    targetName = itemClass.typeName
    # Deliberately avoid equality-based removal (list.remove and friends) on
    # attribute objects; walk each cascade list backwards and delete by index
    # using identity comparison only. -exarkun
    for cascades in six.itervalues(attributes._cascadingDeletes):
        for index in range(len(cascades) - 1, -1, -1):
            if cascades[index].type is itemClass:
                del cascades[index]
    store._typeNameToMostRecentClass.pop(targetName, None)
    # Drop every legacy schema declaration registered for this type name.
    for typeName, schemaVersion in list(item._legacyTypes.keys()):
        if typeName == targetName:
            item._legacyTypes.pop((typeName, schemaVersion))
    # And forget any upgraders keyed on this type name.
    for registryKey in list(upgrade._upgradeRegistry.keys()):
        if registryKey[0] == targetName:
            upgrade._upgradeRegistry.pop(registryKey)
def axiomInvalidateModule(moduleObject):
    """
    Call L{axiomInvalidate} on all Item subclasses defined in a module.
    """
    itemClasses = [value for value in list(moduleObject.__dict__.values())
                   if isinstance(value, item.MetaItem)]
    for itemClass in itemClasses:
        axiomInvalidate(itemClass)
# Every schema module loaded through loadSchemaModule, in load order.
# choose() consults this list to invalidate previously-loaded versions.
schemaModules = []
def loadSchemaModule(name):
    """
    Import the named schema module, record it in C{schemaModules}, and reset
    the current schema choice via C{choose(None)} so that no stale item
    classes remain registered.
    """
    loaded = namedModule(name)
    schemaModules.append(loaded)
    choose(None)
    return loaded
def choose(module=None):
    """
    Choose among the various "adventurer" modules for upgrade tests.

    @param module: the module object which should next be treated as "current".
    """
    # First forget every previously-loaded schema version, then (re)load the
    # requested module so its item classes become the current schema.
    for previous in schemaModules:
        axiomInvalidateModule(previous)
    if module is None:
        return
    six.moves.reload_module(module)
# Load the schema-module fixtures exercised by the upgrade tests below. Each
# call records the module in schemaModules and resets the current choice, so
# individual tests select which version is "current" via choose().
oldapp = loadSchemaModule('axiom.test.oldapp')
brokenapp = loadSchemaModule('axiom.test.brokenapp')
toonewapp = loadSchemaModule('axiom.test.toonewapp')
morenewapp = loadSchemaModule('axiom.test.morenewapp')
onestepapp = loadSchemaModule('axiom.test.onestepapp')
newapp = loadSchemaModule('axiom.test.newapp')
oldpath = loadSchemaModule('axiom.test.oldpath')
newpath = loadSchemaModule('axiom.test.newpath')
path_postcopy = loadSchemaModule('axiom.test.path_postcopy')
deleteswordapp = loadSchemaModule('axiom.test.deleteswordapp')
class SchemaUpgradeTest(unittest.TestCase):
    """
    Base class for upgrade tests: manages a single on-disk Store that can be
    closed and re-opened against different schema-module versions.
    """
    def setUp(self):
        self.dbdir = filepath.FilePath(self.mktemp())
    def openStore(self, dbg=False):
        # (Re)open the on-disk store; item upgrades are then driven by
        # whichever schema modules are currently chosen.
        self.currentStore = store.Store(self.dbdir, debug=dbg)
        return self.currentStore
    def closeStore(self):
        """
        Close C{self.currentStore} and discard the reference. If there is a
        store service running, stop it first.

        @return: a Deferred firing once the store has been closed.
        """
        service = IService(self.currentStore)
        if service.running:
            result = service.stopService()
        else:
            result = succeed(None)
        def close(ignored):
            self.currentStore.close()
            self.currentStore = None
        result.addCallback(close)
        return result
    def startStoreService(self):
        # Detach the batch processing controller before starting the
        # remaining services for the current store.
        svc = IService(self.currentStore)
        svc.getServiceNamed("Batch Processing Controller").disownServiceParent()
        svc.startService()
def _logMessagesFrom(f):
    """
    Invoke C{f} while observing the Twisted log, and return a Deferred that
    fires with the list of log event dictionaries emitted during the call.
    """
    collected = []
    log.addObserver(collected.append)
    deferredCall = maybeDeferred(f)
    def stopObserving(passthrough):
        # Bound methods of the same list compare equal, so this removes the
        # observer added above.
        log.removeObserver(collected.append)
        return passthrough
    deferredCall.addBoth(stopObserving)
    return deferredCall.addCallback(lambda _: collected)
class SwordUpgradeTest(SchemaUpgradeTest):
    """
    Exercise upgrades of the Player/Sword schemas across the various
    'adventurer' fixture modules (oldapp, newapp, morenewapp, onestepapp,
    toonewapp, brokenapp).
    """
    def tearDown(self):
        # Restore the original schema so later tests start from a known state.
        choose(oldapp)
    def testUnUpgradeableStore(self):
        # A store holding items *newer* than the loaded schema must refuse to
        # open rather than attempt a downgrade.
        self._testTwoObjectUpgrade()
        choose(toonewapp)
        self.assertRaises(errors.NoUpgradePathAvailable, self.openStore)
    def test_upgradeWithMissingVersion(self):
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(morenewapp)
        s = self.openStore()
        self.startStoreService()
        def afterUpgrade(result):
            # autoUpgrade=False: the service must already have upgraded these.
            player = s.getItemByID(playerID, autoUpgrade=False)
            sword = s.getItemByID(swordID, autoUpgrade=False)
            self._testPlayerAndSwordState(player, sword)
        return s.whenFullyUpgraded().addCallback(afterUpgrade)
    def test_upgradeWithMissingVersionAuto1(self):
        # Auto-upgrade on access, loading the player before the sword.
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(morenewapp)
        s = self.openStore()
        player = s.getItemByID(playerID)
        sword = s.getItemByID(swordID)
        self._testPlayerAndSwordState(player, sword)
    def test_upgradeWithMissingVersionAuto2(self):
        # Auto-upgrade on access, loading the sword before the player.
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(morenewapp)
        s = self.openStore()
        sword = s.getItemByID(swordID)
        player = s.getItemByID(playerID)
        self._testPlayerAndSwordState(player, sword)
    def test_upgradeSkipVersion(self):
        """
        Verify that an upgrader registered to skip a version can execute properly.
        """
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(onestepapp)
        s = self.openStore()
        self.startStoreService()
        def afterUpgrade(result):
            player = s.getItemByID(playerID, autoUpgrade=False)
            sword = s.getItemByID(swordID, autoUpgrade=False)
            self._testPlayerAndSwordState(player, sword)
        return s.whenFullyUpgraded().addCallback(afterUpgrade)
    def test_upgradeSkipVersionAuto1(self):
        """
        Verify that an upgrader registered to skip a version can execute properly.
        Auto-upgrade version, order #1.
        """
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(onestepapp)
        s = self.openStore()
        player = s.getItemByID(playerID)
        sword = s.getItemByID(swordID)
        self._testPlayerAndSwordState(player, sword)
    def test_upgradeSkipVersionAuto2(self):
        """
        Verify that an upgrader registered to skip a version can execute properly.
        Auto-upgrade version, order #2.
        """
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(onestepapp)
        s = self.openStore()
        sword = s.getItemByID(swordID)
        player = s.getItemByID(playerID)
        self._testPlayerAndSwordState(player, sword)
    def test_loggingAtAppropriateTimes(self):
        """
        Verify that log messages show up when we do upgrade work, but then don't
        when we don't.
        """
        def someLogging(logMessages):
            # Phase 1: an actual upgrade ran, so a 'finished upgrading'
            # message must have been logged.
            ok = False
            unrelatedMessages = []
            for msgdict in logMessages:
                msgstr = ''.join(msgdict.get('message', ()))
                if 'finished upgrading' in msgstr:
                    ok = True
                else:
                    unrelatedMessages.append(msgstr)
            self.failUnless(ok, "No messages related to upgrading: {!r}".format(unrelatedMessages))
            s = self.openStore()
            def afterUpgrade(noLogMessages):
                # Phase 2: nothing left to upgrade, so no upgrade messages.
                for nmsgdict in noLogMessages:
                    mm = ''.join(nmsgdict.get('message', ()))
                    if mm:
                        self.failIfIn('finished upgrading', mm)
            self.startStoreService()
            return _logMessagesFrom(s.whenFullyUpgraded
                ).addCallback(afterUpgrade)
        return _logMessagesFrom(self.testTwoObjectUpgrade_UseService).addCallback(someLogging)
    def test_basicErrorLogging(self):
        """
        Verify that if an exception is raised in an upgrader, the exception
        will be logged.
        """
        choose(oldapp)
        s = self.openStore()
        swordID = oldapp.Sword(
            store=s,
            name='flaming vorpal doom',
            hurtfulness=7).storeID
        self.closeStore()
        choose(brokenapp)
        s = self.openStore()
        self.startStoreService()
        def checkException(ign):
            # It's redundant that the errback is called and the failure is
            # logged. See #2638.
            loggedErrors = self.flushLoggedErrors(errors.ItemUpgradeError)
            self.assertEqual(len(loggedErrors), 1)
            upgradeError = loggedErrors[0]
            loggedErrors = self.flushLoggedErrors(brokenapp.UpgradersAreBrokenHere)
            self.assertEqual(len(loggedErrors), 1)
            oldType = item.declareLegacyItem(
                oldapp.Sword.typeName,
                oldapp.Sword.schemaVersion, {})
            e = upgradeError.value
            self.assertEqual(e.storeID, swordID)
            self.assertIdentical(e.oldType, oldType)
            self.assertIdentical(e.newType, brokenapp.Sword)
        d = s.whenFullyUpgraded()
        d = self.assertFailure(d, errors.ItemUpgradeError)
        d.addCallback(checkException)
        return d
    def _testTwoObjectUpgrade(self):
        """
        Create one Sword and one Player (which references the sword) under the
        old schema and close the store; return their storeIDs.
        """
        choose(oldapp)
        s = self.openStore()
        self.assertIdentical(
            store._typeNameToMostRecentClass[oldapp.Player.typeName],
            oldapp.Player)
        sword = oldapp.Sword(
            store=s,
            name='flaming vorpal doom',
            hurtfulness=7)
        player = oldapp.Player(
            store=s,
            name='Milton',
            sword=sword)
        self.closeStore()
        # Perform an adjustment.
        return player.storeID, sword.storeID
    def testTwoObjectUpgrade_OuterFirst(self):
        playerID, swordID = self._testTwoObjectUpgrade()
        player, sword = self._testLoadPlayerFirst(playerID, swordID)
        self._testPlayerAndSwordState(player, sword)
    def testTwoObjectUpgrade_InnerFirst(self):
        playerID, swordID = self._testTwoObjectUpgrade()
        player, sword = self._testLoadSwordFirst(playerID, swordID)
        self._testPlayerAndSwordState(player, sword)
    def testTwoObjectUpgrade_AutoOrder(self):
        playerID, swordID = self._testTwoObjectUpgrade()
        player, sword = self._testAutoUpgrade(playerID, swordID)
        self._testPlayerAndSwordState(player, sword)
    def testTwoObjectUpgrade_UseService(self):
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(newapp)
        s = self.openStore()
        self.startStoreService()
        # XXX *this* test really needs 10 or so objects to play with in order
        # to be really valid...
        def afterUpgrade(result):
            player = s.getItemByID(playerID, autoUpgrade=False)
            sword = s.getItemByID(swordID, autoUpgrade=False)
            self._testPlayerAndSwordState(player, sword)
            # Stop that service we started.
            return IService(s).stopService()
        return s.whenFullyUpgraded().addCallback(afterUpgrade)
    def _testAutoUpgrade(self, playerID, swordID):
        # Drive the upgrade manager to completion synchronously, then load
        # both items without further auto-upgrading.
        choose(newapp)
        s = self.openStore()
        for dummy in s._upgradeManager.upgradeEverything():
            pass
        player = s.getItemByID(playerID, autoUpgrade=False)
        sword = s.getItemByID(swordID, autoUpgrade=False)
        return player, sword
    def _testLoadPlayerFirst(self, playerID, swordID):
        # Everything old is new again
        choose(newapp)
        s = self.openStore()
        player = s.getItemByID(playerID)
        sword = s.getItemByID(swordID)
        return player, sword
    def _testLoadSwordFirst(self, playerID, swordID):
        choose(newapp)
        s = self.openStore()
        sword = s.getItemByID(swordID)
        player = s.getItemByID(playerID)
        return player, sword
    def _testPlayerAndSwordState(self, player, sword):
        """
        Assert that both items reflect the post-upgrade schema: attributes
        renamed/removed, the sword->owner back-reference installed, and each
        item activated exactly once.
        """
        assert not player.__legacy__
        assert not sword.__legacy__
        self.assertEqual(player.name, 'Milton')
        self.assertFalse(hasattr(player, 'sword'))
        self.assertEqual(sword.name, 'flaming vorpal doom')
        self.assertEqual(sword.damagePerHit, 14)
        self.assertFalse(hasattr(sword, 'hurtfulness'))
        self.assertEqual(sword.owner.storeID, player.storeID)
        self.assertEqual(type(sword.owner), type(player))
        self.assertEqual(sword.owner, player)
        self.assertEqual(sword.activated, 1)
        self.assertEqual(player.activated, 1)
    def test_multipleLegacyVersions(self):
        """
        If multiple legacy schema versions are present, all of them should be
        upgraded.
        """
        playerID, swordID = self._testTwoObjectUpgrade()
        choose(newapp)
        s = self.openStore()
        self.startStoreService()
        def afterFirstUpgrade(result):
            # Note: this local `s` deliberately shadows the outer one; a
            # fresh store is opened against the even-newer schema.
            choose(morenewapp)
            s = self.openStore()
            self.startStoreService()
            return s.whenFullyUpgraded().addCallback(afterSecondUpgrade, s)
        def afterSecondUpgrade(result, store):
            player = store.getItemByID(playerID, autoUpgrade=False)
            sword = store.getItemByID(swordID, autoUpgrade=False)
            self._testPlayerAndSwordState(player, sword)
        d = s.whenFullyUpgraded()
        d.addCallback(lambda ignored: self.closeStore())
        d.addCallback(afterFirstUpgrade)
        return d
class SubStoreCompat(SwordUpgradeTest):
    """
    Run all of L{SwordUpgradeTest}'s tests against items living in a substore
    rather than directly in the top-level store.
    """
    def setUp(self):
        self.topdbdir = filepath.FilePath(self.mktemp())
        self.subStoreID = None
    def openStore(self):
        # Open the top store, then open (or create, on first call) the
        # substore inside it; tests operate on the substore.
        self.currentTopStore = store.Store(self.topdbdir)
        if self.subStoreID is not None:
            self.currentSubStore = self.currentTopStore.getItemByID(self.subStoreID).open()
        else:
            ss = SubStore.createNew(self.currentTopStore,
                                    ['sub'])
            self.subStoreID = ss.storeID
            self.currentSubStore = ss.open()
        return self.currentSubStore
    def closeStore(self):
        """
        Close C{self.currentTopStore} and C{self.currentSubStore}. If there is
        a store service running in C{self.currentTopStore}, stop it first.
        """
        service = IService(self.currentTopStore)
        if service.running:
            result = service.stopService()
        else:
            result = succeed(None)
        def stopped(ignored):
            # Close inner store before outer.
            self.currentSubStore.close()
            self.currentTopStore.close()
            self.currentSubStore = None
            self.currentTopStore = None
        result.addCallback(stopped)
        return result
    def startStoreService(self):
        # Services are started on the *top* store here, unlike the base class.
        svc = IService(self.currentTopStore)
        svc.getServiceNamed("Batch Processing Controller").disownServiceParent()
        svc.startService()
class PathUpgrade(SchemaUpgradeTest):
    """
    Tests for items with path attributes, using
    registerAttributeCopyingUpgrader.
    """
    def _runPathUpgrade(self, module):
        """
        Load the 'oldpath' module, then upgrade items created from it to the
        versions in the specified module.

        @return: a two-tuple of the file path stored on the old item and a
            Deferred firing when the upgrade has completed.
        """
        axiomInvalidateModule(module)
        six.moves.reload_module(oldpath)
        self.openStore()
        nfp = self.currentStore.newFilePath("pathname")
        oldpath.Path(store=self.currentStore,
                     thePath=nfp)
        self.closeStore()
        # Swap schemas: forget oldpath, make `module` current, re-open.
        axiomInvalidateModule(oldpath)
        six.moves.reload_module(module)
        self.openStore()
        self.startStoreService()
        return nfp, self.currentStore.whenFullyUpgraded()
    def testUpgradePath(self):
        """
        Verify that you can upgrade a path attribute in the simplest possible
        way.
        """
        nfp, d = self._runPathUpgrade(newpath)
        def checkPathEquivalence(n):
            self.assertEqual(
                self.currentStore.findUnique(newpath.Path).thePath.path,
                nfp.path)
        return d.addCallback(checkPathEquivalence)
    def test_postCopy(self):
        """
        Ensure that a post-copy function, if specified to
        registerAttributeCopyingUpgrader, is run after item upgrade.
        """
        nfp, d = self._runPathUpgrade(path_postcopy)
        # The fixture's post-copy hook appends the "foo" child segment.
        path2 = nfp.child("foo")
        def checkPath(_):
            self.assertEqual(
                self.currentStore.findUnique(path_postcopy.Path).thePath.path,
                path2.path)
        return d.addCallback(checkPath)
# Fixture modules for the deletion-during-upgrade tests below.
oldcirc = loadSchemaModule('axiom.test.oldcirc')
newcirc = loadSchemaModule('axiom.test.newcirc')
oldobsolete = loadSchemaModule('axiom.test.oldobsolete')
newobsolete = loadSchemaModule('axiom.test.newobsolete')
# Marker interface used as the powerup interface in DeletionTest.
class IObsolete(Interface):
    """
    Interface representing an undesirable feature.
    """
class DeletionTest(SchemaUpgradeTest):
    """
    Tests for items that get deleted while their referrers are being upgraded.
    """
    def testCircular(self):
        """
        If you access an item, B, through a reference on another item, A, which
        is deleted in the course of B's upgrade, you should still get a
        reference to B.
        """
        six.moves.reload_module(oldcirc)
        self.openStore()
        b = oldcirc.B(a=oldcirc.A(store=self.currentStore),
                      store=self.currentStore)
        b.a.b = b
        self.closeStore()
        axiomInvalidateModule(oldcirc)
        six.moves.reload_module(newcirc)
        self.openStore()
        # Resolving the same reference twice must yield the same B, even
        # though each findUnique produces a distinct (re-created) A.
        origA = self.currentStore.findUnique(newcirc.A)
        origB = origA.b
        secondA = self.currentStore.findUnique(newcirc.A)
        secondB = secondA.b
        self.assertEqual(origB, secondB)
        self.assertNotEqual(origA, secondA)
    def testPowerupsFor(self):
        """
        Powerups deleted during upgrades should be omitted from the results of
        powerupsFor.
        """
        six.moves.reload_module(oldobsolete)
        self.openStore()
        o = oldobsolete.Obsolete(store=self.currentStore)
        self.currentStore.powerUp(o, IObsolete)
        # sanity check
        self.assertEqual(IObsolete(self.currentStore), o)
        self.closeStore()
        axiomInvalidateModule(oldobsolete)
        six.moves.reload_module(newobsolete)
        self.openStore()
        self.assertEqual(list(self.currentStore.powerupsFor(IObsolete)), [])
        self.closeStore()
        axiomInvalidateModule(newobsolete)
    def testPowerupsAdapt(self):
        """
        Powerups deleted during upgrades should be omitted from the results of
        powerupsFor.
        """
        six.moves.reload_module(oldobsolete)
        self.openStore()
        o = oldobsolete.Obsolete(store=self.currentStore)
        self.currentStore.powerUp(o, IObsolete)
        # sanity check
        self.assertEqual(IObsolete(self.currentStore), o)
        self.closeStore()
        axiomInvalidateModule(oldobsolete)
        six.moves.reload_module(newobsolete)
        self.openStore()
        # Adaptation (rather than powerupsFor) must also skip the deleted
        # powerup, falling back to the provided default.
        self.assertEqual(IObsolete(self.currentStore, None), None)
        self.closeStore()
        axiomInvalidateModule(newobsolete)
# Paired old/new fixture modules for DuringUpgradeTests below.
two_upgrades_old = loadSchemaModule(
    'axiom.test.upgrade_fixtures.two_upgrades_old')
two_upgrades_new = loadSchemaModule(
    'axiom.test.upgrade_fixtures.two_upgrades_new')
reentrant_old = loadSchemaModule(
    'axiom.test.upgrade_fixtures.reentrant_old')
reentrant_new = loadSchemaModule(
    'axiom.test.upgrade_fixtures.reentrant_new')
override_init_old = loadSchemaModule(
    'axiom.test.upgrade_fixtures.override_init_old')
override_init_new = loadSchemaModule(
    'axiom.test.upgrade_fixtures.override_init_new')
replace_attribute_old = loadSchemaModule(
    'axiom.test.upgrade_fixtures.replace_attribute_old')
replace_attribute_new = loadSchemaModule(
    'axiom.test.upgrade_fixtures.replace_attribute_new')
replace_delete_old = loadSchemaModule(
    'axiom.test.upgrade_fixtures.replace_delete_old')
replace_delete_new = loadSchemaModule(
    'axiom.test.upgrade_fixtures.replace_delete_new')
class DuringUpgradeTests(unittest.TestCase):
    """
    Tests for upgraders' interactions with each other and with the Store while
    an upgrader is running.
    """
    def tearDown(self):
        choose(None)
    # Class-level defaults; storeWithVersion() populates these lazily.
    dbdir = None
    currentStore = None
    def storeWithVersion(self, chosenModule):
        """
        Open a store with a particular module chosen, closing the old store if
        it was open already.
        """
        choose(chosenModule)
        if self.currentStore is not None:
            self.currentStore.close()
        if self.dbdir is None:
            self.dbdir = filepath.FilePath(self.mktemp())
        self.currentStore = store.Store(self.dbdir)
        return self.currentStore
    def test_upgradeLegacyReference(self):
        """
        Let a and b be two items which are being upgraded, instances of item
        types A and B respectively. a has a reference attribute, x, which
        points to b. In A's 1to2 upgrader, newA.x is set to oldA.x, which is
        (at that time) a DummyItem, i.e. an item with __legacy__ set to True.
        This is a regression test for a bug in this scenario where caching was
        too aggressive, and a.x would still refer to a legacy item after the
        upgrade was finished. After performing this upgrade, a.x should refer
        to a B v2, i.e. an upgraded version of b.
        """
        old = self.storeWithVersion(two_upgrades_old)
        storeID = two_upgrades_old.Referrer(
            store=old,
            referee=two_upgrades_old.Referee(store=old)).storeID
        new = self.storeWithVersion(two_upgrades_new)
        referrer = new.getItemByID(storeID)
        referee = referrer.referee
        self.assertTrue(
            isinstance(referee, two_upgrades_new.Referee),
            "{!r} is a {!r} but should be {!r}".format(
                referee, type(referee), two_upgrades_new.Referee))
    def test_reentrantUpgraderFailure(self):
        """
        If, while an upgrader is running, it triggers its own upgrade, there
        should be a loud failure; it's already hard enough to deal with upgrade
        ordering and querying for legacy items; upgraders cannot reasonably be
        written to be correct in the face of reentrancy.
        """
        old = self.storeWithVersion(reentrant_old)
        storeID = reentrant_old.Simple(store=old).storeID
        new = self.storeWithVersion(reentrant_new)
        self.assertRaises(errors.UpgraderRecursion, new.getItemByID, storeID)
        # A whitebox flourish to make sure our state tracking is correct:
        self.assertFalse(new._upgradeManager._currentlyUpgrading,
                         "No upgraders should currently be in progress.")
    def test_overridenInitializerInUpgrader(self):
        """
        A subclass of Item which overrides __init__ should be cached by the end
        of Item.__init__, so that logic written by the subclass has normal
        caching semantics.
        """
        old = self.storeWithVersion(override_init_old)
        storeID = override_init_old.Simple(store=old).storeID
        new = self.storeWithVersion(override_init_new)
        upgraded = new.getItemByID(storeID)
        # The fixture's __init__ records (self, store.getItemByID(self))
        # into `verify`; both must be the upgraded item itself.
        simpleSelf, simpleGotItem = upgraded.verify
        self.assertIdentical(upgraded, simpleSelf)
        self.assertIdentical(upgraded, simpleGotItem)
    def _reentrantReferenceForeignUpgrader(self, oldModule, newModule):
        # Shared body for the two replace-during-upgrade tests below.
        old = self.storeWithVersion(oldModule)
        storeID = oldModule.Referrer(
            store=old, referee=oldModule.Referee(
                store=old, value=oldModule.OLD_VALUE)).storeID
        new = self.storeWithVersion(newModule)
        referrer = new.getItemByID(storeID)
        upgraded = referrer.referee
        self.assertEqual(
            upgraded.value,
            newModule.NEW_VALUE,
            "Upgraded reference does not have new value.")
    def test_referenceModifiedByForeignUpgrader(self):
        """
        If the value of a reference on an Item requires an upgrade and the
        upgrade replaces the value of the reference with a different Item, then
        evaluating the reference attribute on the referrer should result in the
        new value of the attribute.
        """
        self._reentrantReferenceForeignUpgrader(
            replace_attribute_old, replace_attribute_new)
    def test_cascadingDeletedReferenceModifiedByForeignUpgrader(self):
        """
        If the value of a whenDeleted=CASCADE reference on an Item requires an
        upgrade and the upgrade replaces the value of the reference with a new
        Item and then deletes the old value of the reference, then evaluating
        the reference attribute on the referrer should result in the new value
        of the attribute.
        """
        self._reentrantReferenceForeignUpgrader(
            replace_delete_old, replace_delete_new)
class AxiomaticUpgradeTest(unittest.TestCase):
    """
    L{Upgrade} implements an I{axiomatic} subcommand for synchronously
    upgrading all items in a store.
    """
    def setUp(self):
        """
        Create a temporary on-disk Store and an instance of L{Upgrade}.
        """
        self.dbdir = self.mktemp()
        self.store = store.Store(self.dbdir)
    def tearDown(self):
        """
        Close the temporary Store.
        """
        self.store.close()
    def test_providesCommandInterface(self):
        """
        L{Upgrade} provides L{IAxiomaticCommand}.
        """
        self.assertTrue(verifyObject(IAxiomaticCommand, Upgrade))
    def test_axiomaticSubcommand(self):
        """
        L{Upgrade} is available as a subcommand of I{axiomatic}.
        """
        subCommands = axiomatic.Options().subCommands
        [options] = [cmd[2] for cmd in subCommands if cmd[0] == 'upgrade']
        self.assertIdentical(options, Upgrade)
    def test_successOutput(self):
        """
        Upon successful completion of the upgrade, L{Upgrade} writes a success
        message to stdout.
        """
        cmd = Upgrade()
        cmd.parent = CommandStub(self.store, 'upgrade')
        result, output = callWithStdoutRedirect(cmd.parseOptions, [])
        self.assertEqual(output.getvalue(), 'Upgrade complete\n')
    def test_axiomaticUpgradeEverything(self):
        """
        L{Upgrade.upgradeStore} upgrades all L{Item}s.
        """
        choose(oldapp)
        swordID = oldapp.Sword(
            store=self.store, name='broadsword', hurtfulness=5).storeID
        self.store.close()
        # deleteswordapp's upgrader deletes Sword items entirely, so after
        # the upgrade the sword must no longer be retrievable.
        choose(deleteswordapp)
        cmd = Upgrade()
        cmd.parent = CommandStub(store.Store(self.dbdir), 'upgrade')
        result, output = callWithStdoutRedirect(
            cmd.parseOptions, ['--count', '100'])
        self.store = store.Store(self.dbdir)
        self.assertRaises(
            KeyError, self.store.getItemByID, swordID, autoUpgrade=False)
    def test_axiomaticUpgradeExceptionBubbling(self):
        """
        Exceptions encountered by L{Upgrade.upgradeStore} are handled and
        re-raised as L{errors.ItemUpgradeError} with attributes indicating
        which L{Item} was being upgraded when the exception occurred.
        """
        choose(oldapp)
        swordID = oldapp.Sword(
            store=self.store, name='longsword', hurtfulness=4).storeID
        self.store.close()
        choose(brokenapp)
        self.store = store.Store(self.dbdir)
        cmd = Upgrade()
        cmd.parent = CommandStub(self.store, 'upgrade')
        cmd.count = 100
        err = self.assertRaises(
            errors.ItemUpgradeError,
            callWithStdoutRedirect, cmd.upgradeStore, self.store)
        self.assertTrue(
            err.originalFailure.check(brokenapp.UpgradersAreBrokenHere))
        oldType = item.declareLegacyItem(
            oldapp.Sword.typeName,
            oldapp.Sword.schemaVersion, {})
        self.assertEqual(err.storeID, swordID)
        self.assertIdentical(err.oldType, oldType)
        self.assertIdentical(err.newType, brokenapp.Sword)
    def test_axiomaticUpgradePerformFails(self):
        """
        If an exception occurs while upgrading items, L{Upgrade.postOptions}
        reports the item and schema version for which it occurred and returns
        without exception.
        """
        choose(oldapp)
        swordID = oldapp.Sword(
            store=self.store, name='rapier', hurtfulness=3).storeID
        self.store.close()
        choose(brokenapp)
        self.store = store.Store(self.dbdir)
        cmd = Upgrade()
        cmd.parent = CommandStub(self.store, 'upgrade')
        result, output = callWithStdoutRedirect(
            cmd.parseOptions, ['--count', '100'])
        lines = output.getvalue().splitlines()
        # Ensure that the original error is output.
        self.assertEqual(lines[0], 'Upgrader error:')
        self.assertTrue(len(lines) > 2)
        oldType = oldapp.Sword
        newType = store._typeNameToMostRecentClass[oldType.typeName]
        msg = cmd.errorMessageFormat % (
            oldType.typeName, swordID, oldType.schemaVersion,
            newType.schemaVersion)
        self.assertTrue(lines[-1].startswith(msg))
    def test_upgradeStoreRecursing(self):
        """
        L{Upgrade} upgrades L{Item}s in substores.
        """
        choose(oldapp)
        ss1 = SubStore.createNew(self.store, ['a'])
        ss2 = SubStore.createNew(self.store, ['b'])
        swordIDs = [
            (ss1.storeID, oldapp.Sword(store=ss1.open(), name='foo').storeID),
            (ss2.storeID, oldapp.Sword(store=ss2.open(), name='bar').storeID)]
        del ss1, ss2
        self.store.close()
        choose(deleteswordapp)
        self.store = store.Store(self.dbdir)
        cmd = Upgrade()
        cmd.parent = CommandStub(self.store, 'upgrade')
        callWithStdoutRedirect(cmd.parseOptions, [])
        # Both swords, in both substores, must have been deleted.
        for (ssid, swordID) in swordIDs:
            self.assertRaises(
                KeyError,
                self.store.getItemByID(ssid).open().getItemByID, swordID)
class StoreUpgradeTests(unittest.TestCase):
    """
    Tests for L{upgrade._StoreUpgrade}.
    """
    def setUp(self):
        # In-memory store; only the upgrader's bookkeeping is exercised here.
        self.store = Store()
        self._upgrader = _StoreUpgrade(self.store)
    def test_queueMultipleVersions(self):
        """
        If multiple schema versions are queued for upgrade, upgrades should be
        attempted for all of them (but only attempted once per version).
        """
        legacy1 = declareLegacyItem('test_type', 1, {})
        legacy2 = declareLegacyItem('test_type', 2, {})
        self._upgrader.queueTypeUpgrade(legacy1)
        self._upgrader.queueTypeUpgrade(legacy2)
        # Queueing the same version twice must not create a duplicate entry.
        self._upgrader.queueTypeUpgrade(legacy2)
        self.assertEqual(len(self._upgrader._oldTypesRemaining), 2)
| |
import itertools
from collections import defaultdict
import numpy as np
from accumulate import models
class Trials():
    """ Simulate and analyze 2 category accumulation designs.

    A trial is a string of 'A'/'B' exemplars of length l (l must be even).
    Only the first half of all 2**l permutations is examined: the second
    half is the A/B mirror image of the first, so it would double the
    computation while adding no useful information.
    """
    def __init__(self, l):
        # l must be even so the two 'undecidable' reference trials
        # (ABAB..., BABA...) are well defined.
        if (l % 2) != 0:
            raise ValueError('l must be even.')
        self.l = float(l)
        self.trial_count = 0
        self.trials = self._generate_trials()
        # Integer division: used to stop self.trials iterations at the
        # halfway point (the reflection argument above).
        self.max_trial_count = (2 ** int(l)) // 2
    def _generate_trials(self):
        """ Create a generator of all trial permutations. """
        self.trial_count = 0
        ## reset
        return itertools.product('AB', repeat=int(self.l))
        ## Calculate all possible unique combinations
        ## for a given trial length.
    def _hamming(self, trial):
        """ Return the minimum hamming distance between the two 'undecidable'
        trials (e.g. ABAB, BABA when l is 4) and <trial>. """
        # Create the two undecidable reference trials.
        refA = 'AB' * int(self.l / 2)
        refB = 'BA' * int(self.l / 2)
        # Calculate the two Hamming distances.
        dA = 0
        dB = 0
        for ii, t in enumerate(trial):
            if t != refA[ii]:
                dA += 1
            if t != refB[ii]:
                dB += 1
        # Return the smaller of the two.
        return min(dA, dB)
    def _count(self, trial):
        """ Return a count of As and Bs for <trial>. """
        # cA is the number of As; cB is l - cA.
        cA = 0
        for t in trial:
            if t == 'A':
                cA += 1
        return cA, (int(self.l) - cA)
    def print_trials(self):
        """ Print all trials to stdout. """
        # Printing exhausts the generator, so refresh it afterwards.
        print(list(self.trials))
        self.trials = self._generate_trials()
    def categorize(self, model):
        """ Return category decisions, scores for both the chosen and
        the not, the number of exemplars experienced, using the
        decision criterion <decide> ('count', 'bayes', 'likelihood',
        'drift', 'information' or 'last') and <threshold> (0-1).
        If the decider requires extra parameters, include them in the
        params dictionary, e.g. the drift decider needs a weight, w,
        so params would be {'w':0.25} if w was 0.25. """
        # OK. Run the model over the first half of all trials.
        model_results = defaultdict(dict)
        while self.trial_count < self.max_trial_count:
            # Use the builtin next() (works on both Python 2 and 3;
            # the old .next() call was Python 2 only).
            trial = ''.join(next(self.trials))
            # Make a decision.
            decision = model(trial)
            ## If the decider needs parameters construct
            ## via closure, see the code
            ## accumulate.models.construct for details
            # Store it in the (2) nested dict, model_results.
            model_results[trial][model.__name__] = decision
            # Update the stop counter.
            self.trial_count += 1
        # For the next model, refresh trials.
        self.trials = self._generate_trials()
        return model_results
    def distances(self):
        """
        Return the minimum Hamming Distance between the two
        'undecidable' trials types (e.g. ABAB, BABA when l is 4).
        This may be used an objective measure of trial difficulty.
        Low scores suggest greater difficulty.
        """
        # Calc and return (in a dict) the distances.
        dist = dict()
        for ii, trial in enumerate(self.trials):
            if ii >= self.max_trial_count:
                break
            dist[''.join(trial)] = self._hamming(trial)
        self.trials = self._generate_trials()
        return dist
    def counts(self):
        """ Return the number of As and Bs. """
        # Return the A/B counts in a dict.
        cnts = dict()
        for ii, trial in enumerate(self.trials):
            if ii >= self.max_trial_count:
                break
            cnts[''.join(trial)] = self._count(trial)
        self.trials = self._generate_trials()
        return cnts
    def maxspeed(self, start, stop):
        """ Calculate the speed with which A and B accumulate over a
        window. The window is defined by start and stop, ranging
        from 0 to l-1. """
        # NOTE: unlike distances()/counts(), this intentionally visits
        # every trial, matching the original behavior.
        speeds = dict()
        for trial in self.trials:
            # Apply the window to the trial, then count A and Bs.
            windowed = trial[start:stop + 1]
            cA = 0
            cB = 0
            l = float(len(windowed))
            for w in windowed:
                if w == 'A':
                    cA += 1
                else:
                    cB += 1
            # Divide by the window length and keep the larger rate.
            speeds[''.join(trial)] = max(cA / l, cB / l)
        self.trials = self._generate_trials()
        return speeds
    def write_trials(self, encoding=None):
        """ Write out trials, each row is a trial.
        If <encoding> is a list of length 2 the first entry will be used to
        encode 'A' the second for 'B'. """
        import csv
        # Re-encode if an encoding of length 2 was given.
        en_trials = []
        if encoding is not None:
            if len(encoding) != 2:
                raise ValueError('<encoding> can only have two entries.')
            for ii, trial in enumerate(self.trials):
                if ii >= self.max_trial_count:
                    break
                # Tuples keep the format identical to self.trials.
                en_trials.append(tuple(
                    encoding[0] if t == 'A' else encoding[1] for t in trial))
        else:
            # No encoding: write the raw trials (all of them, matching
            # the original behavior for this branch).
            en_trials = self.trials
        # Context manager guarantees the file is closed; text mode with
        # newline='' is what the Python 3 csv writer requires.
        with open(str(int(self.l)) + 'trials.dat', 'w', newline='') as f:
            w = csv.writer(f, delimiter='\t')
            w.writerows(en_trials)
        # Finally reset trials.
        self.trials = self._generate_trials()
| |
import errno
import io
import os
import shutil
from django.core.cache import cache as default_cache, caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.files import File
from django.core.urlresolvers import reverse
from django.db import models
from django.dispatch import receiver
from PIL import (Image as PILImage,
ImageFile,
JpegImagePlugin)
from betty.conf.app import settings
from betty.cropper.flush import get_cache_flusher
from betty.cropper.tasks import search_image_quality
from jsonfield import JSONField
# Make best effort to load corrupt images
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Module-level logger (inline __import__ avoids a top-level import line).
logger = __import__('logging').getLogger(__name__)
# File extensions served for legacy animated URLs (GIF plus a JPEG still).
ANIMATED_EXTENSIONS = ['gif', 'jpg']
# File extensions a crop can be rendered to.
CROP_EXTENSIONS = ["png", "jpg"]
def source_upload_to(instance, filename):
    """Storage path for an Image's original upload: <image path>/<filename>."""
    base = instance.path()
    return os.path.join(base, filename)
def optimized_upload_to(instance, filename):
    """Storage path for the optimized rendition, keeping the upload's extension."""
    ext = os.path.splitext(filename)[1]
    return os.path.join(instance.path(), "optimized{}".format(ext))
def optimize_image(image_model, image_buffer, filename):
    """Create and save the "optimized" rendition for image_model.

    Large images (wider than BETTY_MAX_WIDTH) are downscaled; otherwise the
    original bytes are stored unchanged as the optimized file. For JPEGs the
    quantization tables, subsampling and ICC profile are preserved where
    possible so re-encoding does not degrade quality.
    """
    im = PILImage.open(image_buffer)
    # Let's cache some important stuff before resizing, which can drop it.
    format = im.format
    icc_profile = im.info.get("icc_profile")
    quantization = getattr(im, "quantization", None)
    subsampling = None
    if format == "JPEG":
        try:
            subsampling = JpegImagePlugin.get_sampling(im)
        except IndexError:
            # Ignore if sampling fails
            logger.debug('JPEG sampling failed, ignoring')
        except:
            # mparent(2016-03-25): Eventually eliminate "catch all", but need to log errors to see
            # if we're missing any other exception types in the wild
            logger.exception('JPEG sampling error')
    if im.size[0] > settings.BETTY_MAX_WIDTH:
        # If the image is really large, we'll save a more reasonable version as the "original"
        height = settings.BETTY_MAX_WIDTH * float(im.size[1]) / float(im.size[0])
        im = im.resize((settings.BETTY_MAX_WIDTH, int(round(height))), PILImage.ANTIALIAS)
        out_buffer = io.BytesIO()
        if format == "JPEG" and im.mode == "RGB":
            # For JPEG files, we need to make sure that we keep the quantization profile
            try:
                im.save(
                    out_buffer,
                    icc_profile=icc_profile,
                    qtables=quantization,
                    subsampling=subsampling,
                    format="JPEG")
            except ValueError as e:
                # Maybe the image already had an invalid quant table?
                # Retry without qtables/subsampling in that one case only.
                if e.args[:1] == ('Invalid quantization table',):
                    out_buffer = io.BytesIO()  # Make sure it's empty after failed save attempt
                    im.save(
                        out_buffer,
                        icc_profile=icc_profile,
                        format=format,
                    )
                else:
                    raise
        else:
            # Non-JPEG (or non-RGB JPEG): plain re-encode in the original format.
            im.save(out_buffer,
                    icc_profile=icc_profile,
                    format=format)
        image_model.optimized.save(filename, File(out_buffer))
    else:
        # No modifications, just save original as optimized
        image_buffer.seek(0)
        image_model.optimized.save(filename, File(image_buffer))
    image_model.save()
class Ratio(object):
    """Parse a "WxH" aspect-ratio string, or the literal "original".

    For "original", width and height stay 0 (they get filled in later from
    the actual image dimensions by callers).
    """
    def __init__(self, ratio):
        self.string = ratio
        self.height = 0
        self.width = 0
        if ratio == "original":
            return
        parts = ratio.split("x")
        if len(parts) != 2:
            raise ValueError("Improper ratio!")
        self.width = int(parts[0])
        self.height = int(parts[1])
class ImageManager(models.Manager):
    def create_from_path(self, path, filename=None, name=None, credit=None):
        """Create an Image record (plus stored source/optimized files) from a
        file on disk.

        Args:
            path: filesystem path to the uploaded image.
            filename: stored filename; defaults to the basename of path.
            name: display name; defaults to filename.
            credit: optional photo credit string.
        Returns:
            The saved Image instance.
        """
        # Read the file inside a context manager so the handle is closed
        # even if PIL later fails to parse the data (the previous version
        # leaked the open file object).
        with open(path, 'rb') as f:
            image_buffer = io.BytesIO(f.read())
        im = PILImage.open(image_buffer)
        if filename is None:
            filename = os.path.split(path)[1]
        if name is None:
            name = filename
        image = self.create(
            name=name,
            credit=credit,
            width=im.size[0],
            height=im.size[1]
        )
        # Copy temp image file to S3
        image_buffer.seek(0)
        image.source.save(filename, File(image_buffer))
        # If the image is a GIF, we need to do some special stuff
        if im.format == "GIF":
            image.animated = True
            image.save()
        # Use temp image path (instead of pulling from S3)
        image_buffer.seek(0)
        optimize_image(image_model=image, image_buffer=image_buffer, filename=filename)
        if settings.BETTY_JPEG_QUALITY_RANGE:
            # Kick off the async search for the best per-width JPEG quality.
            search_image_quality.apply_async(args=(image.id,))
        return image
def save_crop_to_disk(image_data, path):
    """Write image_data (bytes) to path, creating parent directories as needed.

    An already-existing directory is fine; any other makedirs failure
    propagates.
    """
    try:
        os.makedirs(os.path.dirname(path))
    except OSError as e:
        if e.errno != errno.EEXIST:
            # Bare `raise` re-raises with the original traceback intact
            # (the old `raise e` discarded it).
            raise
    with open(path, 'wb+') as out:
        out.write(image_data)
def _read_from_storage(file_field):
    """Convenience wrapper to cache storage backend reads and ensure the entire
    file is read and properly closed.
    Currently source images are never deleted, so there is no need for a
    cache.delete() call anywhere. To be safe, cache expiration is set via
    BETTY_CACHE_STORAGE_SEC.
    NOTE(review): a falsy file_field implicitly returns None — callers are
    expected to handle that.
    """
    if file_field:
        # Prefer the dedicated 'storage' cache; fall back to the default one.
        try:
            cache = caches['storage']
        except InvalidCacheBackendError:
            cache = default_cache
        cache_key = ':'.join(['storage', file_field.name])
        raw_image = cache.get(cache_key)
        # NOTE(review): an empty (0-byte) cached payload is falsy and treated
        # as a cache miss, triggering a re-read from storage.
        if not raw_image:
            with file_field as f:
                raw_image = f.read()
            cache.set(cache_key, raw_image, settings.BETTY_CACHE_STORAGE_SEC)
        return io.BytesIO(raw_image)
class Image(models.Model):
    """An uploaded image: source file, optimized rendition, crop selections
    and JPEG-quality metadata, plus helpers to produce crops on demand."""
    name = models.CharField(max_length=255)
    credit = models.CharField(max_length=120, null=True, blank=True)
    # Original uploaded file.
    source = models.FileField(upload_to=source_upload_to,
                              max_length=255, null=True, blank=True)
    # Downscaled/re-encoded copy produced by optimize_image().
    optimized = models.FileField(upload_to=optimized_upload_to,
                                 max_length=255, null=True, blank=True)
    height = models.IntegerField(null=True, blank=True)
    width = models.IntegerField(null=True, blank=True)
    # Per-ratio crop boxes, e.g. {"16x9": {"x0": .., "y0": .., "x1": .., "y1": ..}}.
    selections = JSONField(null=True, blank=True)
    jpeg_quality = models.IntegerField(null=True, blank=True)
    # Mapping of crop width -> JPEG quality (filled by search_image_quality).
    jpeg_quality_settings = JSONField(null=True, blank=True)
    animated = models.BooleanField(default=False)
    # Used for "If-Modified-Since/304" handling
    last_modified = models.DateTimeField(auto_now=True)
    objects = ImageManager()
    class Meta:
        permissions = (
            ("read", "Can search images, and see the detail data"),
            ("crop", "Can crop images")
        )
    @property
    def id_string(self):
        # Primary key with a "/" inserted every four digits (e.g. 12345 ->
        # "1234/5"); used in URLs.
        id_string = ""
        for index, char in enumerate(str(self.id)):
            if index % 4 == 0 and index != 0:
                id_string += "/"
            id_string += char
        return id_string
    @property
    def best(self):
        """Convenience property preferring the optimized over the source image."""
        if self.optimized:
            return self.optimized
        else:
            return self.source
    def read_best_bytes(self):
        """Return a BytesIO of the preferred (optimized-if-available) file."""
        return _read_from_storage(self.best)
    def read_source_bytes(self):
        """Return a BytesIO of the original source file."""
        return _read_from_storage(self.source)
    def read_optimized_bytes(self):
        """Return a BytesIO of the optimized file."""
        return _read_from_storage(self.optimized)
    def get_height(self):
        """Lazily returns the height of the image.
        If the height exists in the database, that value is returned;
        otherwise it is read from the source image."""
        if not self.height:
            self._refresh_dimensions()
        return self.height
    def get_width(self):
        """Lazily returns the width of the image.
        If the width exists in the database, that value is returned;
        otherwise it is read from the source image."""
        if not self.width:
            self._refresh_dimensions()
        return self.width
    def _refresh_dimensions(self):
        # Populate height/width (in memory only — not saved) from the source file.
        img = PILImage.open(self.read_source_bytes())
        self.height = img.size[1]
        self.width = img.size[0]
    def get_selection(self, ratio):
        """Returns the image selection (crop box) for a given ratio.
        If the selection for this ratio has been set manually, that value
        is returned exactly, otherwise the selection is auto-generated."""
        # This is kiiiiinda a hack. If we have an optimized image, hack up the height and width.
        if self.width > settings.BETTY_MAX_WIDTH and self.optimized:
            height = settings.BETTY_MAX_WIDTH * float(self.height) / float(self.width)
            self.height = int(round(height))
            self.width = settings.BETTY_MAX_WIDTH
        selection = None
        if self.selections is not None:
            if ratio.string in self.selections:
                selection = self.selections.get(ratio.string)
                # Here I need to check for all kinds of bad data: out-of-bounds,
                # inverted, or negative coordinates all fall back to auto.
                if selection['y1'] > self.get_height() or selection['x1'] > self.get_width():
                    selection = None
                elif selection['y1'] < selection['y0'] or selection['x1'] < selection['x0']:
                    selection = None
                else:
                    for key in ('x0', 'x1', 'y0', 'y1'):
                        if selection[key] < 0:
                            selection = None
                            break
        if selection is None:
            # Auto-generate a centered crop matching the requested aspect.
            source_aspect = self.get_width() / float(self.get_height())
            selection_aspect = ratio.width / float(ratio.height)
            min_x = 0
            min_y = 0
            max_x = self.get_width()
            max_y = self.get_height()
            if source_aspect > selection_aspect:
                # Image is wider than the target ratio: trim left/right.
                offset = (max_x - (max_y * ratio.width / ratio.height)) / 2.0
                min_x = offset
                max_x -= offset
            if source_aspect < selection_aspect:
                # Image is taller than the target ratio: trim top/bottom.
                offset = (max_y - (max_x * ratio.height / ratio.width)) / 2.0
                min_y = offset
                max_y -= offset
            selection = {
                'x0': int(min_x),
                'y0': int(min_y),
                'x1': int(max_x),
                'y1': int(max_y)
            }
        # Final clamp to the image bounds.
        if selection['y1'] > self.get_height():
            selection['y1'] = int(self.get_height())
        if selection['x1'] > self.get_width():
            selection['x1'] = int(self.get_width())
        if selection['x0'] < 0:
            selection['x0'] = 0
        if selection['y0'] < 0:
            selection['y0'] = 0
        return selection
    def clear_crops(self, ratios=None):
        """Flush cached crop URLs and remove on-disk crops for the given
        ratios (default: all configured ratios plus "original")."""
        if ratios is None:
            ratios = list(settings.BETTY_RATIOS)
            ratios.append("original")
        # Optional cache flush support
        flusher = get_cache_flusher()
        if flusher:
            paths = []
            for ratio_slug in ratios:
                # Since might now know which formats to flush (since maybe not saving crops to
                # disk), need to flush all possible crops.
                paths += [self.get_absolute_url(ratio=ratio_slug, width=width, extension=extension)
                          for extension in CROP_EXTENSIONS
                          for width in sorted(set(settings.BETTY_WIDTHS +
                                                  settings.BETTY_CLIENT_ONLY_WIDTHS))]
            if self.animated:
                for extension in ANIMATED_EXTENSIONS:
                    paths.append(self.get_animated_url(extension=extension))
            flusher(paths)
        # Optional disk crops support
        if settings.BETTY_SAVE_CROPS_TO_DISK:
            for ratio_slug in (ratios + ['animated']):
                ratio_path = os.path.join(self.path(settings.BETTY_SAVE_CROPS_TO_DISK_ROOT),
                                          ratio_slug)
                if os.path.exists(ratio_path):
                    shutil.rmtree(ratio_path)
    def get_jpeg_quality(self, width):
        """Return the configured JPEG quality whose width setting is closest
        to the requested width, or None when no settings exist."""
        quality = None
        if self.jpeg_quality_settings:
            closest = 0
            for w, q in self.jpeg_quality_settings.items():
                if abs(width - int(w)) < abs(width - closest):
                    closest = int(w)
                    quality = self.jpeg_quality_settings[w]
        return quality
    def path(self, root=None):
        """Filesystem/storage directory for this image, derived from its id
        (a "/" every four digits), rooted at BETTY_IMAGE_ROOT by default."""
        id_string = ""
        for index, char in enumerate(str(self.id)):
            if index % 4 == 0:
                id_string += "/"
            id_string += char
        if root is None:
            root = settings.BETTY_IMAGE_ROOT
        return os.path.join(root, id_string[1:])
    def get_source(self):
        """Return (raw source bytes, lowercased format name)."""
        image_bytes = self.read_source_bytes()
        # Detect format
        img = PILImage.open(image_bytes)
        return image_bytes.getvalue(), img.format.lower()
    def get_animated(self, extension):
        """Legacy (Pre-v2.0) animated behavior.
        Originally betty just wrote these to disk on image creation and let NGINX try-files
        automatically serve these animated GIF + JPG.
        """
        assert self.animated
        img_bytes = self.read_best_bytes()
        if extension == "jpg":
            # Thumbnail: re-encode the (first frame of the) image as JPEG.
            img = PILImage.open(img_bytes)
            if img.mode != "RGB":
                img = img.convert("RGB")
            img_bytes = io.BytesIO()
            img.save(img_bytes, "JPEG")
        elif extension != "gif":
            raise Exception('Unsupported extension')
        if settings.BETTY_SAVE_CROPS_TO_DISK:
            save_crop_to_disk(img_bytes.getvalue(),
                              os.path.join(self.path(settings.BETTY_SAVE_CROPS_TO_DISK_ROOT),
                                           'animated',
                                           'original.{}'.format(extension)))
        return img_bytes.getvalue()
    def crop(self, ratio, width, extension):
        """Render a crop: select the box for ratio, resize to width, encode as
        extension ("jpg" or "png"), optionally persisting to disk. Returns the
        encoded bytes."""
        img = PILImage.open(self.read_best_bytes())
        icc_profile = img.info.get("icc_profile")
        if ratio.string == 'original':
            ratio.width = img.size[0]
            ratio.height = img.size[1]
        selection = self.get_selection(ratio)
        try:
            img = img.crop((selection['x0'], selection['y0'], selection['x1'], selection['y1']))
        except ValueError:
            # Looks like we have bad height and width data. Let's reload that and try again.
            self.width = img.size[0]
            self.height = img.size[1]
            self.save()
            selection = self.get_selection(ratio)
            img = img.crop((selection['x0'], selection['y0'], selection['x1'], selection['y1']))
        height = int(round(width * float(ratio.height) / float(ratio.width)))
        img = img.resize((width, height), PILImage.ANTIALIAS)
        if extension == "jpg":
            if img.mode != "RGB":
                img = img.convert("RGB")
            pillow_kwargs = {"format": "jpeg"}
            # Quality preference: per-width setting, then "keep" for JPEG
            # sources, then the configured default.
            if self.get_jpeg_quality(width):
                pillow_kwargs["quality"] = self.get_jpeg_quality(width)
            elif img.format == "JPEG":
                pillow_kwargs["quality"] = "keep"
            else:
                pillow_kwargs["quality"] = settings.BETTY_DEFAULT_JPEG_QUALITY
        if extension == "png":
            # Fix "cannot write mode CMYK as PNG" errors
            # https://github.com/python-pillow/Pillow/issues/1380
            if img.mode == 'CMYK':
                img = img.convert('RGB')
            pillow_kwargs = {"format": "png"}
        if icc_profile:
            pillow_kwargs["icc_profile"] = icc_profile
        tmp = io.BytesIO()
        img.save(tmp, **pillow_kwargs)
        if settings.BETTY_SAVE_CROPS_TO_DISK:
            # We only want to save this to the filesystem if it's one of our usual widths.
            if width in settings.BETTY_WIDTHS or not settings.BETTY_WIDTHS:
                ratio_dir = os.path.join(self.path(settings.BETTY_SAVE_CROPS_TO_DISK_ROOT),
                                         ratio.string)
                save_crop_to_disk(tmp.getvalue(),
                                  os.path.join(ratio_dir, "%d.%s" % (width, extension)))
        return tmp.getvalue()
    def get_absolute_url(self, ratio="original", width=600, extension="jpg"):
        """URL of the crop view for the given ratio/width/extension."""
        return reverse("betty.cropper.views.crop", kwargs={
            "id": self.id_string,
            "ratio_slug": ratio,
            "width": width,
            "extension": extension
        })
    def get_animated_url(self, extension="gif"):
        """URL of the legacy animated view for the given extension."""
        return reverse("betty.cropper.views.animated", kwargs={
            "id": self.id_string,
            "extension": extension
        })
    def to_native(self):
        """Returns a Python dictionary, suitable for serialization."""
        # This is kiiiiinda a hack. If we have an optimized image, hack up the height and width.
        if self.width > settings.BETTY_MAX_WIDTH and self.optimized:
            height = settings.BETTY_MAX_WIDTH * float(self.height) / float(self.width)
            self.height = int(round(height))
            self.width = settings.BETTY_MAX_WIDTH
        data = {
            'id': self.id,
            'name': self.name,
            'width': self.get_width(),
            'height': self.get_height(),
            'credit': self.credit,
            'selections': {}
        }
        for ratio in settings.BETTY_RATIOS:
            data['selections'][ratio] = self.get_selection(Ratio(ratio))
            # A selection matching the stored one was user-set; else auto.
            data['selections'][ratio]["source"] = "auto"
            if self.selections and data['selections'][ratio] == self.selections.get(ratio):
                data['selections'][ratio]["source"] = "user"
        return data
    def cache_key(self):
        """
        Returns string unique to cache instance
        """
        return "image-{}".format(self.id)
@receiver(models.signals.post_delete, sender=Image)
def auto_flush_and_delete_files_on_delete(sender, instance, **kwargs):
    """On Image deletion: flush cached crops, then remove the stored files."""
    instance.clear_crops()
    if instance.source:
        instance.source.delete(save=False)
    if instance.optimized:
        instance.optimized.delete(save=False)
| |
"""Provide support for MuTect and other paired analysis tools."""
from distutils.version import LooseVersion
import os
import toolz as tz
from bcbio import bam, broad, utils
from bcbio.utils import file_exists, get_in, open_gzipsafe
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.variation import bamprep, bedutils, gatk, vcfutils, scalpel
from bcbio.variation.realign import has_aligned_reads
from bcbio.variation.vcfutils import bgzip_and_index
from bcbio.log import logger
# Known benign Java exception messages (adjacent string literals concatenate
# into two full messages) that should not be treated as MuTect failures.
_PASS_EXCEPTIONS = set(["java.lang.RuntimeException: "
                        "java.lang.IllegalArgumentException: "
                        "Comparison method violates its general contract!",
                        "java.lang.IllegalArgumentException: "
                        "Comparison method violates its general contract!"])
def _check_mutect_version(broad_runner):
    """Validate the configured MuTect jar is at least version 1.1.5.

    An undetectable version only warns (assuming 1.1.5); a detected version
    older than 1.1.5 raises ValueError.
    """
    version = broad_runner.get_mutect_version()
    if version is None:
        logger.warn("WARNING")
        logger.warn("MuTect version could not be determined from jar file. "
                    "Please ensure you are using at least version 1.1.5, "
                    "as versions 1.1.4 and lower have known issues.")
        logger.warn("Proceeding but assuming correct version 1.1.5.")
        return
    if LooseVersion(version) < LooseVersion("1.1.5"):
        message = ("MuTect 1.1.4 and lower is known to have incompatibilities "
                   "with Java < 7, and this may lead to problems in analyses. "
                   "Please use MuTect 1.1.5 or higher (note that it requires "
                   "Java 7).")
        raise ValueError(message)
def _config_params(base_config, assoc_files, region, out_file, items):
    """Add parameters based on configuration variables, associated files and genomic regions.

    Builds the tail of the MuTect command line: dbSNP/COSMIC resources,
    region restriction, minimum allele fraction and any user-supplied
    resource options.
    """
    params = []
    dbsnp = assoc_files.get("dbsnp")
    if dbsnp:
        params += ["--dbsnp", dbsnp]
    cosmic = assoc_files.get("cosmic")
    if cosmic:
        params += ["--cosmic", cosmic]
    # Restrict calling to the intersection of variant regions and the
    # requested genomic region, when configured.
    variant_regions = bedutils.population_variant_regions(items)
    region = subset_variant_regions(variant_regions, region, out_file)
    if region:
        params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule",
                   "INTERSECTION"]
    # set low frequency calling parameter if adjusted
    # to set other MuTect parameters on contamination, pass options to resources for mutect
    # --fraction_contamination --minimum_normal_allele_fraction
    min_af = tz.get_in(["algorithm", "min_allele_fraction"], base_config)
    if min_af:
        # Config value is a percentage; MuTect expects a fraction.
        params += ["--minimum_mutation_cell_fraction", "%.2f" % (min_af / 100.0)]
    resources = config_utils.get_resources("mutect", base_config)
    if resources.get("options") is not None:
        params += [str(x) for x in resources.get("options", [])]
    # Output quality scores
    if "--enable_qscore_output" not in params:
        params.append("--enable_qscore_output")
    # drf not currently supported in MuTect to turn off duplicateread filter
    # params += gatk.standard_cl_params(items)
    return params
def _mutect_call_prep(align_bams, items, ref_file, assoc_files,
                      region=None, out_file=None):
    """Preparation work for MuTect.

    Indexes the reference and input BAMs, validates the MuTect version and
    assembles the base command line for a tumor (optionally tumor/normal)
    run. Returns (broad_runner, params).
    """
    base_config = items[0]["config"]
    # Picard is used only to index the reference; the mutect runner does the calling.
    broad_runner = broad.runner_from_path("picard", base_config)
    broad_runner.run_fn("picard_index_ref", ref_file)
    broad_runner = broad.runner_from_config(base_config, "mutect")
    _check_mutect_version(broad_runner)
    for x in align_bams:
        bam.index(x, base_config)
    paired = vcfutils.get_paired_bams(align_bams, items)
    if not paired:
        raise ValueError("Specified MuTect calling but 'tumor' phenotype not present in batch\n"
                         "https://bcbio-nextgen.readthedocs.org/en/latest/contents/"
                         "pipelines.html#cancer-variant-calling\n"
                         "for samples: %s" % ", " .join([dd.get_sample_name(x) for x in items]))
    params = ["-R", ref_file, "-T", "MuTect", "-U", "ALLOW_N_CIGAR_READS"]
    params += ["--read_filter", "NotPrimaryAlignment"]
    params += ["-I:tumor", paired.tumor_bam]
    params += ["--tumor_sample_name", paired.tumor_name]
    # Normal BAM and panel-of-normals are optional (tumor-only calling).
    if paired.normal_bam is not None:
        params += ["-I:normal", paired.normal_bam]
        params += ["--normal_sample_name", paired.normal_name]
    if paired.normal_panel is not None:
        params += ["--normal_panel", paired.normal_panel]
    params += _config_params(base_config, assoc_files, region, out_file, items)
    return broad_runner, params
def mutect_caller(align_bams, items, ref_file, assoc_files, region=None,
                  out_file=None):
    """Run the MuTect paired analysis algorithm.

    Produces SNVs with MuTect, then optionally adds indels from a configured
    indel caller (scalpel, pindel or Appistry SomaticIndelDetector) and
    combines the results into out_file. Skips work when out_file exists.
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not file_exists(out_file):
        base_config = items[0]["config"]
        broad_runner = broad.runner_from_config(base_config, "mutect")
        out_file_mutect = (out_file.replace(".vcf", "-mutect.vcf")
                           if "vcf" in out_file else out_file + "-mutect.vcf")
        broad_runner, params = \
            _mutect_call_prep(align_bams, items, ref_file, assoc_files,
                              region, out_file_mutect)
        # Single-region call with no aligned reads: emit an empty VCF and stop.
        if (not isinstance(region, (list, tuple)) and
                not all(has_aligned_reads(x, region) for x in align_bams)):
            vcfutils.write_empty_vcf(out_file)
            return
        out_file_orig = "%s-orig%s" % utils.splitext_plus(out_file_mutect)
        if not file_exists(out_file_orig):
            with file_transaction(config, out_file_orig) as tx_out_file:
                # Rationale: MuTect writes another table to stdout, which we don't need
                params += ["--vcf", tx_out_file, "-o", os.devnull]
                broad_runner.run_mutect(params)
        is_paired = "-I:normal" in params
        if not utils.file_uptodate(out_file_mutect, out_file_orig):
            out_file_mutect = _fix_mutect_output(out_file_orig, config, out_file_mutect, is_paired)
        indelcaller = vcfutils.get_indelcaller(base_config)
        if ("scalpel" in indelcaller.lower() and region and isinstance(region, (tuple, list))
              and chromhacks.is_autosomal_or_sex(region[0])):
            # Scalpel InDels
            out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf")
                               if "vcf" in out_file else out_file + "-somaticIndels.vcf")
            if scalpel.is_installed(items[0]["config"]):
                if not is_paired:
                    vcfutils.check_paired_problems(items)
                    scalpel._run_scalpel_caller(align_bams, items, ref_file, assoc_files,
                                                region=region, out_file=out_file_indels)
                else:
                    scalpel._run_scalpel_paired(align_bams, items, ref_file, assoc_files,
                                                region=region, out_file=out_file_indels)
                out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels],
                                                          out_file=out_file,
                                                          ref_file=items[0]["sam_ref"],
                                                          config=items[0]["config"],
                                                          region=region)
            else:
                # Indel caller unavailable: serve the MuTect SNVs alone.
                utils.symlink_plus(out_file_mutect, out_file)
        elif "pindel" in indelcaller.lower():
            from bcbio.structural import pindel
            out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf")
                               if "vcf" in out_file else out_file + "-somaticIndels.vcf")
            if pindel.is_installed(items[0]["config"]):
                pindel._run_tumor_pindel_caller(align_bams, items, ref_file, assoc_files, region=region,
                                                out_file=out_file_indels)
                out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels],
                                                          out_file=out_file,
                                                          ref_file=ref_file,
                                                          config=items[0]["config"],
                                                          region=region)
            else:
                utils.symlink_plus(out_file_mutect, out_file)
        elif (("somaticindeldetector" in indelcaller.lower() or "sid" in indelcaller.lower())
              and "appistry" in broad_runner.get_mutect_version()):
            # SomaticIndelDetector InDels (Appistry MuTect builds only)
            out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf")
                               if "vcf" in out_file else out_file + "-somaticIndels.vcf")
            params_indels = _SID_call_prep(align_bams, items, ref_file, assoc_files,
                                           region, out_file_indels)
            with file_transaction(config, out_file_indels) as tx_out_file:
                params_indels += ["-o", tx_out_file]
                broad_runner.run_mutect(params_indels)
            out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels],
                                                      out_file=out_file,
                                                      ref_file=items[0]["sam_ref"],
                                                      config=items[0]["config"],
                                                      region=region)
        else:
            # No (or unsupported) indel caller configured.
            utils.symlink_plus(out_file_mutect, out_file)
    return out_file
def _SID_call_prep(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
    """Preparation work for SomaticIndelDetector.

    Indexes the BAMs and assembles the SID command line, choosing paired or
    unpaired filter expressions depending on whether a normal BAM exists.
    """
    base_config = items[0]["config"]
    for x in align_bams:
        bam.index(x, base_config)
    params = ["-R", ref_file, "-T", "SomaticIndelDetector", "-U", "ALLOW_N_CIGAR_READS"]
    # Limit per base read start count to between 200-10000, i.e. from any base
    # can no more 10000 new reads begin.
    # Further, limit maxNumberOfReads accordingly, otherwise SID discards
    # windows for high coverage panels.
    paired = vcfutils.get_paired_bams(align_bams, items)
    params += ["--read_filter", "NotPrimaryAlignment"]
    params += ["-I:tumor", paired.tumor_bam]
    # Configured min allele fraction is a percentage; convert to fraction.
    min_af = float(get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
    if paired.normal_bam is not None:
        params += ["-I:normal", paired.normal_bam]
        # notice there must be at least 4 reads of coverage in normal
        params += ["--filter_expressions", "T_COV<6||N_COV<4||T_INDEL_F<%s||T_INDEL_CF<0.7" % min_af]
    else:
        params += ["--unpaired"]
        params += ["--filter_expressions", "COV<6||INDEL_F<%s||INDEL_CF<0.7" % min_af]
    if region:
        params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule",
                   "INTERSECTION"]
    return params
def _fix_mutect_output(orig_file, config, out_file, is_paired):
    """Adjust MuTect output to match other callers.
    - Rename allelic fraction field in mutect output from FA to FREQ to standardize with other tools
    - Remove extra 'none' samples introduced when calling tumor-only samples
    Returns the bgzipped and indexed output path.
    """
    out_file_noc = out_file.replace(".vcf.gz", ".vcf")
    # Column index of the 'none' sample, discovered from the #CHROM header;
    # -1 means no column needs removing.
    none_index = -1
    with file_transaction(config, out_file_noc) as tx_out_file:
        with open_gzipsafe(orig_file) as in_handle:
            with open(tx_out_file, 'w') as out_handle:
                for line in in_handle:
                    if not is_paired and line.startswith("#CHROM"):
                        parts = line.rstrip().split("\t")
                        none_index = parts.index("none")
                        del parts[none_index]
                        line = "\t".join(parts) + "\n"
                    elif line.startswith("##FORMAT=<ID=FA"):
                        line = line.replace("=FA", "=FREQ")
                    elif not line.startswith("#"):
                        if none_index > 0:
                            parts = line.rstrip().split("\t")
                            del parts[none_index]
                            line = "\t".join(parts) + "\n"
                        # NOTE(review): this replaces every "FA" substring in
                        # the record line, not just the FORMAT field — could
                        # touch sample names or annotations containing "FA";
                        # confirm intended before tightening.
                        line = line.replace("FA", "FREQ")
                    out_handle.write(line)
    return bgzip_and_index(out_file_noc, config)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import unittest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Zero entries of x below thresh; return (SparseTensor of x, nnz count)."""
  x[x < thresh] = 0
  nonzero = np.where(x)
  indices = np.vstack(nonzero).astype(index_dtype).T
  values = x[nonzero]
  sp = sparse_tensor.SparseTensor(
      indices=indices, values=values, dense_shape=x.shape)
  return sp, len(values)
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_to_indicator over rank-2 and rank-3 inputs."""
  def _SparseTensor_5x6(self, dtype):
    # Fixture: 5x6 sparse tensor whose values become indicator columns.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtype),
        constant_op.constant(shape, dtypes.int64))
  def _SparseTensor_2x3x4(self, dtype):
    # Includes two entries with the form [1, 1, x] : 150.
    ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 0],
                    [1, 1, 1], [1, 1, 2], [1, 2, 2]])
    val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
    shape = np.array([2, 3, 4])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtype),
        constant_op.constant(shape, dtypes.int64))
  def testInt32(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x6(dtypes.int32)
      output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
      # Each (row, value) pair from the fixture becomes a True indicator.
      expected_output = np.zeros((5, 50), dtype=np.bool)
      expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)
  def testInt64(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x6(dtypes.int64)
      output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
      expected_output = np.zeros((5, 50), dtype=np.bool)
      expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)
  def testHigherRank(self):
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_2x3x4(dtypes.int64)
      output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
      # Rank-3 input: indicators are set along the last dimension.
      expected_output = np.zeros((2, 3, 200), dtype=np.bool)
      expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
                        (1, 1, 149), (1, 1, 150), (1, 2, 122)]
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)
class SparseMergeTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_merge with a scalar (1-D) vocabulary size."""

  def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
    """Returns (indices, values) SparseTensorValues sharing one 3x3 layout."""
    # NOTE: This input is intentionally not sorted to validate the
    # already_sorted flag below.
    ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
    # NB: these are not sorted
    indices = np.array([0, 13, 10, 33, 32, 14])
    values = np.array([-3, 4, 1, 9, 5, 1])
    shape = np.array([3, 3])
    indices = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices, indices_dtype), np.array(shape, np.int64))
    values = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(values, values_dtype), np.array(shape, np.int64))
    return indices, values

  def _SparseTensor_3x50(self, indices_dtype, values_dtype):
    """Graph-tensor version of _SparseTensorValue_3x50."""
    indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
    return (sparse_tensor.SparseTensor.from_value(indices),
            sparse_tensor.SparseTensor.from_value(values))

  def _AssertResultsSorted(self, output, vocab_size):
    """Checks merged output in canonical (row-major sorted) index order."""
    self.assertAllEqual(output.indices,
                        [[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
    self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
    self.assertAllEqual(output.dense_shape, [3, vocab_size])

  def _AssertResultsNotSorted(self, output, vocab_size):
    """Checks merged output preserved in the original (unsorted) input order."""
    self.assertAllEqual(output.indices,
                        [[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
    self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
    self.assertAllEqual(output.dense_shape, [3, vocab_size])

  def testInt32AndFloat32(self):
    """Merges int32 indices/float32 values for all value/tensor input combos."""
    vocab_size = 50
    indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
    with self.test_session(use_gpu=False) as sess:
      for indices in (indices_v,
                      sparse_tensor.SparseTensor.from_value(indices_v)):
        for values in (values_v,
                       sparse_tensor.SparseTensor.from_value(values_v)):
          sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
          output = sess.run(sp_output)
          self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat32(self):
    """Merges int64 indices with float32 values; result is sorted."""
    vocab_size = 50
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64(self):
    """Merges int64 indices with float64 values; result is sorted."""
    vocab_size = 50
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt32AndFloat32NonCanonicalOrder(self):
    """already_sorted=True keeps the unsorted input order (int32/float32)."""
    vocab_size = 50
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int32, np.float32)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size, already_sorted=True)
      output = sess.run(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)

  def testInt64AndFloat32NonCanonicalOrder(self):
    """already_sorted=True keeps the unsorted input order (int64/float32)."""
    vocab_size = 50
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size, already_sorted=True)
      output = sess.run(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)

  def testInt64AndFloat64NonCanonicalOrder(self):
    """vocab_size may also be passed as a tensor, not just a Python int."""
    vocab_size = 50
    vocab_size_tensor = constant_op.constant(vocab_size, dtypes.int64)
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size_tensor, already_sorted=True)
      output = sess.run(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)
class SparseMergeHighDimTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_merge with a list of vocabulary sizes."""

  def _SparseTensor_3x50(self, indices_dtype, values_dtype):
    """Returns ([indices0, indices1], values) SparseTensors on one layout."""
    # NOTE: This input is intentionally not sorted to validate the
    # already_sorted flag below.
    ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
    # NB: these are not sorted
    indices0 = np.array([0, 13, 10, 33, 32, 14])
    indices1 = np.array([12, 4, 0, 0, 1, 30])
    values = np.array([-3, 4, 1, 9, 5, 1])
    shape = np.array([3, 3])
    indices0 = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices0, indices_dtype), np.array(shape, np.int64))
    indices1 = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices1, indices_dtype), np.array(shape, np.int64))
    values = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(values, values_dtype), np.array(shape, np.int64))
    return ([sparse_tensor.SparseTensor.from_value(indices0),
             sparse_tensor.SparseTensor.from_value(indices1)],
            sparse_tensor.SparseTensor.from_value(values))

  def _AssertResultsSorted(self, output, vocab_size):
    """Checks merged rank-3 output in canonical sorted index order."""
    self.assertAllEqual(
        output.indices,
        [[0, 0, 12], [1, 10, 0], [1, 13, 4], [1, 14, 30], [2, 32, 1],
         [2, 33, 0]])
    self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
    self.assertAllEqual(output.dense_shape, [3] + vocab_size)

  def testInt64AndFloat32(self):
    """Two index tensors merge into a rank-3 output (float32 values)."""
    vocab_size = [50, 31]
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64(self):
    """Two index tensors merge into a rank-3 output (float64 values)."""
    vocab_size = [50, 31]
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64Shape(self):
    """A tighter second vocab dimension still accommodates all indices."""
    vocab_size = [50, 30]
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)
class SparseRetainTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_retain."""

  def _SparseTensorValue_5x6(self):
    """Returns a 5x6 SparseTensorValue with six int32 entries."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(val, np.int32), np.array(shape, np.int64))

  def _SparseTensor_5x6(self):
    """Graph-tensor version of _SparseTensorValue_5x6."""
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())

  def testBasic(self):
    """Keeps exactly the entries whose retain flag is set."""
    with self.test_session(use_gpu=False) as sess:
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `bool` is the supported dtype spelling.
        to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=bool)
        sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
        output = sess.run(sp_output)
        self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
        self.assertAllEqual(output.values, [0, 14, 32])
        self.assertAllEqual(output.dense_shape, [5, 6])

  def testRetainNone(self):
    """Retaining nothing yields an empty sparse tensor with the same shape."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_5x6()
      to_retain = np.zeros((6,), dtype=bool)
      sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
      output = sess.run(sp_output)
      self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
      self.assertAllEqual(output.values, [])
      self.assertAllEqual(output.dense_shape, [5, 6])

  def testMismatchedRetainShape(self):
    """A retain vector whose length differs from nnz fails at graph build."""
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x6()
      to_retain = np.array([1, 0, 0, 1, 0], dtype=bool)
      with self.assertRaises(ValueError):
        sparse_ops.sparse_retain(sp_input, to_retain)
class SparseResetShapeTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_reset_shape."""

  # Shared 2x5x6 fixture: indices, values, and dense shape.
  _IND_2_5_6 = np.array(
      [[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4], [1, 3, 2], [1, 3, 3]],
      dtype=np.int64)
  _VAL_2_5_6 = np.array([0, 10, 13, 14, 32, 33], dtype=np.int32)
  _SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)

  def _SparseTensor_2x5x6(self):
    """Graph-tensor fixture built from the class-level arrays."""
    return sparse_tensor.SparseTensor(
        constant_op.constant(self._IND_2_5_6, dtypes.int64),
        constant_op.constant(self._VAL_2_5_6, dtypes.int32),
        constant_op.constant(self._SHP_2_5_6, dtypes.int64))

  def _SparseTensorValue_2x5x6(self):
    """Value (feedable) fixture built from the class-level arrays."""
    return sparse_tensor.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
                                           self._SHP_2_5_6)

  def testStaticShapeInfoPreservedWhenNewShapeIsProvidedAndStatic(self):
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 6, 7], dtype=np.int64)
    sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
    self.assertAllEqual([3, 6, 7], sp_output.get_shape())

  def testBasic(self):
    """Resetting to a larger shape keeps indices/values and grows the shape."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = sess.run(sp_output)
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  def testInputUnavailableInGraphConstructionOk(self):
    """A SparseTensorValue (no graph tensor) input also works."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorValue_2x5x6()
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = sess.run(sp_output)
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  def testFeedInputUnavailableInGraphConstructionOk(self):
    """A sparse placeholder fed at run time also works."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = sess.run(sp_output,
                        feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  def testTightBoundingBox(self):
    """With no new_shape, the shape shrinks to the tight bounding box."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      sp_output = sparse_ops.sparse_reset_shape(sp_input)
      output = sess.run(sp_output)
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [2, 4, 5])

  def testInvalidRank(self):
    """A new shape of the wrong rank raises at graph construction."""
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_2x5x6()
      new_shape = np.array([3, 7], dtype=np.int64)
      with self.assertRaises(ValueError):
        sparse_ops.sparse_reset_shape(sp_input, new_shape)

  def testInvalidRankNewShapeUnavailableInGraphConstruction(self):
    """A wrong-rank shape fed at run time fails inside the op."""
    with self.test_session(use_gpu=False) as sess:
      new_shape = array_ops.placeholder(dtype=dtypes.int64)
      sp_input = self._SparseTensor_2x5x6()
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x == y did not hold element-wise"):
        sess.run(out, feed_dict={new_shape: np.array([3, 7], dtype=np.int64)})

  def testInvalidDimensionSizeStatic(self):
    """A statically-known shrinking dimension raises at graph construction."""
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 7, 5], dtype=np.int64)
    # assertRaisesRegexp is deprecated (removed in Python 3.12); use
    # assertRaisesRegex, available since Python 3.2.
    with self.assertRaisesRegex(ValueError, "should have dimension sizes"):
      sparse_ops.sparse_reset_shape(sp_input, new_shape)

  def testInvalidDimensionSizeDynamic(self):
    """A shrinking dimension fed at run time fails inside the op."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      new_shape = array_ops.placeholder(dtype=dtypes.int32)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={new_shape: [3, 7, 5]})

  def testInvalidDimensionSizeInputUnavailableInGraphConstruction(self):
    """A shrinking dimension with a fed sparse input fails inside the op."""
    sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
    with self.test_session(use_gpu=False) as sess:
      new_shape = np.array([3, 7, 5], dtype=np.int64)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_fill_empty_rows."""

  def _SparseTensorValue_5x6(self, dtype=np.int32):
    """Returns a 5x6 SparseTensorValue; rows 2 and 4 are empty."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64), np.array(val, dtype), np.array(
            shape, np.int64))

  def _SparseTensor_5x6(self):
    """Graph-tensor version of _SparseTensorValue_5x6."""
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())

  def _SparseTensor_String5x6(self):
    """5x6 fixture with string values; rows 2 and 4 are empty."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array(["a", "b", "c", "d", "e", "f"])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.string),
        constant_op.constant(shape, dtypes.int64))

  def _SparseTensor_2x6(self):
    """2x6 fixture with no empty rows."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
    val = np.array([0, 10, 13, 14])
    shape = np.array([2, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.int32),
        constant_op.constant(shape, dtypes.int64))

  def testFillNumber(self):
    """Empty rows gain a single entry at column 0 holding the default."""
    with self.test_session(use_gpu=False) as sess:
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        sp_output, empty_row_indicator = (
            sparse_ops.sparse_fill_empty_rows(sp_input, -1))
        output, empty_row_indicator_out = sess.run(
            [sp_output, empty_row_indicator])
        self.assertAllEqual(
            output.indices,
            [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
        self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
        self.assertAllEqual(output.dense_shape, [5, 6])
        # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `bool` is the supported dtype spelling.
        self.assertAllEqual(empty_row_indicator_out,
                            np.array([0, 0, 1, 0, 1]).astype(bool))

  def testFillFloat(self):
    """Float fill: checks values, indicator, and gradients of both inputs."""
    with self.test_session(use_gpu=False) as sess:
      values = constant_op.constant(
          [0.0, 10.0, 13.0, 14.0, 32.0, 33.0], dtype=dtypes.float64)
      default_value = constant_op.constant(-1.0, dtype=dtypes.float64)
      sp_input = sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]),
          values=values,
          dense_shape=np.array([5, 6]))
      sp_output, empty_row_indicator = (sparse_ops.sparse_fill_empty_rows(
          sp_input, default_value))
      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4],
                                           [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllClose(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(bool))
      # Gradient should be nonzero but numerically tiny with delta=1e-8.
      values_grad_err = gradient_checker.compute_gradient_error(
          values, values.shape.as_list(), sp_output.values, [8], delta=1e-8)
      self.assertGreater(values_grad_err, 0)
      self.assertLess(values_grad_err, 1e-8)
      default_value_grad_err = gradient_checker.compute_gradient_error(
          default_value,
          default_value.shape.as_list(),
          sp_output.values, [8],
          delta=1e-8)
      self.assertGreater(default_value_grad_err, 0)
      self.assertLess(default_value_grad_err, 1e-8)

  def testFillString(self):
    """String fill: empty rows get the empty string."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_String5x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, ""))
      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(
          output.indices,
          [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllEqual(output.values,
                          [b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(bool))

  def testNoEmptyRows(self):
    """Input without empty rows passes through unchanged."""
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))
      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
      self.assertAllEqual(output.values, [0, 10, 13, 14])
      self.assertAllEqual(output.dense_shape, [2, 6])
      self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(bool))
class SparseReduceTest(test_util.TensorFlowTestCase):
  """Tests sparse_reduce_sum/max (dense and sparse outputs) against numpy."""

  # [[1, ?, 2]
  # [?, 3, ?]]
  # where ? is implicitly-zero.
  ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
  vals = np.array([1, 1, 1]).astype(np.int32)
  dense_shape = np.array([2, 3]).astype(np.int64)

  def _compare(self, sp_t, reduction_axes, ndims, keep_dims, do_sum):
    """Compares one reduction (sum or max) against the numpy reference."""
    densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
    # Build the numpy reference answer first.
    np_ans = densified
    if reduction_axes is None:
      if do_sum:
        np_ans = np.sum(np_ans, keepdims=keep_dims)
      else:
        np_ans = np.max(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()
      # Reduce highest axis first so remaining axis numbers stay valid.
      for ra in reduction_axes.ravel()[::-1]:
        if do_sum:
          np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
        else:
          np_ans = np.max(np_ans, axis=ra, keepdims=keep_dims)
    with self.test_session():
      # Dense-output variant.
      if do_sum:
        tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
                                                    keep_dims)
      else:
        tf_dense_ans = sparse_ops.sparse_reduce_max(sp_t, reduction_axes,
                                                    keep_dims)
      out_dense = tf_dense_ans.eval()
      # Sparse-output variant.
      if do_sum:
        tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t,
                                                            reduction_axes,
                                                            keep_dims)
      else:
        tf_sparse_ans = sparse_ops.sparse_reduce_max_sparse(sp_t,
                                                            reduction_axes,
                                                            keep_dims)
      # Convert to dense for comparison purposes.
      out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans).eval()
      self.assertAllClose(np_ans, out_dense)
      self.assertAllClose(np_ans, out_sparse)

  def _compare_all(self, sp_t, reduction_axes, ndims):
    """Runs _compare for every (keep_dims, do_sum) combination."""
    self._compare(sp_t, reduction_axes, ndims, False, False)
    self._compare(sp_t, reduction_axes, ndims, False, True)
    self._compare(sp_t, reduction_axes, ndims, True, False)
    self._compare(sp_t, reduction_axes, ndims, True, True)

  @unittest.skipIf(np.__version__ == "1.13.0", "numpy 1.13 bug")
  def testSimpleAndRandomInputs(self):
    """Reduces the small fixture and random sparsified inputs over many axes."""
    # NOTE: the in-body numpy-version skipTest was removed; it duplicated
    # the skipIf decorator above.
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
    with self.test_session(use_gpu=False):
      self._compare_all(sp_t, None, ndims=2)
      self._compare_all(sp_t, 0, ndims=2)
      self._compare_all(sp_t, [1], ndims=2)
      self._compare_all(sp_t, [0, 1], ndims=2)
      self._compare_all(sp_t, [1, 0], ndims=2)
      self._compare_all(sp_t, [-1], ndims=2)
      self._compare_all(sp_t, [1, -2], ndims=2)
    np.random.seed(1618)
    test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
    with self.test_session(use_gpu=False):
      for dims in test_dims:
        sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
        # reduce all using None
        self._compare_all(sp_t, None, ndims=len(dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          self._compare_all(sp_t, axes, ndims=len(dims))

  def testInvalidAxes(self):
    """Out-of-range axes raise an op error for both sum and max."""
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
    with self.test_session(use_gpu=False):
      with self.assertRaisesOpError("Invalid reduction dimension -3"):
        sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
      with self.assertRaisesOpError("Invalid reduction dimension 2"):
        sparse_ops.sparse_reduce_sum(sp_t, 2).eval()
      with self.assertRaisesOpError("Invalid reduction dimension -3"):
        sparse_ops.sparse_reduce_max(sp_t, -3).eval()
      with self.assertRaisesOpError("Invalid reduction dimension 2"):
        sparse_ops.sparse_reduce_max(sp_t, 2).eval()

  @unittest.skipIf(np.__version__ == "1.13.0", "numpy 1.13 bug")
  def testGradient(self):
    """Numerical gradient of sparse_reduce_sum over random axes."""
    # NOTE: the in-body numpy-version skipTest was removed; it duplicated
    # the skipIf decorator above.
    np.random.seed(8161)
    test_dims = [(11, 1, 5, 7, 1), (2, 2)]
    with self.test_session(use_gpu=False):
      for dims in test_dims:
        sp_t, nnz = _sparsify(np.random.randn(*dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)
          err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                        reduced,
                                                        reduced.eval().shape)
          self.assertLess(err, 1e-3)
        # Tests for negative axes.
        reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
        err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                      reduced,
                                                      reduced.eval().shape)
        self.assertLess(err, 1e-3)
class SparseMathOpsTest(test_util.TensorFlowTestCase):
  """Tests element-wise sparse/dense math (mul, div, add) and gradients."""

  def _check(self, result_tensor, result_np, input_sp_t):
    """Verifies a sparse result's structure and values against numpy."""
    # assertIsInstance gives clearer failure messages than
    # assertTrue(isinstance(...)).
    self.assertIsInstance(result_tensor, sparse_tensor.SparseTensor)
    self.assertIsInstance(input_sp_t, sparse_tensor.SparseTensor)
    self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
    self.assertAllEqual(input_sp_t.dense_shape.eval(),
                        result_tensor.dense_shape.eval())
    res_densified = sparse_ops.sparse_to_dense(result_tensor.indices,
                                               result_tensor.dense_shape,
                                               result_tensor.values).eval()
    self.assertAllEqual(result_np, res_densified)

  def testCwiseDivAndMul(self):
    """Sparse / dense and sparse * dense (both orders) match numpy."""
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with self.test_session(use_gpu=False):
      for dtype in [np.float32, np.float64, np.int32, np.int64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          # +1 keeps divisors nonzero.
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
          sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
          dense_t = constant_op.constant(dense_vals_np)
          self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
          # Check commutative.
          self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
          self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)
          if dtype in [np.int32, np.int64]:
            res = sp_t / dense_t  # should invoke "__truediv__"
            self.assertEqual(res.values.eval().dtype, np.float64)

  def testCwiseAdd(self):
    """sparse_dense_cwise_add, with and without dense broadcasting."""
    with self.test_session(use_gpu=False):
      # Identity(2) + AllOnes(2,2). Should be equal to 2 * Identity(2).
      indices = [[0, 0], [1, 1]]
      vals = [1, 1]
      shape = (2, 2)
      sp_t = sparse_tensor.SparseTensor(indices, vals, shape)
      dense_t = array_ops.ones(shape, dtype=dtypes.int32)
      self._check(
          sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
          np.identity(2) * 2, sp_t)
      # Variant of above, but broadcasts the dense side.
      dense_t = array_ops.ones([1], dtype=dtypes.int32)
      self._check(
          sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
          np.identity(2) * 2, sp_t)

  def testGradients(self):
    """Numerical gradients of cwise mul/div w.r.t. each input."""
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with self.test_session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
          dense_t = constant_op.constant(dense_vals_np)
          cmul = sp_t * dense_t
          err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
                                                        [(nnz,), dense_shape],
                                                        cmul.values, (nnz,))
          self.assertLess(err, 1e-4)
          cdiv = sp_t / dense_t
          err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                        cdiv.values, (nnz,))
          self.assertLess(err, 1e-4)
          err = gradient_checker.compute_gradient_error(
              dense_t,
              dense_shape,
              cdiv.values, (nnz,),
              x_init_value=dense_vals_np)
          self.assertLess(err, 2e-4)
class SparseSoftmaxTest(test_util.TensorFlowTestCase):
  """Tests sparse_softmax against dense nn_ops.softmax and its gradient."""

  def testEquivalentToDensified(self):
    """With no masked entries, sparse softmax equals the dense softmax."""
    np.random.seed(1618)
    n, m = np.random.choice(20, size=2)
    for dtype in [np.float32, np.float64]:
      sp_vals_np = np.random.rand(n, m).astype(dtype)
      batched_sp_t, unused_nnz1 = _sparsify(
          sp_vals_np.reshape((1, n, m)), thresh=0.)  # No masking.
      with self.test_session(use_gpu=False):
        densified = constant_op.constant(sp_vals_np)
        sp_result = sparse_ops.sparse_softmax(batched_sp_t).eval(
        ).values.reshape((n, m))
        dense_result = nn_ops.softmax(densified)
        self.assertAllClose(dense_result.eval(), sp_result)

  def testHigherRanks(self):
    """Softmax is applied per innermost row, over present entries only."""
    # For the first shape:
    # First batch:
    # [? e.]
    # [1. ? ]
    # Second batch:
    # [e ? ]
    # [e e ]
    #
    # The softmax results should be:
    # [? 1.] [1 ?]
    # [1. ? ] and [.5 .5]
    # where ? means implicitly zero.
    #
    # The second shape: same input data, but with a higher-rank shape.
    shapes = [[2, 2, 2], [2, 1, 2, 2]]
    for shape in shapes:
      values = np.asarray(
          [0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
      sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
      expected_values = [1., 1., 1., .5, .5]
      with self.test_session(use_gpu=False):
        result = sparse_ops.sparse_softmax(sp_t).eval()
        self.assertAllEqual(expected_values, result.values)
        self.assertAllEqual(sp_t.indices.eval(), result.indices)
        self.assertAllEqual(shape, result.dense_shape)

  def testGradient(self):
    """Numerical gradient of sparse_softmax w.r.t. the sparse values."""
    x_shape = [2, 5, 10]
    with self.test_session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        x_np = np.random.randn(*x_shape).astype(dtype)
        x_tf, nnz = _sparsify(x_np)
        y_tf = sparse_ops.sparse_softmax(x_tf)
        err = gradient_checker.compute_gradient_error(x_tf.values, (nnz,),
                                                      y_tf.values, (nnz,))
        self.assertLess(err, 1e-4)
class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
  """Tests sparse_minimum / sparse_maximum against numpy on dense views."""

  def _assertSparseTensorValueEqual(self, a, b):
    """Asserts two SparseTensorValues have identical components."""
    self.assertAllEqual(a.indices, b.indices)
    self.assertAllEqual(a.values, b.values)
    self.assertAllEqual(a.dense_shape, b.dense_shape)

  def testBasic(self):
    """Min/max of overlapping and non-overlapping single-entry tensors."""
    with self.test_session(use_gpu=False):
      # 1-D, values at index 0.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
      max_tf = sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
      min_tf = sparse_ops.sparse_minimum(sp_zero, sp_one).eval()
      self._assertSparseTensorValueEqual(sp_one.eval(), max_tf)
      self._assertSparseTensorValueEqual(sp_zero.eval(), min_tf)

      # Values at different indices.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
      sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
      expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
      max_tf = sparse_ops.sparse_maximum(sp_zero, sp_zero_2).eval()
      min_tf = sparse_ops.sparse_minimum(sp_zero, sp_zero_2).eval()
      self._assertSparseTensorValueEqual(expected.eval(), max_tf)
      self._assertSparseTensorValueEqual(expected.eval(), min_tf)

  def testRandom(self):
    """Random sparsified inputs: densified results match np.minimum/maximum."""
    np.random.seed(1618)
    shapes = [(13,), (6, 8), (1, 7, 1)]
    for shape in shapes:
      for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
        a_np = np.random.randn(*shape).astype(dtype)
        b_np = np.random.randn(*shape).astype(dtype)
        sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
        sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)
        with self.test_session(use_gpu=False):
          maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
          maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
              maximum_tf).eval()
          minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
          minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
              minimum_tf).eval()
          a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
          b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()
          self.assertAllEqual(
              np.maximum(a_densified, b_densified), maximum_tf_densified)
          self.assertAllEqual(
              np.minimum(a_densified, b_densified), minimum_tf_densified)

  def testMismatchedShapes(self):
    """Mismatched ranks or shapes raise descriptive op errors."""
    with self.test_session(use_gpu=False):
      sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
      with self.assertRaisesOpError("Operands do not have the same ranks"):
        sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
      with self.assertRaisesOpError("Operands' shapes do not match"):
        sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
class SparseTransposeTest(test.TestCase):
  """Tests sparse_transpose against dense transpose for random shapes."""

  # Skip condition expressed as a decorator for consistency with
  # SparseReduceTest; the former in-body skipTest was equivalent.
  @unittest.skipIf(np.__version__ == "1.13.0", "numpy 1.13 bug")
  def testTranspose(self):
    """Random-permutation transposes match the dense result for ranks 1-5."""
    with self.test_session(use_gpu=False):
      np.random.seed(1618)
      shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
      for shape in shapes:
        for dtype in [np.int32, np.int64, np.float32, np.float64]:
          dn_input = np.random.randn(*shape).astype(dtype)
          rank = array_ops.rank(dn_input).eval()
          perm = np.random.choice(rank, rank, False)
          sp_input, unused_a_nnz = _sparsify(dn_input)
          sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
          dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans).eval()
          expected_trans = array_ops.transpose(dn_input, perm=perm).eval()
          self.assertAllEqual(dn_trans, expected_trans)
class SparsePlaceholderTest(test.TestCase):
  """Checks the static shape reported by sparse placeholders."""

  def testPlaceholder(self):
    """A fully specified shape is reported back verbatim."""
    placeholder = array_ops.sparse_placeholder(dtypes.float32, shape=(10, 47))
    self.assertAllEqual([10, 47], placeholder.get_shape())

  def testPartialShapePlaceholder(self):
    """A partially known shape degrades to fully unknown dimensions."""
    placeholder = array_ops.sparse_placeholder(dtypes.float32, shape=(None, 47))
    self.assertAllEqual([None, None], placeholder.get_shape().as_list())

  def testNoShapePlaceholder(self):
    """With shape=None the placeholder reports no static shape at all."""
    placeholder = array_ops.sparse_placeholder(dtypes.float32, shape=None)
    self.assertAllEqual(None, placeholder.get_shape())
if __name__ == "__main__":
  # Delegate to the TensorFlow googletest runner when executed as a script.
  googletest.main()
| |
"""hug/interface.py
Defines the various interface hug provides to expose routes to functions
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import argparse
import os
import sys
from collections import OrderedDict
from functools import lru_cache, partial, wraps
import falcon
import hug._empty as empty
import hug.api
import hug.output_format
import hug.types as types
from falcon import HTTP_BAD_REQUEST
from hug import introspect
from hug._async import asyncio_call
from hug.exceptions import InvalidTypeData
from hug.format import parse_content_type
from hug.types import MarshmallowSchema, Multiple, OneOf, SmartBoolean, Text, text
class Interfaces(object):
    """Defines the per-function singleton applied to hugged functions defining common data needed by all interfaces"""

    def __init__(self, function):
        self.api = hug.api.from_object(function)
        # `function.original` is set by hug's decorators when `function` is a
        # wrapper; fall back to the function itself otherwise.
        self.spec = getattr(function, 'original', function)
        self.arguments = introspect.arguments(function)
        self.name = introspect.name(function)
        self._function = function

        self.is_coroutine = introspect.is_coroutine(self.spec)
        if self.is_coroutine:
            # Unwrap the coroutine to introspect the underlying callable.
            self.spec = getattr(self.spec, '__wrapped__', self.spec)

        self.takes_args = introspect.takes_args(self.spec)
        self.takes_kwargs = introspect.takes_kwargs(self.spec)

        # Include *args/**kwargs names in the argument listing, then pop them
        # off the end so `parameters` contains only named parameters.
        self.parameters = list(introspect.arguments(self.spec, self.takes_kwargs + self.takes_args))
        if self.takes_kwargs:
            self.kwarg = self.parameters.pop(-1)
        if self.takes_args:
            self.arg = self.parameters.pop(-1)
        self.parameters = tuple(self.parameters)
        # Defaults align with the trailing parameters, hence the reversed zip.
        self.defaults = dict(zip(reversed(self.parameters), reversed(self.spec.__defaults__ or ())))
        # Parameters without defaults are required (the `or None` handles the
        # no-defaults case, where [:-0] would otherwise empty the tuple).
        self.required = self.parameters[:-(len(self.spec.__defaults__ or ())) or None]
        self.is_method = introspect.is_method(self.spec) or introspect.is_method(function)
        if self.is_method:
            # Drop the implicit `self`/`cls` parameter.
            self.required = self.required[1:]
            self.parameters = self.parameters[1:]
        self.all_parameters = set(self.parameters)
        if self.spec is not function:
            self.all_parameters.update(self.arguments)

        # The 'return' annotation, when present, acts as an output transform.
        self.transform = self.spec.__annotations__.get('return', None)
        self.directives = {}
        self.input_transformations = {}
        for name, transformer in self.spec.__annotations__.items():
            if isinstance(transformer, str):
                # Plain-string annotations are documentation, not transforms.
                continue
            elif hasattr(transformer, 'directive'):
                self.directives[name] = transformer
                continue

            # Normalize the annotation into a callable input transformation.
            if hasattr(transformer, 'from_string'):
                transformer = transformer.from_string
            elif hasattr(transformer, 'load'):
                transformer = MarshmallowSchema(transformer)
            elif hasattr(transformer, 'deserialize'):
                transformer = transformer.deserialize

            self.input_transformations[name] = transformer

    def __call__(__hug_internal_self, *args, **kwargs):
        """Calls the wrapped function, uses __hug_internal_self in case self is passed in as a kwarg from the wrapper"""
        if not __hug_internal_self.is_coroutine:
            return __hug_internal_self._function(*args, **kwargs)

        return asyncio_call(__hug_internal_self._function, *args, **kwargs)
class Interface(object):
    """Defines the basic hug interface object, which is responsible for wrapping a user defined function and providing
       all the info requested in the function as well as the route

       An Interface object should be created for every kind of protocol hug supports
    """
    __slots__ = ('interface', '_api', 'defaults', 'parameters', 'required', '_outputs', 'on_invalid', 'requires',
                 'validate_function', 'transform', 'examples', 'output_doc', 'wrapped', 'directives', 'all_parameters',
                 'raise_on_invalid', 'invalid_outputs')
    def __init__(self, route, function):
        """Builds the interface from the route definition and the (possibly already hugged) function"""
        if route.get('api', None):
            self._api = route['api']
        if 'examples' in route:
            self.examples = route['examples']
        # Attach the shared per-function Interfaces singleton on first use.
        if not hasattr(function, 'interface'):
            function.__dict__['interface'] = Interfaces(function)
        self.interface = function.interface
        self.requires = route.get('requires', ())
        if 'validate' in route:
            self.validate_function = route['validate']
        if 'output_invalid' in route:
            self.invalid_outputs = route['output_invalid']
        # Parameters may be overridden per-route; otherwise they come from the
        # function's introspected signature.
        if not 'parameters' in route:
            self.defaults = self.interface.defaults
            self.parameters = self.interface.parameters
            self.all_parameters = self.interface.all_parameters
            self.required = self.interface.required
        else:
            self.defaults = route.get('defaults', {})
            self.parameters = tuple(route['parameters'])
            self.all_parameters = set(route['parameters'])
            self.required = tuple([parameter for parameter in self.parameters if parameter not in self.defaults])
        if 'output' in route:
            self.outputs = route['output']
        # The route's transform wins; otherwise fall back to the function's
        # return annotation (unless that annotation is just a doc string).
        self.transform = route.get('transform', None)
        if self.transform is None and not isinstance(self.interface.transform, (str, type(None))):
            self.transform = self.interface.transform
        if hasattr(self.transform, 'dump'):
            # Marshmallow-style schema: serialize through its dump method.
            self.transform = self.transform.dump
            self.output_doc = self.transform.__doc__
        elif self.transform or self.interface.transform:
            output_doc = (self.transform or self.interface.transform)
            self.output_doc = output_doc if type(output_doc) is str else output_doc.__doc__
        self.raise_on_invalid = route.get('raise_on_invalid', False)
        if 'on_invalid' in route:
            self.on_invalid = route['on_invalid']
        elif self.transform:
            self.on_invalid = self.transform
        # Only directives actually named by this interface's parameters are kept.
        defined_directives = self.api.directives()
        used_directives = set(self.parameters).intersection(defined_directives)
        self.directives = {directive_name: defined_directives[directive_name] for directive_name in used_directives}
        self.directives.update(self.interface.directives)
    @property
    def api(self):
        """The hug API this interface belongs to (route override or the function's own)"""
        return getattr(self, '_api', self.interface.api)
    @property
    def outputs(self):
        """The output formatter used to serialize return values (None when unset)"""
        return getattr(self, '_outputs', None)
    @outputs.setter
    def outputs(self, outputs):
        self._outputs = outputs  # pragma: no cover - generally re-implemented by sub classes
    def validate(self, input_parameters):
        """Runs all set type transformers / validators against the provided input parameters and returns any errors"""
        errors = {}
        for key, type_handler in self.interface.input_transformations.items():
            if self.raise_on_invalid:
                if key in input_parameters:
                    input_parameters[key] = type_handler(input_parameters[key])
            else:
                try:
                    if key in input_parameters:
                        input_parameters[key] = type_handler(input_parameters[key])
                except InvalidTypeData as error:
                    errors[key] = error.reasons or str(error.message)
                except Exception as error:
                    # Fall back to the raw exception text for unexpected errors.
                    if hasattr(error, 'args') and error.args:
                        errors[key] = error.args[0]
                    else:
                        errors[key] = str(error)
        for require in self.interface.required:
            if not require in input_parameters:
                errors[require] = "Required parameter '{}' not supplied".format(require)
        if not errors and getattr(self, 'validate_function', False):
            errors = self.validate_function(input_parameters)
        return errors
    def check_requirements(self, request=None, response=None):
        """Checks to see if all requirements set pass

           if all requirements pass nothing will be returned
           otherwise, the error reported will be returned
        """
        for requirement in self.requires:
            conclusion = requirement(response=response, request=request, module=self.api.module)
            if conclusion and conclusion is not True:
                return conclusion
    def documentation(self, add_to=None):
        """Produces general documentation for the interface"""
        # BUG FIX: 'OrderedDict' was previously referenced without being
        # called, so documentation() without add_to tried to item-assign into
        # the class object itself and raised TypeError.
        doc = OrderedDict() if add_to is None else add_to
        usage = self.interface.spec.__doc__
        if usage:
            doc['usage'] = usage
        if getattr(self, 'requires', None):
            doc['requires'] = [getattr(requirement, '__doc__', requirement.__name__) for requirement in self.requires]
        doc['outputs'] = OrderedDict()
        doc['outputs']['format'] = self.outputs.__doc__
        doc['outputs']['content_type'] = self.outputs.content_type
        # Implementation-detail parameters are hidden from the documentation.
        parameters = [param for param in self.parameters if not param in ('request', 'response', 'self')
                      and not param in ('api_version', 'body')
                      and not param.startswith('hug_')
                      and not hasattr(param, 'directive')]
        if parameters:
            inputs = doc.setdefault('inputs', OrderedDict())
            types = self.interface.spec.__annotations__
            for argument in parameters:
                kind = types.get(argument, text)
                if getattr(kind, 'directive', None) is True:
                    continue
                input_definition = inputs.setdefault(argument, OrderedDict())
                input_definition['type'] = kind if isinstance(kind, str) else kind.__doc__
                default = self.defaults.get(argument, None)
                if default is not None:
                    input_definition['default'] = default
        return doc
class Local(Interface):
    """Defines the Interface responsible for exposing functions locally"""
    __slots__ = ('skip_directives', 'skip_validation', 'version')
    def __init__(self, route, function):
        super().__init__(route, function)
        self.version = route.get('version', None)
        if 'skip_directives' in route:
            self.skip_directives = True
        if 'skip_validation' in route:
            self.skip_validation = True
        self.interface.local = self
    def __get__(self, instance, kind):
        """Support instance methods"""
        return partial(self.__call__, instance) if instance else self.__call__
    @property
    def __name__(self):
        return self.interface.spec.__name__
    @property
    def __module__(self):
        return self.interface.spec.__module__
    def __call__(self, *args, **kwargs):
        """Defines how calling the function locally should be handled"""
        # BUG FIX: requirements were previously re-checked once per entry in
        # self.requires even though check_requirements() already iterates all
        # of them; a single call gives the same result without invoking each
        # requirement callable N times.
        lacks_requirement = self.check_requirements()
        if lacks_requirement:
            return self.outputs(lacks_requirement) if self.outputs else lacks_requirement
        # Map positional arguments onto their declared parameter names.
        for index, argument in enumerate(args):
            kwargs[self.parameters[index]] = argument
        if not getattr(self, 'skip_directives', False):
            # Explicitly supplied values always win over directive resolution.
            for parameter, directive in self.directives.items():
                if parameter in kwargs:
                    continue
                arguments = (self.defaults[parameter], ) if parameter in self.defaults else ()
                kwargs[parameter] = directive(*arguments, api=self.api, api_version=self.version,
                                              interface=self)
        if not getattr(self, 'skip_validation', False):
            errors = self.validate(kwargs)
            if errors:
                errors = {'errors': errors}
                if getattr(self, 'on_invalid', False):
                    errors = self.on_invalid(errors)
                outputs = getattr(self, 'invalid_outputs', self.outputs)
                return outputs(errors) if outputs else errors
        result = self.interface(**kwargs)
        if self.transform:
            result = self.transform(result)
        return self.outputs(result) if self.outputs else result
class CLI(Interface):
    """Defines the Interface responsible for exposing functions to the CLI"""
    def __init__(self, route, function):
        super().__init__(route, function)
        self.interface.cli = self
        self.reaffirm_types = {}
        use_parameters = list(self.interface.parameters)
        # A catch-all *arg / **kwarg parameter (if any) is exposed as one
        # extra multi-value option at the end of the parameter list.
        self.additional_options = getattr(self.interface, 'arg', getattr(self.interface, 'kwarg', False))
        if self.additional_options:
            use_parameters.append(self.additional_options)
        used_options = {'h', 'help'}
        nargs_set = self.interface.takes_args or self.interface.takes_kwargs
        self.parser = argparse.ArgumentParser(description=route.get('doc', self.interface.spec.__doc__))
        if 'version' in route:
            self.parser.add_argument('-v', '--version', action='version',
                                     version="{0} {1}".format(route.get('name', self.interface.spec.__name__),
                                                              route['version']))
            used_options.update(('v', 'version'))
        for option in use_parameters:
            if option in self.directives:
                continue
            if option in self.interface.required or option == self.additional_options:
                args = (option, )
            else:
                # Grow the short option one character at a time until unique.
                short_option = option[0]
                while short_option in used_options and len(short_option) < len(option):
                    short_option = option[:len(short_option) + 1]
                used_options.add(short_option)
                used_options.add(option)
                if short_option != option:
                    args = ('-{0}'.format(short_option), '--{0}'.format(option))
                else:
                    args = ('--{0}'.format(option), )
            kwargs = {}
            if option in self.defaults:
                kwargs['default'] = self.defaults[option]
            if option in self.interface.input_transformations:
                # Map the hug transformation onto the closest argparse concept;
                # types argparse can't represent are re-applied after parsing
                # (see reaffirm_types in __call__).
                transform = self.interface.input_transformations[option]
                kwargs['type'] = transform
                kwargs['help'] = transform.__doc__
                if transform in (list, tuple) or isinstance(transform, types.Multiple):
                    kwargs['action'] = 'append'
                    kwargs['type'] = Text()
                    self.reaffirm_types[option] = transform
                elif transform == bool or isinstance(transform, type(types.boolean)):
                    kwargs['action'] = 'store_true'
                    self.reaffirm_types[option] = transform
                elif isinstance(transform, types.OneOf):
                    kwargs['choices'] = transform.values
            elif (option in self.interface.spec.__annotations__ and
                  type(self.interface.spec.__annotations__[option]) == str):
                kwargs['help'] = option
            # NOTE(review): kwargs['default'] assumes bool-typed options always
            # carry a default; a required bool option would raise KeyError here
            # - confirm against hug's routing guarantees.
            if ((kwargs.get('type', None) == bool or kwargs.get('action', None) == 'store_true') and
                    not kwargs['default']):
                kwargs['action'] = 'store_true'
                kwargs.pop('type', None)
            elif kwargs.get('action', None) == 'store_true':
                # BUG FIX: this previously read
                # "kwargs.pop('action', None) == 'store_true'" - the comparison
                # result was silently discarded; only the pop is wanted.
                kwargs.pop('action', None)
            if option == self.additional_options:
                kwargs['nargs'] = '*'
            elif not nargs_set and kwargs.get('action', None) == 'append' and not option in self.interface.defaults:
                kwargs['nargs'] = '*'
                kwargs.pop('action', '')
                nargs_set = True
            self.parser.add_argument(*args, **kwargs)
        self.api.cli.commands[route.get('name', self.interface.spec.__name__)] = self
    @property
    def outputs(self):
        """The output formatter used for CLI results (defaults to plain text)"""
        return getattr(self, '_outputs', hug.output_format.text)
    @outputs.setter
    def outputs(self, outputs):
        self._outputs = outputs
    def output(self, data):
        """Outputs the provided data using the transformations and output format specified for this CLI endpoint"""
        if self.transform:
            data = self.transform(data)
        if hasattr(data, 'read'):
            # File-like results are fully read and decoded before printing.
            data = data.read().decode('utf8')
        if data is not None:
            data = self.outputs(data)
            if data:
                sys.stdout.buffer.write(data)
                if not data.endswith(b'\n'):
                    sys.stdout.buffer.write(b'\n')
        return data
    def __call__(self):
        """Calls the wrapped function through the lens of a CLI ran command"""
        self.api._ensure_started()
        for requirement in self.requires:
            conclusion = requirement(request=sys.argv, module=self.api.module)
            if conclusion and conclusion is not True:
                return self.output(conclusion)
        if self.interface.is_method:
            self.parser.prog = "%s %s" % (self.api.module.__name__, self.interface.name)
        known, unknown = self.parser.parse_known_args()
        pass_to_function = vars(known)
        for option, directive in self.directives.items():
            arguments = (self.defaults[option], ) if option in self.defaults else ()
            pass_to_function[option] = directive(*arguments, api=self.api, argparse=self.parser,
                                                 interface=self)
        # Re-apply the original hug transformations to options argparse only
        # handled as plain text (the append/store_true work-arounds above).
        for field, type_handler in self.reaffirm_types.items():
            if field in pass_to_function:
                pass_to_function[field] = type_handler(pass_to_function[field])
        if getattr(self, 'validate_function', False):
            errors = self.validate_function(pass_to_function)
            if errors:
                return self.output(errors)
        if self.additional_options:
            args = []
            for parameter in self.interface.parameters:
                if parameter in pass_to_function:
                    args.append(pass_to_function.pop(parameter))
            args.extend(pass_to_function.pop(self.additional_options, ()))
            if self.interface.takes_kwargs:
                # Fold unrecognized --options into **kwargs: repeated values
                # stay lists, single values are unwrapped, bare flags -> True.
                add_options_to = None
                for index, option in enumerate(unknown):
                    if option.startswith('--'):
                        if add_options_to:
                            value = pass_to_function[add_options_to]
                            if len(value) == 1:
                                pass_to_function[add_options_to] = value[0]
                            elif value == []:
                                pass_to_function[add_options_to] = True
                        add_options_to = option[2:]
                        pass_to_function.setdefault(add_options_to, [])
                    elif add_options_to:
                        pass_to_function[add_options_to].append(option)
            result = self.interface(*args, **pass_to_function)
        else:
            result = self.interface(**pass_to_function)
        return self.output(result)
class HTTP(Interface):
    """Defines the interface responsible for wrapping functions and exposing them via HTTP based on the route"""
    __slots__ = ('_params_for_outputs_state', '_params_for_invalid_outputs_state', '_params_for_transform_state',
                 '_params_for_on_invalid', 'set_status', 'response_headers', 'transform', 'input_transformations',
                 'examples', 'wrapped', 'catch_exceptions', 'parse_body', 'private', 'on_invalid', 'inputs')
    # Names injected automatically into outputs/transform/on_invalid callables
    # that declare them as arguments.
    AUTO_INCLUDE = {'request', 'response'}
    def __init__(self, route, function, catch_exceptions=True):
        super().__init__(route, function)
        self.catch_exceptions = catch_exceptions
        self.parse_body = 'parse_body' in route
        self.set_status = route.get('status', False)
        self.response_headers = tuple(route.get('response_headers', {}).items())
        self.private = 'private' in route
        self.inputs = route.get('inputs', {})
        # Pre-compute which auto-include arguments the on_invalid handler wants.
        if 'on_invalid' in route:
            self._params_for_on_invalid = introspect.takes_arguments(self.on_invalid, *self.AUTO_INCLUDE)
        elif self.transform:
            self._params_for_on_invalid = self._params_for_transform
        if route['versions']:
            self.api.http.versions.update(route['versions'])
        self.interface.http = self
    @property
    def _params_for_outputs(self):
        # Lazily computed and cached: auto-include args the output formatter accepts.
        if not hasattr(self, '_params_for_outputs_state'):
            self._params_for_outputs_state = introspect.takes_arguments(self.outputs, *self.AUTO_INCLUDE)
        return self._params_for_outputs_state
    @property
    def _params_for_invalid_outputs(self):
        # Lazily computed and cached: auto-include args the invalid-output formatter accepts.
        if not hasattr(self, '_params_for_invalid_outputs_state'):
            self._params_for_invalid_outputs_state = introspect.takes_arguments(self.invalid_outputs,
                                                                                *self.AUTO_INCLUDE)
        return self._params_for_invalid_outputs_state
    @property
    def _params_for_transform(self):
        # Lazily computed and cached: auto-include args the transform accepts.
        if not hasattr(self, '_params_for_transform_state'):
            self._params_for_transform_state = introspect.takes_arguments(self.transform, *self.AUTO_INCLUDE)
        return self._params_for_transform_state
    def gather_parameters(self, request, response, api_version=None, **input_parameters):
        """Gathers and returns all parameters that will be used for this endpoint"""
        input_parameters.update(request.params)
        # Parse the request body (when enabled) with the input format matching
        # its content type; a dict-shaped body is merged into the parameters.
        if self.parse_body and request.content_length:
            body = request.stream
            content_type, content_params = parse_content_type(request.content_type)
            body_formatter = body and self.inputs.get(content_type, self.api.http.input_format(content_type))
            if body_formatter:
                body = body_formatter(body, **content_params)
            if 'body' in self.all_parameters:
                input_parameters['body'] = body
            if isinstance(body, dict):
                input_parameters.update(body)
        elif 'body' in self.all_parameters:
            input_parameters['body'] = None
        if 'request' in self.all_parameters:
            input_parameters['request'] = request
        if 'response' in self.all_parameters:
            input_parameters['response'] = response
        if 'api_version' in self.all_parameters:
            input_parameters['api_version'] = api_version
        # Directive parameters are resolved last and override user input.
        for parameter, directive in self.directives.items():
            arguments = (self.defaults[parameter], ) if parameter in self.defaults else ()
            input_parameters[parameter] = directive(*arguments, response=response, request=request,
                                                    api=self.api, api_version=api_version, interface=self)
        return input_parameters
    @property
    def outputs(self):
        # Falls back to the API-wide HTTP output format when none is set.
        return getattr(self, '_outputs', self.api.http.output_format)
    @outputs.setter
    def outputs(self, outputs):
        self._outputs = outputs
    def transform_data(self, data, request=None, response=None):
        """Runs the transforms specified on this endpoint with the provided data, returning the data modified"""
        # A transform that is a type acts as a pass-through marker when the
        # data is already an instance of it.
        if self.transform and not (isinstance(self.transform, type) and isinstance(data, self.transform)):
            if self._params_for_transform:
                return self.transform(data, **self._arguments(self._params_for_transform, request, response))
            else:
                return self.transform(data)
        return data
    def content_type(self, request=None, response=None):
        """Returns the content type that should be used by default for this endpoint"""
        if callable(self.outputs.content_type):
            return self.outputs.content_type(request=request, response=response)
        else:
            return self.outputs.content_type
    def invalid_content_type(self, request=None, response=None):
        """Returns the content type that should be used by default on validation errors"""
        if callable(self.invalid_outputs.content_type):
            return self.invalid_outputs.content_type(request=request, response=response)
        else:
            return self.invalid_outputs.content_type
    def _arguments(self, requested_params, request=None, response=None):
        # Builds the request/response kwargs a callable asked for
        # (shared empty dict when it asked for none).
        if requested_params:
            arguments = {}
            if 'response' in requested_params:
                arguments['response'] = response
            if 'request' in requested_params:
                arguments['request'] = request
            return arguments
        return empty.dict
    def set_response_defaults(self, response, request=None):
        """Sets up the response defaults that are defined in the URL route"""
        for header_name, header_value in self.response_headers:
            response.set_header(header_name, header_value)
        if self.set_status:
            response.status = self.set_status
        response.content_type = self.content_type(request, response)
    def render_errors(self, errors, request, response):
        # Formats validation errors as a 400 response, preferring the
        # dedicated invalid-output formatter (and its content type) when set.
        data = {'errors': errors}
        if getattr(self, 'on_invalid', False):
            data = self.on_invalid(data, **self._arguments(self._params_for_on_invalid, request, response))
        response.status = HTTP_BAD_REQUEST
        if getattr(self, 'invalid_outputs', False):
            response.content_type = self.invalid_content_type(request, response)
            response.data = self.invalid_outputs(data, **self._arguments(self._params_for_invalid_outputs,
                                                                         request, response))
        else:
            response.data = self.outputs(data, **self._arguments(self._params_for_outputs, request, response))
    def call_function(self, parameters):
        # Functions without **kwargs only receive the parameters they declare.
        if not self.interface.takes_kwargs:
            parameters = {key: value for key, value in parameters.items() if key in self.all_parameters}
        return self.interface(**parameters)
    def render_content(self, content, request, response, **kwargs):
        # A returned hug interface is delegated to rather than serialized.
        if hasattr(content, 'interface') and (content.interface is True or hasattr(content.interface, 'http')):
            if content.interface is True:
                content(request, response, api_version=None, **kwargs)
            else:
                content.interface.http(request, response, api_version=None, **kwargs)
            return
        content = self.transform_data(content, request, response)
        content = self.outputs(content, **self._arguments(self._params_for_outputs, request, response))
        if hasattr(content, 'read'):
            # File-like content: honor HTTP Range requests for real files
            # (206 partial content), otherwise stream the object directly.
            size = None
            if hasattr(content, 'name') and os.path.isfile(content.name):
                size = os.path.getsize(content.name)
            if request.range and size:
                start, end = request.range
                if end < 0:
                    end = size + end
                end = min(end, size)
                length = end - start + 1
                content.seek(start)
                response.data = content.read(length)
                response.status = falcon.HTTP_206
                response.content_range = (start, end, size)
                content.close()
            else:
                response.stream = content
                if size:
                    response.stream_len = size
        else:
            response.data = content
    def __call__(self, request, response, api_version=None, **kwargs):
        """Call the wrapped function over HTTP pulling information as needed"""
        # Only purely numeric version strings are honored.
        if isinstance(api_version, str) and api_version.isdigit():
            api_version = int(api_version)
        else:
            api_version = None
        if not self.catch_exceptions:
            exception_types = ()
        else:
            exception_types = self.api.http.exception_handlers(api_version)
            exception_types = tuple(exception_types.keys()) if exception_types else ()
        try:
            self.set_response_defaults(response, request)
            lacks_requirement = self.check_requirements(request, response)
            if lacks_requirement:
                response.data = self.outputs(lacks_requirement,
                                             **self._arguments(self._params_for_outputs, request, response))
                return
            input_parameters = self.gather_parameters(request, response, api_version, **kwargs)
            errors = self.validate(input_parameters)
            if errors:
                return self.render_errors(errors, request, response)
            self.render_content(self.call_function(input_parameters), request, response, **kwargs)
        except falcon.HTTPNotFound:
            return self.api.http.not_found(request, response, **kwargs)
        except exception_types as exception:
            # Prefer an exact-type handler; otherwise walk registered handlers
            # (most recently registered first) for an isinstance match that
            # does not exclude this exception. Unhandled exceptions re-raise.
            handler = None
            exception_type = type(exception)
            if exception_type in exception_types:
                handler = self.api.http.exception_handlers(api_version)[exception_type][0]
            else:
                for match_exception_type, exception_handlers in \
                        tuple(self.api.http.exception_handlers(api_version).items())[::-1]:
                    if isinstance(exception, match_exception_type):
                        for potential_handler in exception_handlers:
                            if not isinstance(exception, potential_handler.exclude):
                                handler = potential_handler
            if not handler:
                raise exception
            handler(request=request, response=response, exception=exception, **kwargs)
    def documentation(self, add_to=None, version=None, prefix="", base_url="", url=""):
        """Returns the documentation specific to an HTTP interface"""
        doc = OrderedDict() if add_to is None else add_to
        usage = self.interface.spec.__doc__
        if usage:
            doc['usage'] = usage
        for example in self.examples:
            example_text = "{0}{1}{2}{3}".format(prefix, base_url, '/v{0}'.format(version) if version else '', url)
            if isinstance(example, str):
                example_text += "?{0}".format(example)
            doc_examples = doc.setdefault('examples', [])
            if not example_text in doc_examples:
                doc_examples.append(example_text)
        doc = super().documentation(doc)
        if getattr(self, 'output_doc', ''):
            doc['outputs']['type'] = self.output_doc
        return doc
    @lru_cache()
    def urls(self, version=None):
        """Returns all URLS that are mapped to this interface"""
        urls = []
        for base_url, routes in self.api.http.routes.items():
            for url, methods in routes.items():
                for method, versions in methods.items():
                    for interface_version, interface in versions.items():
                        if interface_version == version and interface == self:
                            if not url in urls:
                                urls.append(('/v{0}'.format(version) if version else '') + url)
        return urls
    def url(self, version=None, **kwargs):
        """Returns the first matching URL found for the specified arguments"""
        for url in self.urls(version):
            if [key for key in kwargs.keys() if not '{' + key + '}' in url]:
                continue
            return url.format(**kwargs)
        raise KeyError('URL that takes all provided parameters not found')
class ExceptionRaised(HTTP):
    """Defines the interface responsible for taking and transforming exceptions that occur during processing"""
    __slots__ = ('handle', 'exclude')
    def __init__(self, route, *args, **kwargs):
        # Record which exception types this interface handles (and which it
        # explicitly excludes) before the generic HTTP route setup runs.
        self.exclude = route['exclude']
        self.handle = route['exceptions']
        super().__init__(route, *args, **kwargs)
| |
# -*- coding: utf-8 -*-
"""
Exception and warning classes used throughout the framework.
Error: Base class; all exceptions should be subclasses of this class.
- NoUsername: Username is not in user-config.py, or it is invalid.
- UserBlocked: Username or IP has been blocked
- AutoblockUser: requested action on a virtual autoblock user not valid
- UserRightsError: insufficient rights for requested action
- BadTitle: Server responded with BadTitle
- InvalidTitle: Invalid page title
- CaptchaError: Captcha is asked and config.solve_captcha == False
- Server504Error: Server timed out with HTTP 504 code
- PageNotFound: Page not found (deprecated)
- i18n.TranslationError: i18n/l10n message not available
- UnknownExtension: Extension is not defined for this site
SiteDefinitionError: Site loading problem
- UnknownSite: Site does not exist in Family
- UnknownFamily: Family is not registered
PageRelatedError: any exception which is caused by an operation on a Page.
- NoPage: Page does not exist
- IsRedirectPage: Page is a redirect page
- IsNotRedirectPage: Page is not a redirect page
- CircularRedirect: Page is a circular redirect
- InterwikiRedirectPage: Page is a redirect to another site
- SectionError: The section specified by # does not exist
- NotEmailableError: The target user has disabled email
- NoMoveTarget: An expected move target page does not exist
PageSaveRelatedError: page exceptions within the save operation on a Page
(alias: PageNotSaved).
- SpamfilterError: MediaWiki spam filter detected a blacklisted URL
- OtherPageSaveError: misc. other save related exception.
- LockedPage: Page is locked
- LockedNoPage: Title is locked against creation
- CascadeLockedPage: Page is locked due to cascading protection
- EditConflict: Edit conflict while uploading the page
- PageDeletedConflict: Page was deleted since being retrieved
- PageCreatedConflict: Page was created by another user
- ArticleExistsConflict: Page article already exists
- NoCreateError: parameter nocreate not allow page creation
ServerError: a problem with the server.
- FatalServerError: A fatal/non-recoverable server error
WikiBaseError: any issue specific to Wikibase.
- CoordinateGlobeUnknownException: globe is not implemented yet.
- EntityTypeUnknownException: entity type is not available on the site.
DeprecationWarning: old functionality replaced by new functionality
PendingDeprecationWarning: problematic code which has not yet been
fully deprecated, possibly because a replacement is not available
RuntimeWarning: problems developers should have fixed, and users need to
be aware of its status.
- tools._NotImplementedWarning: do not use
- NotImplementedWarning: functionality not implemented
UserWarning: warnings targeted at users
- config2._ConfigurationDeprecationWarning: user configuration file problems
- login._PasswordFileWarning: password file problems
- ArgumentDeprecationWarning: command line argument problems
- FamilyMaintenanceWarning: missing information in family definition
"""
#
# (C) Pywikibot team, 2008
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import sys
from pywikibot.tools import UnicodeMixin, _NotImplementedWarning
if sys.version_info[0] > 2:
    # Python 3 has no separate 'unicode' type; alias it so the 2/3-compatible
    # code below can reference 'unicode' unconditionally.
    unicode = str
class NotImplementedWarning(_NotImplementedWarning):
    """Feature that is no longer implemented."""
class ArgumentDeprecationWarning(UserWarning):
    """Command line argument that is no longer supported."""
class FamilyMaintenanceWarning(UserWarning):
    """Family class is missing definitions."""
class Error(UnicodeMixin, Exception):
    """Pywikibot error."""
    # NOTE: UnicodeMixin must be the first base class Error is derived from.
    def __init__(self, arg):
        """Constructor."""
        self.unicode = arg
    def __unicode__(self):
        """Return a unicode string representation."""
        return self.unicode
class PageRelatedError(Error):
    """
    Abstract Exception, used when the exception concerns a particular Page.

    This class should be used when the Exception concerns a particular
    Page, and when a generic message can be written once for all.
    """
    # Preformatted UNICODE message where the page title will be inserted.
    # Override this in subclasses, e.g.
    # u"Oh noes! Page %s is too funky, we should not delete it ;("
    message = None
    def __init__(self, page, message=None):
        """
        Constructor.

        @param page: Page that caused the exception
        @type page: Page object
        """
        if message:
            self.message = message
        if self.message is None:
            raise Error("PageRelatedError is abstract. Can't instantiate it!")
        self.page = page
        self.title = page.title(asLink=True)
        self.site = page.site
        # A message with named placeholders is filled from the instance
        # attributes set above; a plain '%s' message is filled with the page.
        uses_named_fields = '%(' in self.message and ')s' in self.message
        formatted = self.message % (self.__dict__ if uses_named_fields else page)
        super(PageRelatedError, self).__init__(formatted)
    def getPage(self):
        """Return the page related to the exception."""
        return self.page
class PageSaveRelatedError(PageRelatedError):
    """Saving the page has failed."""
    message = u"Page %s was not saved."
    @property
    def args(self):
        """Expose args.

        Maintains backwards compatibility with the old PageNotSaved, which
        inherited from Error (not PageRelatedError) and exposed the normal
        printable 'args'.
        """
        return unicode(self)
class OtherPageSaveError(PageSaveRelatedError):
    """Saving the page has failed due to uncatchable error."""
    message = "Edit to page %(title)s failed:\n%(reason)s"
    def __init__(self, page, reason):
        """Constructor.

        @param reason: Details of the problem
        @type reason: Exception or basestring
        """
        # Stored before the base constructor so %(reason)s can interpolate it.
        self.reason = reason
        super(OtherPageSaveError, self).__init__(page)
    @property
    def args(self):
        """Expose the failure reason as the printable args."""
        return unicode(self.reason)
class NoUsername(Error):
    """Username is not in user-config.py."""
class NoPage(PageRelatedError):
    """Page does not exist."""
    message = u"Page %s doesn't exist."
class NoMoveTarget(PageRelatedError):
    """Expected move target page not found."""
    message = "Move target page of %s not found."
class SiteDefinitionError(Error):
    """Site does not exist."""
# The name 'NoSuchSite' was used for all site related issues,
# and it used message "Site does not exist".
# These are retained for backwards compatibility with scripts.
NoSuchSite = SiteDefinitionError
class UnknownSite(SiteDefinitionError):
    """Site does not exist in Family."""
class UnknownFamily(SiteDefinitionError):
    """Family is not registered."""
class UnknownExtension(Error, NotImplementedError):
    """Extension is not defined."""
class IsRedirectPage(PageRelatedError):
    """Page is a redirect page."""
    message = u"Page %s is a redirect page."
class IsNotRedirectPage(PageRelatedError):
    """Page is not a redirect page."""
    message = u"Page %s is not a redirect page."
class CircularRedirect(PageRelatedError):
    """Page is a circular redirect.

    Exception argument is the redirect target; this may be the same title
    as this page or a different title (in which case the target page directly
    or indirectly redirects back to this one).
    """
    message = u"Page %s is a circular redirect."
class InterwikiRedirectPage(PageRelatedError):
    """
    Page is a redirect to another site.

    This is considered invalid in Pywikibot. See Bug 73184.
    """
    message = (u"Page redirects to a page on another Site.\n"
               u"Page: %(page)s\n"
               u"Target page: %(target_page)s on %(target_site)s.")
    def __init__(self, page, target_page):
        """Constructor.

        @param page: page that redirects to a page on another site
        @type page: Page
        @param target_page: Target page of the redirect.
        @type target_page: Page
        """
        self.target_page = target_page
        self.target_site = target_page.site
        super(InterwikiRedirectPage, self).__init__(page)
class InvalidTitle(Error):
    """Invalid page title."""
class LockedPage(PageSaveRelatedError):
    """Page is locked."""
    message = u"Page %s is locked."
class LockedNoPage(LockedPage):
    """Title is locked against creation."""
    message = u"Page %s does not exist and is locked preventing creation."
class CascadeLockedPage(LockedPage):
    """Page is locked due to cascading protection."""
    message = u"Page %s is locked due to cascading protection."
class SectionError(Error):
    """The section specified by # does not exist."""
# Backwards compatibility alias: scripts historically caught PageNotSaved.
PageNotSaved = PageSaveRelatedError
class NoCreateError(PageSaveRelatedError):
    """Parameter nocreate doesn't allow page creation."""
    message = u"Page %s could not be created due to parameter nocreate"
class EditConflict(PageSaveRelatedError):
    """There has been an edit conflict while uploading the page."""
    message = u"Page %s could not be saved due to an edit conflict"
class PageDeletedConflict(EditConflict):
    """Page was deleted since being retrieved."""
    message = u"Page %s has been deleted since last retrieved."
class PageCreatedConflict(EditConflict):
    """Page was created by another user."""
    message = u"Page %s has been created since last retrieved."
class ArticleExistsConflict(EditConflict):
    """Page already exists."""
    message = u"Destination article %s already exists and is not a redirect to the source article"
class SpamfilterError(PageSaveRelatedError):
    """Page save failed because MediaWiki detected a blacklisted spam URL."""
    message = "Edit to page %(title)s rejected by spam filter due to content:\n%(url)s"
    def __init__(self, page, url):
        """Constructor."""
        # Stored before the base constructor so %(url)s can interpolate it.
        self.url = url
        super(SpamfilterError, self).__init__(page)
class ServerError(Error):
    """Got unexpected server response."""
class FatalServerError(ServerError):
    """A fatal server error that will not be corrected by resending the request."""
# NOTE(review): subclasses Error rather than ServerError, so handlers
# catching ServerError will not see it — confirm this is intended.
class Server504Error(Error):
    """The server timed out with an HTTP 504 response."""
class Server414Error(Error):
    """The server responded with an HTTP 414 (Request-URI Too Long) code."""
class BadTitle(Error):
    """The server responded with BadTitle."""


# UserBlocked exceptions should in general not be caught. If the bot has
# been blocked, the bot operator should address the reason for the block
# before continuing.
class UserBlocked(Error):
    """The username or IP address has been blocked."""
class CaptchaError(Error):
    """A captcha was requested but config.solve_captcha is False."""
class AutoblockUser(Error):
    """An invalid action was requested on a virtual autoblock user.

    Raised whenever an action is requested on a virtual autoblock user
    that is not available for it (i.e. roughly everything except
    unblock).
    """
class UserRightsError(Error):
    """The user lacks the rights required to perform this action."""
class NotEmailableError(PageRelatedError):
    """The target user cannot be emailed."""

    message = "%s is not emailable."
class WikiBaseError(Error):
    """Base class for Wikibase-related errors."""
class CoordinateGlobeUnknownException(WikiBaseError, NotImplementedError):
    """The globe is not implemented yet in either WikiBase or pywikibot."""
class EntityTypeUnknownException(WikiBaseError):
    """The requested entity type is not recognised on this site."""
import pywikibot.data.api
import pywikibot.tools
@pywikibot.tools.deprecated
class DeprecatedPageNotFoundError(Error):
    """Page not found (deprecated)."""
@pywikibot.tools.deprecated
class _EmailUserError(UserRightsError, NotEmailableError):
    """Email-related error (deprecated)."""
# Register deprecated module attributes so that old imports keep working
# while emitting deprecation warnings.
wrapper = pywikibot.tools.ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr('UploadWarning', pywikibot.data.api.UploadWarning)
wrapper._add_deprecated_attr('PageNotFound', DeprecatedPageNotFoundError,
                             warning_message='{0}.{1} is deprecated, and no '
                                             'longer used by pywikibot; use '
                                             'http.fetch() instead.')
wrapper._add_deprecated_attr(
    'UserActionRefuse', _EmailUserError,
    warning_message='UserActionRefuse is deprecated; '
                    'use UserRightsError and/or NotEmailableError')
| |
"""Test cases for the hosting service client support."""
from kgb import SpyAgency
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.hostingsvcs.service import (HostingService,
HostingServiceClient,
HostingServiceHTTPRequest,
HostingServiceHTTPResponse)
from reviewboard.testing.testcase import TestCase
class DummyHTTPRequest(HostingServiceHTTPRequest):
    """HTTP request that returns canned responses instead of opening a URL."""

    def open(self):
        """Simulate performing the request and return a canned response."""
        method = self.method

        # DELETE/HEAD responses carry no payload; everything else returns a
        # fixed JSON document.
        if method in ('DELETE', 'HEAD'):
            payload = None
        else:
            payload = b'{"key": "test response"}'

        if method == 'DELETE':
            status = 204
        elif method == 'POST':
            status = 201
        else:
            status = 200

        return HostingServiceHTTPResponse(
            request=self,
            url=self.url,
            data=payload,
            headers={
                str('Test-header'): str('Value'),
            },
            status_code=status)
class HostingServiceHTTPRequestTests(TestCase):
    """Unit tests for HostingServiceHTTPRequest."""

    def test_init_with_query(self):
        """Testing HostingServiceHTTPRequest construction with query="""
        request = HostingServiceHTTPRequest(
            url='http://example.com?z=1&z=2&baz=true',
            query={
                'foo': 'bar',
                'a': 10,
                'list': ['a', 'b', 'c'],
            })

        # The query dict is merged into the URL's existing query string and
        # serialized in sorted key order, with list values repeated per item.
        self.assertEqual(
            request.url,
            'http://example.com?a=10&baz=true&foo=bar&list=a&list=b&list=c'
            '&z=1&z=2')

    def test_init_with_body_not_bytes(self):
        """Testing HostingServiceHTTPRequest construction with non-bytes body
        """
        account = HostingServiceAccount()
        service = HostingService(account)

        expected_message = (
            'Received non-bytes body for the HTTP request for %r. This is '
            'likely an implementation problem. Please make sure only byte '
            'strings are sent for the request body.'
            % HostingService
        )

        # An int body must be rejected up-front with a helpful TypeError.
        with self.assertRaisesMessage(TypeError, expected_message):
            HostingServiceHTTPRequest(
                url='http://example.com?z=1&z=2&baz=true',
                method='POST',
                body=123,
                hosting_service=service)

    def test_init_with_header_key_not_unicode(self):
        """Testing HostingServiceHTTPRequest construction with non-Unicode
        header key
        """
        account = HostingServiceAccount()
        service = HostingService(account)

        expected_message = (
            'Received non-Unicode header %r (value=%r) for the HTTP request '
            'for %r. This is likely an implementation problem. Please make '
            'sure only Unicode strings are sent in request headers.'
            % (b'My-Header', 'abc', HostingService)
        )

        # Byte-string header names must be rejected.
        with self.assertRaisesMessage(TypeError, expected_message):
            HostingServiceHTTPRequest(
                url='http://example.com?z=1&z=2&baz=true',
                method='POST',
                headers={
                    b'My-Header': 'abc',
                },
                hosting_service=service)

    def test_init_with_header_value_not_unicode(self):
        """Testing HostingServiceHTTPRequest construction with non-Unicode
        header value
        """
        account = HostingServiceAccount()
        service = HostingService(account)

        expected_message = (
            'Received non-Unicode header %r (value=%r) for the HTTP request '
            'for %r. This is likely an implementation problem. Please make '
            'sure only Unicode strings are sent in request headers.'
            % ('My-Header', b'abc', HostingService)
        )

        # Byte-string header values must be rejected as well.
        with self.assertRaisesMessage(TypeError, expected_message):
            HostingServiceHTTPRequest(
                url='http://example.com?z=1&z=2&baz=true',
                method='POST',
                headers={
                    'My-Header': b'abc',
                },
                hosting_service=service)

    def test_add_basic_auth(self):
        """Testing HostingServiceHTTPRequest.add_basic_auth"""
        request = HostingServiceHTTPRequest('http://example.com')

        # The credentials should be base64-encoded into an Authorization
        # header ('username:password' -> 'dXNlcm5hbWU6cGFzc3dvcmQ=').
        request.add_basic_auth(b'username', b'password')

        self.assertEqual(
            request.headers,
            {
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
            })

    def test_get_header(self):
        """Testing HostingServiceHTTPRequest.get_header"""
        request = HostingServiceHTTPRequest(
            'http://example.com',
            headers={
                'Authorization': 'Basic abc123',
                'Content-Length': '123',
            })

        # Header lookups must be case-insensitive.
        self.assertEqual(request.get_header('Authorization'), 'Basic abc123')
        self.assertEqual(request.get_header('AUTHORIZATION'), 'Basic abc123')
        self.assertEqual(request.get_header('authorization'), 'Basic abc123')

        self.assertEqual(request.get_header('Content-Length'), '123')
        self.assertEqual(request.get_header('CONTENT-LENGTH'), '123')
        self.assertEqual(request.get_header('content-length'), '123')
class HostingServiceHTTPResponseTests(TestCase):
    """Unit tests for HostingServiceHTTPResponse."""

    def test_json(self):
        """Testing HostingServiceHTTPResponse.json"""
        request = HostingServiceHTTPRequest('http://example.com')

        # A JSON payload should be deserialized on access.
        response = HostingServiceHTTPResponse(request=request,
                                              url='http://example.com',
                                              data=b'{"a": 1, "b": 2}',
                                              headers={},
                                              status_code=200)

        self.assertEqual(response.json, {'a': 1, 'b': 2})

    def test_json_with_non_json_response(self):
        """Testing HostingServiceHTTPResponse.json with non-JSON response"""
        request = HostingServiceHTTPRequest('http://example.com')

        # Unparseable payloads must surface a ValueError on access.
        response = HostingServiceHTTPResponse(request=request,
                                              url='http://example.com',
                                              data=b'{[(',
                                              headers={},
                                              status_code=200)

        with self.assertRaises(ValueError):
            response.json

    def test_get_header(self):
        """Testing HostingServiceHTTPRequest.get_header"""
        request = HostingServiceHTTPRequest('http://example.com')
        response = HostingServiceHTTPResponse(
            request=request,
            url=request.url,
            status_code=200,
            data=b'',
            headers={
                str('Authorization'): str('Basic abc123'),
                str('Content-Length'): str('123'),
            })

        # Header lookups must be case-insensitive.
        for header_name in ('Authorization', 'AUTHORIZATION',
                            'authorization'):
            self.assertEqual(response.get_header(header_name),
                             'Basic abc123')

        for header_name in ('Content-Length', 'CONTENT-LENGTH',
                            'content-length'):
            self.assertEqual(response.get_header(header_name), '123')
class HostingServiceClientTests(SpyAgency, TestCase):
    """Unit tests for HostingServiceClient

    Every test follows the same shape: spy on ``build_http_request``,
    invoke the client method under test, then check the returned
    response/payload, the spy call, and the request that was built. The
    shared assertion stanzas live in the ``_check_*`` helpers below; the
    individual tests only supply the values that differ.
    """

    def setUp(self):
        super(HostingServiceClientTests, self).setUp()

        account = HostingServiceAccount()
        service = HostingService(account)

        self.client = HostingServiceClient(service)

        # Swap in a request class that returns canned responses instead of
        # performing real network I/O.
        self.client.http_request_cls = DummyHTTPRequest

    def test_http_delete(self):
        """Testing HostingServiceClient.http_delete"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_delete(
            url='http://example.com',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        # DummyHTTPRequest returns 204 with no payload for DELETE.
        self._check_http_response(response, status_code=204, data=None)

        # http_delete passes credentials through as a dict rather than
        # individual username/password keyword arguments.
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=None,
            headers={
                'Foo': 'bar',
            },
            credentials={
                'username': 'username',
                'password': 'password',
            })

        self._check_built_request(
            method='DELETE',
            data=None,
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Foo': 'bar',
            })

    def test_http_get(self):
        """Testing HostingServiceClient.http_get"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_get(
            url='http://example.com',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_http_response(response, status_code=200,
                                  data=b'{"key": "test response"}')
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=None,
            headers={
                'Foo': 'bar',
            },
            method='GET',
            username='username',
            password='password')
        self._check_built_request(
            method='GET',
            data=None,
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Foo': 'bar',
            })

    def test_http_head(self):
        """Testing HostingServiceClient.http_head"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_head(
            url='http://example.com',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        # HEAD responses have no payload but keep the 200 status.
        self._check_http_response(response, status_code=200, data=None)
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=None,
            headers={
                'Foo': 'bar',
            },
            method='HEAD',
            username='username',
            password='password')
        self._check_built_request(
            method='HEAD',
            data=None,
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Foo': 'bar',
            })

    def test_http_post_with_body_unicode(self):
        """Testing HostingServiceClient.http_post with body as Unicode"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_post(
            url='http://example.com',
            body='test body\U0001f60b',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_http_response(response, status_code=201,
                                  data=b'{"key": "test response"}')

        # The Unicode body must be UTF-8-encoded and a Content-Length
        # computed from the encoded length.
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=b'test body\xf0\x9f\x98\x8b',
            headers={
                'Content-Length': '13',
                'Foo': 'bar',
            },
            method='POST',
            username='username',
            password='password')
        self._check_built_request(
            method='POST',
            data=b'test body\xf0\x9f\x98\x8b',
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Content-length': '13',
                'Foo': 'bar',
            })

    def test_http_post_with_body_bytes(self):
        """Testing HostingServiceClient.http_post with body as bytes"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_post(
            url='http://example.com',
            body=b'test body\x01\x02\x03',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_http_response(response, status_code=201,
                                  data=b'{"key": "test response"}')
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=b'test body\x01\x02\x03',
            headers={
                'Content-Length': '12',
                'Foo': 'bar',
            },
            method='POST',
            username='username',
            password='password')
        self._check_built_request(
            method='POST',
            data=b'test body\x01\x02\x03',
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Content-length': '12',
                'Foo': 'bar',
            })

    def test_http_put_with_body_unicode(self):
        """Testing HostingServiceClient.http_put with body as Unicode"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_put(
            url='http://example.com',
            body='test body\U0001f60b',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_http_response(response, status_code=200,
                                  data=b'{"key": "test response"}')
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=b'test body\xf0\x9f\x98\x8b',
            headers={
                'Content-Length': '13',
                'Foo': 'bar',
            },
            method='PUT',
            username='username',
            password='password')
        self._check_built_request(
            method='PUT',
            data=b'test body\xf0\x9f\x98\x8b',
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Content-length': '13',
                'Foo': 'bar',
            })

    def test_http_put_with_body_bytes(self):
        """Testing HostingServiceClient.http_put with body as bytes"""
        self.spy_on(self.client.build_http_request)

        response = self.client.http_put(
            url='http://example.com',
            body=b'test body\x01\x02\x03',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_http_response(response, status_code=200,
                                  data=b'{"key": "test response"}')
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=b'test body\x01\x02\x03',
            headers={
                'Content-Length': '12',
                'Foo': 'bar',
            },
            method='PUT',
            username='username',
            password='password')
        self._check_built_request(
            method='PUT',
            data=b'test body\x01\x02\x03',
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Content-length': '12',
                'Foo': 'bar',
            })

    def test_http_request(self):
        """Testing HostingServiceClient.http_request"""
        self.spy_on(self.client.build_http_request)

        # Arbitrary HTTP methods must be passed through unmodified.
        response = self.client.http_request(
            url='http://example.com',
            body=b'test',
            headers={
                'Foo': 'bar',
            },
            method='BAZ',
            username='username',
            password='password')

        self._check_http_response(response, status_code=200,
                                  data=b'{"key": "test response"}')
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=b'test',
            headers={
                'Foo': 'bar',
            },
            method='BAZ',
            username='username',
            password='password')
        self._check_built_request(
            method='BAZ',
            data=b'test',
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Foo': 'bar',
            })

    def test_build_http_request(self):
        """Testing HostingServiceClient.build_http_request"""
        request = self.client.build_http_request(
            url='http://example.com',
            body=b'test',
            method='POST',
            credentials={},
            headers={
                'Foo': 'bar',
            })

        # With no credentials, no Authorization header should be added.
        self.assertEqual(request.url, 'http://example.com')
        self.assertEqual(request.data, b'test')
        self.assertEqual(request.method, 'POST')
        self.assertEqual(
            request.headers,
            {
                'Foo': 'bar',
            })

    def test_build_http_request_with_basic_auth(self):
        """Testing HostingServiceClient.build_http_request with username and
        password
        """
        request = self.client.build_http_request(
            url='http://example.com',
            body=b'test',
            method='POST',
            headers={
                'Foo': 'bar',
            },
            credentials={
                'username': 'username',
                'password': 'password',
            })

        self.assertEqual(request.url, 'http://example.com')
        self.assertEqual(request.data, b'test')
        self.assertEqual(request.method, 'POST')
        self.assertEqual(
            request.headers,
            {
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Foo': 'bar',
            })

    def test_json_delete(self):
        """Testing HostingServiceClient.json_delete"""
        self.spy_on(self.client.build_http_request)

        rsp, headers = self.client.json_delete(
            url='http://example.com',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_json_payload(rsp, headers, expected_rsp=None)
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=None,
            headers={
                'Foo': 'bar',
            },
            credentials={
                'username': 'username',
                'password': 'password',
            })
        self._check_built_request(
            method='DELETE',
            data=None,
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Foo': 'bar',
            })

    def test_json_get(self):
        """Testing HostingServiceClient.json_get"""
        self.spy_on(self.client.build_http_request)

        rsp, headers = self.client.json_get(
            url='http://example.com',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_json_payload(
            rsp, headers,
            expected_rsp={
                'key': 'test response',
            })
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=None,
            headers={
                'Foo': 'bar',
            },
            method='GET',
            username='username',
            password='password')
        self._check_built_request(
            method='GET',
            data=None,
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Foo': 'bar',
            })

    def test_json_post_with_body_unicode(self):
        """Testing HostingServiceClient.json_post with body as Unicode"""
        self.spy_on(self.client.build_http_request)

        rsp, headers = self.client.json_post(
            url='http://example.com',
            body='test body\U0001f60b',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_json_payload(
            rsp, headers,
            expected_rsp={
                'key': 'test response',
            })
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=b'test body\xf0\x9f\x98\x8b',
            headers={
                'Content-Length': '13',
                'Foo': 'bar',
            },
            method='POST',
            username='username',
            password='password')
        self._check_built_request(
            method='POST',
            data=b'test body\xf0\x9f\x98\x8b',
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Content-length': '13',
                'Foo': 'bar',
            })

    def test_json_post_with_body_bytes(self):
        """Testing HostingServiceClient.json_post with body as bytes"""
        self.spy_on(self.client.build_http_request)

        rsp, headers = self.client.json_post(
            url='http://example.com',
            body=b'test body\x01\x02\x03',
            headers={
                'Foo': 'bar',
            },
            username='username',
            password='password')

        self._check_json_payload(
            rsp, headers,
            expected_rsp={
                'key': 'test response',
            })
        self.assertSpyCalledWith(
            self.client.build_http_request,
            url='http://example.com',
            body=b'test body\x01\x02\x03',
            headers={
                'Content-Length': '12',
                'Foo': 'bar',
            },
            method='POST',
            username='username',
            password='password')
        self._check_built_request(
            method='POST',
            data=b'test body\x01\x02\x03',
            headers={
                'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                'Content-length': '12',
                'Foo': 'bar',
            })

    def _check_http_response(self, response, status_code, data):
        """Check the state of a response returned from an http_* method.

        Args:
            response (HostingServiceHTTPResponse):
                The response returned from the client.

            status_code (int):
                The expected HTTP status code.

            data (bytes):
                The expected payload, or ``None`` if no payload is expected.
        """
        self.assertIsInstance(response, HostingServiceHTTPResponse)
        self.assertEqual(response.url, 'http://example.com')

        if data is None:
            self.assertIsNone(response.data)
        else:
            self.assertEqual(response.data, data)

        self.assertEqual(response.status_code, status_code)
        self.assertIsInstance(response.headers, dict)
        self.assertEqual(
            response.headers,
            {
                str('Test-header'): str('Value'),
            })

        # The response must also unpack as a legacy (data, headers) 2-tuple.
        unpacked_data, unpacked_headers = response
        self.assertEqual(unpacked_data, response.data)
        self.assertEqual(unpacked_headers, response.headers)

    def _check_json_payload(self, rsp, headers, expected_rsp):
        """Check the (rsp, headers) pair returned from a json_* method.

        Args:
            rsp (object):
                The deserialized payload returned from the client.

            headers (dict):
                The response headers returned from the client.

            expected_rsp (object):
                The expected deserialized payload, or ``None``.
        """
        if expected_rsp is None:
            self.assertIsNone(rsp)
        else:
            self.assertEqual(rsp, expected_rsp)

        self.assertIsInstance(headers, dict)
        self.assertEqual(
            headers,
            {
                str('Test-header'): str('Value'),
            })

    def _check_built_request(self, method, data, headers):
        """Check the HTTP request built for the most recent client call.

        Args:
            method (unicode):
                The expected HTTP method.

            data (bytes):
                The expected request payload, or ``None``.

            headers (dict):
                The exact headers expected on the built request.
        """
        request = self.client.build_http_request.last_call.return_value

        if data is None:
            self.assertIsNone(request.data)
        else:
            self.assertEqual(request.data, data)

        self.assertEqual(request.url, 'http://example.com')
        self.assertEqual(request.method, method)
        self.assertIsInstance(request.headers, dict)
        self.assertEqual(request.headers, headers)
| |
# coding=utf-8
# Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ConvNext model."""
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
from ...modeling_tf_utils import (
TFModelInputType,
TFPreTrainedModel,
TFSequenceClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
)
from ...utils import logging
from .configuration_convnext import ConvNextConfig
# Module-level logger for this model file.
logger = logging.get_logger(__name__)

# Names substituted into the auto-generated API docstrings.
_CONFIG_FOR_DOC = "ConvNextConfig"
_CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
class TFConvNextDropPath(tf.keras.layers.Layer):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    References:
        (1) github.com:rwightman/pytorch-image-models
    """

    def __init__(self, drop_path, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x, training=None):
        # Stochastic depth is a no-op at inference time.
        if not training:
            return x

        keep_prob = 1 - self.drop_path

        # Per-sample binary mask, broadcastable over all non-batch dims.
        mask_shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
        mask = tf.floor(keep_prob + tf.random.uniform(mask_shape, 0, 1))

        # Rescale kept activations so the expected value is unchanged.
        return (x / keep_prob) * mask
class TFConvNextEmbeddings(tf.keras.layers.Layer):
    """Patch embedding: a strided convolution followed by layer normalization.

    This class is comparable to (and inspired by) the SwinEmbeddings class
    found in src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.patch_embeddings = tf.keras.layers.Conv2D(
            filters=config.hidden_sizes[0],
            kernel_size=config.patch_size,
            strides=config.patch_size,
            name="patch_embeddings",
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
        )
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")

    def call(self, pixel_values):
        # Allow dict-style inputs (e.g. from feature extractors).
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values["pixel_values"]

        # `tf.keras.layers.Conv2D` doesn't support `NCHW` on CPU, so convert
        # the input from `NCHW` to `NHWC`:
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))

        return self.layernorm(self.patch_embeddings(pixel_values))
class TFConvNextLayer(tf.keras.layers.Layer):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in
    (N, C, H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
    NHWC ordering, we can just apply the operations straight-away without the permutation.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        # 7x7 depthwise convolution (groups == channels).
        self.dwconv = tf.keras.layers.Conv2D(
            filters=dim,
            kernel_size=7,
            padding="same",
            groups=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="dwconv",
        )  # depthwise conv
        self.layernorm = tf.keras.layers.LayerNormalization(
            epsilon=1e-6,
            name="layernorm",
        )
        self.pwconv1 = tf.keras.layers.Dense(
            units=4 * dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv1",
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = get_tf_activation(config.hidden_act)
        self.pwconv2 = tf.keras.layers.Dense(
            units=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv2",
        )
        # Using `layers.Activation` instead of `tf.identity` to better control `training`
        # behaviour.
        self.drop_path = (
            TFConvNextDropPath(drop_path, name="drop_path")
            if drop_path > 0.0
            else tf.keras.layers.Activation("linear", name="drop_path")
        )

    def build(self, input_shape: tf.TensorShape):
        # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
        # Layer scale is disabled entirely when layer_scale_init_value <= 0.
        self.layer_scale_parameter = (
            self.add_weight(
                shape=(self.dim,),
                initializer=tf.keras.initializers.Constant(value=self.config.layer_scale_init_value),
                trainable=True,
                name="layer_scale_parameter",
            )
            if self.config.layer_scale_init_value > 0
            else None
        )
        super().build(input_shape)

    def call(self, hidden_states, training=False):
        # Keep the block input around for the residual connection below.
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)

        # Optional learnable per-channel scaling of the residual branch.
        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x

        # Residual connection with (optional) stochastic depth on the branch.
        x = input + self.drop_path(x, training=training)

        return x
class TFConvNextStage(tf.keras.layers.Layer):
    """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
    """

    def __init__(
        self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None, **kwargs
    ):
        super().__init__(**kwargs)
        # Downsample whenever the channel count or the spatial resolution
        # changes between stages; otherwise pass the input through unchanged.
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = [
                tf.keras.layers.LayerNormalization(
                    epsilon=1e-6,
                    name="downsampling_layer.0",
                ),
                # Inputs to this layer will follow NHWC format since we
                # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
                # layer. All the outputs throughout the model will be in NHWC
                # from this point on until the output where we again change to
                # NCHW.
                tf.keras.layers.Conv2D(
                    filters=out_channels,
                    kernel_size=kernel_size,
                    strides=stride,
                    kernel_initializer=get_initializer(config.initializer_range),
                    bias_initializer="zeros",
                    name="downsampling_layer.1",
                ),
            ]
        else:
            self.downsampling_layer = [tf.identity]

        # NOTE(review): `drop_path_rates` is indexed per block below, so it
        # must be a sequence with at least `depth` entries — confirm callers
        # pass a per-stage slice, not a single scalar rate.
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = [
            TFConvNextLayer(
                config,
                dim=out_channels,
                drop_path=drop_path_rates[j],
                name=f"layers.{j}",
            )
            for j in range(depth)
        ]

    def call(self, hidden_states):
        # Optional downsampling first, then the stack of residual blocks.
        for layer in self.downsampling_layer:
            hidden_states = layer(hidden_states)

        for layer in self.layers:
            hidden_states = layer(hidden_states)

        return hidden_states
class TFConvNextEncoder(tf.keras.layers.Layer):
    """Stack of ConvNext stages, one per entry in ``config.depths``."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []

        # Per-block stochastic depth rates, increasing linearly from 0 to
        # config.drop_path_rate over all residual blocks in the network.
        # Materialize as Python floats so TFConvNextStage receives plain
        # numbers rather than scalar tensors.
        drop_path_rates = [
            float(rate)
            for rate in tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
        ]

        cur = 0
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = TFConvNextStage(
                config,
                in_channels=prev_chs,
                out_channels=out_chs,
                stride=2 if i > 0 else 1,
                depth=config.depths[i],
                # Bug fix: each stage needs its own *slice* of the schedule
                # (one rate per block). The previous code passed the single
                # scalar `drop_path_rates[cur]`, which TFConvNextStage then
                # tried to index per block — broken whenever
                # config.drop_path_rate > 0.
                drop_path_rates=drop_path_rates[cur:cur + config.depths[i]],
                name=f"stages.{i}",
            )
            self.stages.append(stage)
            cur += config.depths[i]
            prev_chs = out_chs

    def call(self, hidden_states, output_hidden_states=False, return_dict=True):
        """Run all stages, optionally collecting the per-stage hidden states.

        Hidden states are recorded *before* each stage plus once after the
        final stage, matching the PyTorch implementation.
        """
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            hidden_states = layer_module(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
@keras_serializable
class TFConvNextMainLayer(tf.keras.layers.Layer):
    """Core ConvNext model: embeddings -> encoder -> final layer norm + pooling.

    Returns NCHW tensors (transposed back from the internal NHWC layout) in
    both the dict and tuple return paths.
    """

    config_class = ConvNextConfig

    def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embeddings = TFConvNextEmbeddings(config, name="embeddings")
        self.encoder = TFConvNextEncoder(config, name="encoder")
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")

        # We are setting the `data_format` like so because from here on we will
        # revert to the NCHW output format
        self.pooler = tf.keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None

    def call(
        self,
        pixel_values: Optional[TFModelInputType] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
        **kwargs,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )

        # `input_processing` is generic and names the main input `input_ids`;
        # map it back to `pixel_values` for this vision model.
        if "input_ids" in inputs:
            inputs["pixel_values"] = inputs.pop("input_ids")

        if inputs["pixel_values"] is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(inputs["pixel_values"], training=inputs["training"])

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=inputs["training"],
        )

        last_hidden_state = encoder_outputs[0]

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # NOTE(review): this assumes `add_pooling_layer=True`; with
        # `self.pooler is None` the line below would fail — confirm callers.
        pooled_output = self.layernorm(self.pooler(last_hidden_state))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple(tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1])

        if not return_dict:
            # Bug fix: this path previously appended `encoder_outputs[1:]`,
            # which holds the raw *NHWC* hidden states — inconsistent with
            # the return_dict path below, which reports the NCHW-transposed
            # ones. Return the transposed hidden states in both paths.
            if output_hidden_states:
                return (last_hidden_state, pooled_output, hidden_states)

            return (last_hidden_state, pooled_output)

        return TFBaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFConvNextPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvNextConfig
    base_model_prefix = "convnext"
    main_input_name = "pixel_values"

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.

        Returns:
            `Dict[str, tf.Tensor]`: The dummy inputs.
        """
        # A batch of 3 random images in NCHW layout, sized from the config.
        VISION_DUMMY_INPUTS = tf.random.uniform(
            shape=(
                3,
                self.config.num_channels,
                self.config.image_size,
                self.config.image_size,
            ),
            dtype=tf.float32,
        )
        return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)}

    @tf.function(
        input_signature=[
            {
                "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.

        Args:
            inputs (`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        return self.call(inputs)
CONVNEXT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
</Tip>
Parameters:
config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
# Shared `call` docstring injected via `add_start_docstrings_to_model_forward`.
# Fixed the malformed markup in the `pixel_values` type list (missing comma and
# a stray double backtick around `Dict[str, tf.Tensor]`).
CONVNEXT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`ConvNextFeatureExtractor`]. See
            [`ConvNextFeatureExtractor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
            in eager mode, in graph mode the value will always be set to True.
"""
@add_start_docstrings(
    "The bare ConvNext model outputting raw features without any specific head on top.",
    CONVNEXT_START_DOCSTRING,
)
class TFConvNextModel(TFConvNextPreTrainedModel):
    def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # All real work happens in the main layer; this class adds docs/IO glue.
        self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext")

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: Optional[TFModelInputType] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
        **kwargs,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import ConvNextFeatureExtractor, TFConvNextModel
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> feature_extractor = ConvNextFeatureExtractor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = feature_extractor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        # Resolve defaults from the config when the caller did not specify them.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # `pixel_values` is routed through `input_processing` under the
        # `input_ids` key (shared helper for all TF models) and renamed back.
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )
        if "input_ids" in inputs:
            inputs["pixel_values"] = inputs.pop("input_ids")
        if inputs["pixel_values"] is None:
            raise ValueError("You have to specify pixel_values")
        outputs = self.convnext(
            pixel_values=inputs["pixel_values"],
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=inputs["training"],
        )
        if not return_dict:
            # Tuple path: `outputs` is already the tuple from the main layer.
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPooling(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    CONVNEXT_START_DOCSTRING,
)
class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.convnext = TFConvNextMainLayer(config, name="convnext")

        # Classifier head
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="classifier",
        )

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: Optional[TFModelInputType] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
        **kwargs,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import ConvNextFeatureExtractor, TFConvNextForImageClassification
        >>> import tensorflow as tf
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> feature_extractor = ConvNextFeatureExtractor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = feature_extractor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
        >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
        ```"""
        # Resolve defaults from the config when the caller did not specify them.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # `pixel_values` is routed through `input_processing` under the
        # `input_ids` key and renamed back below; `labels` passes through too.
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            labels=labels,
            training=training,
            kwargs_call=kwargs,
        )
        if "input_ids" in inputs:
            inputs["pixel_values"] = inputs.pop("input_ids")
        if inputs["pixel_values"] is None:
            raise ValueError("You have to specify pixel_values")
        outputs = self.convnext(
            inputs["pixel_values"],
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=inputs["training"],
        )
        # NOTE(review): the tuple index below assumes the main layer's tuple
        # layout is (last_hidden_state, pooled_output, hidden_states...).
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None if inputs["labels"] is None else self.hf_compute_loss(labels=inputs["labels"], logits=logits)
        if not inputs["return_dict"]:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a AmsterdamCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a AmsterdamCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
#!/usr/bin/env python
"""
Differential evolution test program.
Usage:
de.py [options]
Options:
-h, --help Show this message and exit.
-n N Number of generations in DE. [default: 20]
--print-level LEVEL
Print verbose level. [default: 1]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
from numpy import exp, sin, cos
import random
import copy
from multiprocessing import Process, Pool
from time import time
__author__ = "RYO KOBAYASHI"
__version__ = "190904"

# Output file names for the per-generation and per-individual DE logs.
_fname_gen = 'out.de.generations'
_fname_ind = 'out.de.individuals'
def test_func(var, vranges, **kwargs):
x,y= var
res= x**2 +y**2 +100.0*exp(-x**2 -y**2)*sin(2.0*(x+y))*cos(2*(x-y)) \
+80.0*exp(-(x-1)**2 -(y-1)**2)*cos(x+4*y)*sin(2*x-y) \
+200.0*sin(x+y)*exp(-(x-3)**2-(y-1)**2)
return res
def test_write_func(vs,vrs,fname,**kwargs):
with open(fname,'w') as f:
for i,v in enumerate(vs):
vr = vrs[i]
f.write(' {0:10.3f} {1:10.3f} {2:10.3f}\n'.format(v,*vr))
return None
def wrap(vs, vrs):
    """Clamp every variable into its (vmin, vmax) range.

    Returns a shallow copy of `vs`; the input sequence itself is not modified.
    """
    clipped = copy.copy(vs)
    for i, v in enumerate(clipped):
        vmin, vmax = vrs[i]
        clipped[i] = min(max(v, vmin), vmax)
    return clipped
class Individual:
    """
    Individual class that consists of variables as vector elements.
    """

    def __init__(self, iid, ndim, vranges, loss_func):
        # iid: unique individual id; ndim: number of variables;
        # vranges: per-variable (min, max) pairs; loss_func: callable to minimize.
        self.iid = iid
        self.ndim = ndim
        self.loss_func = loss_func
        self.vector = np.zeros(self.ndim)
        self.vranges = vranges
        self.val = None  # cached loss value; None until evaluated

    def set_variable(self,variables):
        """Replace the variable vector, clamping it into the allowed ranges."""
        if len(variables) != len(self.vector):
            raise ValueError()
        self.vector = variables
        self.wrap_range()
        # Variables changed, so the cached loss value is no longer valid.
        self.val = None
        return None

    def init_random(self):
        """Initialize every variable uniformly at random inside its range."""
        for i in range(self.ndim):
            vmin, vmax = self.vranges[i]
            v = random.random()*(vmax -vmin) +vmin
            self.vector[i] = v
        self.wrap_range()
        self.val = None
        return None

    def wrap_range(self):
        # Clamp the vector into the variable ranges (module-level `wrap`).
        self.vector = wrap(self.vector, self.vranges)

    def calc_loss_func(self,kwargs):
        """
        Compute loss function value using self.loss_func function given in the constructor.

        Returns (value, index): `index` comes from kwargs so that results
        collected asynchronously from a process pool can be matched back to
        the individual that produced them.
        """
        val = self.loss_func(self.vector, **kwargs)
        return val, kwargs['index']
class DE:
    """
    Differential evolution (DE/rand/1/bin) optimizer with a Metropolis-like
    acceptance criterion controlled by the temperature T.

    Fixes vs. the previous revision:
    - the worker Pool was a local variable of __init__ but referenced in
      run(), raising NameError; it is now stored as self.pool;
    - partner selection for each trial vector excluded a stale loop variable
      `i` instead of the current parent index `ip`.
    """

    def __init__(self, N, F, CR, T, variables, vranges, loss_func, write_func,
                 nproc=0, **kwargs):
        """
        Constructor of DE class.

        N:  population size; must be > 3 so three distinct partners exist.
        F:  differential weight used to mix difference vectors.
        CR: cross-over rate.
        T:  temperature (kT) used to compute the adoption probability.
        loss_func:  loss function to be minimized, called as loss_func(vector, **kwargs).
        write_func: callback used to persist a variable vector to a file.
        nproc: number of worker processes; 0 means use all available cores.
        """
        if N < 4:
            raise ValueError('N must be greater than 3 in DE!')
        self.N = N    # Number of individuals in a generation
        self.F = F    # Fraction of mixing in DE
        self.CR = CR  # Cross-over rate
        self.T = T    # Temperature (kT) to compute adoption probability
        self.nproc = nproc
        self.ndim = len(variables)
        self.vs = variables
        self.vrs = vranges
        self.loss_func = loss_func
        self.write_func = write_func
        self.kwargs = kwargs
        self.bestind = None
        self.print_level = kwargs.get('print_level', 0)
        # Keep the worker pool on the instance so run() can reuse it
        # (previously a local `pool`, which made run() fail with NameError).
        if self.nproc > 0:  # use specified number of cores
            self.pool = Pool(processes=self.nproc)
        else:
            self.pool = Pool()
        # ...initialize population: individual 0 keeps the supplied variables,
        # the rest start from random points inside the ranges.
        self.population = []
        self.iidmax = 0
        for i in range(N):
            self.iidmax += 1
            ind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
            if i == 0:
                ind.set_variable(self.vs)
            else:
                ind.init_random()
            self.population.append(ind)
        # ...evaluate loss function values of the initial population in parallel
        prcs = []
        for ip, pi in enumerate(self.population):
            kwtmp = copy.copy(self.kwargs)
            kwtmp['index'] = ip
            kwtmp['iid'] = pi.iid
            prcs.append(self.pool.apply_async(pi.calc_loss_func, (kwtmp,)))
        results = [res.get() for res in prcs]
        # calc_loss_func returns (value, index) so async results can be matched.
        for val, ip in results:
            self.population[ip].val = val
        self.keep_best()
        if self.print_level > 2:
            for pi in self.population:
                self.write_variables(pi,
                                     fname='in.vars.fitpot.{0:d}'.format(pi.iid),
                                     **self.kwargs)
        else:
            self.write_variables(self.bestind,
                                 fname='in.vars.fitpot.{0:d}'.format(self.bestind.iid),
                                 **self.kwargs)
        return None

    def keep_best(self):
        """Track the best individual seen so far (as a deep copy, so later
        mutation of the population cannot corrupt it)."""
        vals = []
        for pi in self.population:
            if pi.val is None:
                raise ValueError('Something went wrong.')
            vals.append(pi.val)
        minval = min(vals)
        if self.bestind is None or minval < self.bestind.val:
            idx = vals.index(minval)
            self.bestind = copy.deepcopy(self.population[idx])
        return None

    def run(self, maxiter=100):
        """
        Perform DE for `maxiter` generations.

        Per-generation data is appended to out.de.generations and
        out.de.individuals, and the best variables are written out at the end.
        """
        start = self.kwargs.get('start', time())
        fgen = open(_fname_gen, 'w')
        find = open(_fname_ind, 'w')
        # Log the initial population as generation 0.
        for ind in self.population:
            fgen.write(' 0 {0:8d} {1:12.4e}\n'.format(ind.iid, ind.val))
            find.write(' {0:8d} {1:12.4e}'.format(ind.iid, ind.val))
            for vj in ind.vector:
                find.write(' {0:11.3e}'.format(vj))
            find.write('\n')
        if self.print_level > 0:
            print(' step,time,best,vars= {0:6d} {1:8.1f} {2:8.4f}'.format(0, time()-start,
                                                                          self.bestind.val), end="")
            for i in range(min(16, self.ndim)):
                print(' {0:6.3f}'.format(self.bestind.vector[i]), end="")
            print('', flush=True)
        for it in range(maxiter):
            # ...create one trial (candidate) per parent via mutation + cross-over
            candidates = []
            for ip, pi in enumerate(self.population):
                vi = pi.vector
                # ...pick 3 distinct partners, excluding the parent itself
                # (BUG FIX: previously excluded a stale variable `i`, not `ip`)
                indices = [j for j in range(self.N) if j != ip]
                irand = int(random.random()*len(indices))
                i1 = indices.pop(irand)
                irand = int(random.random()*len(indices))
                i2 = indices.pop(irand)
                irand = int(random.random()*len(indices))
                i3 = indices.pop(irand)
                v1 = self.population[i1].vector
                v2 = self.population[i2].vector
                v3 = self.population[i3].vector
                vd = v1 +self.F *(v2 -v3)
                # ...binomial cross-over: keep the parent's component with
                # probability (1 - CR)
                vnew = np.array(vd)
                for k in range(len(vi)):
                    r = random.random()
                    if r > self.CR:
                        vnew[k] = vi[k]
                # ...create new individual for trial
                self.iidmax += 1
                newind = Individual(self.iidmax, self.ndim, self.vrs, self.loss_func)
                newind.set_variable(vnew)
                candidates.append(newind)
            # ...evaluate loss function values of candidates in parallel
            prcs = []
            for ic, ci in enumerate(candidates):
                kwtmp = copy.copy(self.kwargs)
                kwtmp['index'] = ic
                kwtmp['iid'] = ci.iid
                prcs.append(self.pool.apply_async(ci.calc_loss_func, (kwtmp,)))
            results = [res.get() for res in prcs]
            for val, ic in results:
                candidates[ic].val = val
            # ...check best
            # NOTE(review): the best is kept by reference here (no deepcopy,
            # unlike keep_best); a later adoption can alias it — confirm intended.
            for ic, ci in enumerate(candidates):
                if ci.val < self.bestind.val:
                    self.bestind = ci
                    self.write_variables(ci,
                                         fname='in.vars.fitpot.{0:d}'.format(ci.iid),
                                         **self.kwargs)
            if self.print_level > 2:
                for ci in candidates:
                    self.write_variables(ci,
                                         fname='in.vars.fitpot.{0:d}'.format(ci.iid),
                                         **self.kwargs)
            # ...decide whether or not to adopt each candidate: always when it
            # improves on its parent, otherwise with Boltzmann probability
            # exp(-dval/T) (never, when T == 0).
            for ic, ci in enumerate(candidates):
                pi = self.population[ic]
                dval = ci.val -pi.val
                if dval < 0.0:
                    prob = 1.0
                elif self.T > 0.0:
                    prob = np.exp(-dval/self.T)
                else:
                    prob = 0.0
                r = random.random()
                if r < prob:  # replace with new individual
                    self.population[ic] = ci
                    find.write(' {0:8d} {1:12.4e}'.format(ci.iid, ci.val))
                    for vk in ci.vector:
                        find.write(' {0:11.3e}'.format(vk))
                    find.write('\n')
            if self.print_level > 0:
                print(' step,time,best,vars= {0:6d} {1:8.1f} {2:8.4f}'.format(it+1, time()-start,
                                                                              self.bestind.val), end="")
                for i in range(min(16, self.ndim)):
                    print(' {0:6.3f}'.format(self.bestind.vector[i]), end="")
                print('', flush=True)
            for ind in self.population:
                fgen.write(' {0:5d} {1:8d} {2:12.4e}\n'.format(it+1, ind.iid, ind.val))
        fgen.close()
        find.close()
        # ...finally write out the best one
        self.write_variables(self.bestind, fname='in.vars.fitpot.best', **self.kwargs)
        return None

    def write_variables(self, ind, fname='in.vars.fitpot', **kwargs):
        """Persist an individual's variables and ranges via the user-supplied write_func."""
        vs = ind.vector
        vrs = ind.vranges
        self.write_func(vs, vrs, fname, **kwargs)
        return None
if __name__ == "__main__":
    # Command-line driver: run DE on the 2-D benchmark function.
    args = docopt(__doc__)
    n = int(args['-n'])
    kwargs = {'print_level': int(args['--print-level'])}
    vs = np.array([1.0, -0.5])
    vrs = np.array([[-1.0, 2.0], [-1.0, 1.0]])
    de = DE(10, 0.8, 0.5, 1.0, vs, vrs, test_func, test_write_func, **kwargs)
    de.run(n)
| |
import re
from .constants import OPERATORS, INDENT
from .element import HTMLElement
from . import utils
class StripOuter(Exception):
    """Raised by a node to hand its rendered HTML up to the parent, signalling
    that the surrounding whitespace should be stripped."""

    def __init__(self, html, *args, **kwargs):
        super(StripOuter, self).__init__(*args, **kwargs)
        # The already-rendered HTML the parent should splice in.
        self.html = html
class Node(object):
    """Base class for all HAML nodes; parses nested HAML lines into child nodes."""

    # Subclasses (the filter nodes) set this False to keep nested lines raw.
    PARSE = True

    @staticmethod
    def create(parser, haml, nested_haml='', parent=None, indentation=-1):
        """Factory: pick the node class whose operator prefixes the HAML line."""
        haml = haml.strip()
        # Order matters: more specific operators must be tested before shorter
        # ones sharing a prefix (e.g. '-#' before '-').
        NODES = [
            (HAMLComment, '-#'),
            (HTMLCommentNode, '/'),
            (HTMLNode, ('#', '.', '%')),
            (CodeNode, '-'),
            (EvalNode, ('=', '>=')),
            (DoctypeNode, '!!!'),
            (RawNode, ('\\', '>')),
            # Filters
            (PlainFilterNode, ':plain'),
            (JavaScriptFilterNode, ':javascript'),
            (CssFilterNode, ':css'),
            (CdataFilterNode, ':cdata'),
            (EscapedFilterNode, ':escaped'),
        ]
        for cls, operators in NODES:
            if not isinstance(operators, tuple):
                operators = (operators,)
            for operator in operators:
                if haml.startswith(operator):
                    return cls(parser, haml, nested_haml, parent, indentation=indentation)
        # No operator matched: treat the line as raw content.
        return RawNode(parser, haml, nested_haml, parent, indentation=indentation)

    def __init__(self, parser, haml, nested_haml='', parent=None, indentation=-1):
        self.parser = parser
        self.haml = haml
        self.nested_haml = nested_haml
        self.parent = parent
        self.siblings = {
            'left': [],
            'right': []
        }
        self.children = []
        self.indentation = indentation
        if not self.PARSE:
            # Filter nodes keep their nested lines verbatim.
            return
        if isinstance(nested_haml, list):
            lines = nested_haml
        else:
            lines = [line for line in nested_haml.split('\n') if line]
        while lines:
            line = lines.pop(0)
            line_indentation = utils.indentation(line)
            # Join continuation lines ending with the multiline operator.
            MULTILINE = OPERATORS['multiline']
            if line.rstrip().endswith(MULTILINE):
                m_lines = [line.rstrip()]
                while True:
                    try:
                        if lines[0].endswith(MULTILINE):
                            m_lines.append(lines.pop(0).rstrip())
                        else:
                            break
                    except IndexError:
                        break
                # NOTE(review): rstrip(MULTILINE) strips *characters*, not the
                # suffix string — fine for a 1-char operator; verify otherwise.
                line = self._indent(' '.join(line.rstrip(MULTILINE).strip() for line in m_lines), line_indentation)
            # Collect the lines indented deeper than this one: they become
            # the nested content of the node created for `line`.
            nested_lines = []
            while True:
                if not lines:
                    break
                try:
                    if utils.indentation(lines[0]) <= line_indentation:
                        break
                    else:
                        nested_lines.append(lines.pop(0))
                except IndexError:
                    break
            node = Node.create(self.parser, line, nested_lines, parent=self, indentation=self.indentation + 1)
            # Wire up sibling links in both directions.
            for child in self.children:
                node.add_sibling('left', child)
                child.add_sibling('right', node)
            self.children.append(node)

    def add_sibling(self, location, sibling):
        location = location.lower()
        assert location in ('left', 'right')
        self.siblings[location].append(sibling)

    def get_sibling(self, n):
        """Return the n-th sibling to the left (n < 0) or right (n > 0), or None."""
        if n < 0:
            siblings = self.siblings['left']
        elif n > 0:
            siblings = self.siblings['right']
        else:
            return None
        if not len(siblings):
            return None
        n = abs(n) - 1
        if n > len(siblings):
            return None
        return siblings[n]

    def _indent(self, line, indentation=None):
        # Delegate to the project's indent helper; falls back to this node's level.
        return utils.indent(line, indentation or self.indentation)

    def render_children(self):
        """Render all children in order, honoring StripOuter whitespace requests."""
        rendered_children = []
        outerstrip = False
        length = len(self.children)
        for i, child in enumerate(self.children):
            # `lstrip` remembers whether the *previous* child asked for
            # outer-strip, in which case this child's leading space goes too.
            lstrip = outerstrip
            try:
                html = child.to_html()
                outerstrip = False
            except StripOuter as so:
                # The child wants surrounding whitespace removed.
                html = so.html
                outerstrip = True
                if rendered_children:
                    rendered_children[-1] = rendered_children[-1].rstrip()
            if lstrip:
                html = html.lstrip()
            if html is not None:
                if outerstrip:
                    rendered_children.append(html.strip())
                else:
                    if i < length - 1:
                        html += '\n'
                    rendered_children.append(html)
        return ''.join(rendered_children)

    def to_html(self):
        html = self.render_children()
        if html:
            # Interpolate #{...} expressions through the target-language backend.
            html = re.sub(r'#\{(.*?)\}', self.parser.target.eval('\\1'), html + '\n')
        return html
class RawNode(Node):
    """Literal HAML content emitted as-is, after handling the optional
    outer-strip and escape operators."""

    def to_html(self):
        text = self.haml
        if text.startswith(OPERATORS['outerstrip']):
            # Hand the content to the parent so surrounding whitespace is removed.
            raise StripOuter(self._indent(text[1:]))
        if text.startswith(OPERATORS['escape']):
            text = text[1:]
        return self._indent(text)
class DoctypeNode(Node):
    """`!!!` node: emit the requested DOCTYPE (XHTML 1.0 Transitional by default)."""

    # Mapping from the token after `!!!` to the emitted declaration.
    DOCTYPES = {
        'Strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
        'Frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">',
        '5': '<!DOCTYPE html>',
        '1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">',
        'Mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">',
        'RDFa': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">',
        'XML': '<?xml version="1.0" encoding="utf-8" ?>',
    }

    def to_html(self):
        # Strip the doctype operator characters, then look the remainder up;
        # unknown keys fall back to XHTML 1.0 Transitional.
        doctype = self.haml.lstrip(OPERATORS['doctype']).strip()
        return self.DOCTYPES.get(doctype, '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">')
class HTMLNode(Node):
    """A %tag / #id / .class element, rendered through HTMLElement."""

    def to_html(self):
        element = HTMLElement(self)
        rendered = element.render(self.render_children(), indentation=self.indentation)
        if element.outerstrip:
            # Propagate the element's outer-strip request to the parent.
            raise StripOuter(rendered)
        return rendered
class HTMLCommentNode(Node):
    """HTML comment node (`/`), including IE conditional comments (`/[if IE]`)."""

    def to_html(self):
        conditionals = re.findall(r'^/\[(.*?)\]', self.haml)
        if conditionals:
            start = '<!--[%s]>' % conditionals[0]
            # BUG FIX: the standard close of a conditional comment is
            # '<![endif]-->'; the previous '<!endif-->' was invalid HTML.
            end = '<![endif]-->'
        else:
            start = '<!--'
            end = '-->'
        rendered_children = self.render_children()
        if rendered_children:
            # Block form: comment markers on their own lines around the children.
            return '\n'.join([self._indent(start), rendered_children, self._indent(end)])
        # Inline form: comment the node's own text.
        return self._indent(' '.join([start, self.haml.lstrip(OPERATORS['html-comment']).lstrip(), end]))
class HAMLComment(Node):
    """A comment that exists only in the haml source; produces no output."""

    def to_html(self):
        # Returning None tells the renderer to drop this node entirely.
        return None
class EvalNode(Node):
    """Evaluate an expression in the target language and emit its value."""

    def to_html(self):
        expression = (self.haml
                      .lstrip(OPERATORS['outerstrip'])
                      .lstrip(OPERATORS['evaluate'])
                      .strip())
        rendered = self._indent(self.parser.target.eval(expression))
        # An outer-strip prefix hands the result to the parent instead of
        # returning it in place.
        if self.haml.startswith(OPERATORS['outerstrip']):
            raise StripOuter(rendered)
        return rendered
class CodeNode(Node):
    """A target-language control block: '- keyword [expression]'."""

    def __init__(self, *args, **kwargs):
        Node.__init__(self, *args, **kwargs)
        # Split the line into the leading keyword and the optional
        # remainder of the expression.
        parts = self.haml.lstrip(OPERATORS['code']).strip().split(' ', 1)
        self.keyword = parts[0]
        self.expression = parts[1] if len(parts) > 1 else ''

    def to_html(self):
        # Named block_open/block_close to avoid shadowing builtin open().
        block_open, block_close = self.parser.target.block(
            self, self.keyword, self.expression)
        return ''.join(map(str.strip,
                           (block_open, self.render_children(), block_close)))
class PlainFilterNode(Node):
    """Emit the nested lines verbatim with one indent level removed."""

    # The filter body is raw text; do not parse it as haml.
    PARSE = False

    def to_html(self):
        dedented = (line[INDENT:] for line in self.nested_haml)
        return '\n'.join(dedented)
class EscapedFilterNode(PlainFilterNode):
    """Like the plain filter, but XHTML-escape the emitted text."""

    # Raw text body; not parsed as haml.
    PARSE = False

    def to_html(self):
        plain = PlainFilterNode.to_html(self)
        return utils.xhtml_escape(plain)
class CdataFilterNode(Node):
    """Wrap the nested raw lines in a CDATA guard."""

    # Raw text body; not parsed as haml.
    PARSE = False

    def to_html(self):
        level = self.indentation
        lines = [self._indent('//<![CDATA[', level)]
        lines.extend(line[INDENT:] for line in self.nested_haml)
        lines.append(self._indent('//]]>', level))
        return '\n'.join(lines)
class JavaScriptFilterNode(Node):
    """Wrap the nested raw lines in a <script> tag with a CDATA guard."""

    # Raw text body; not parsed as haml.
    PARSE = False

    def to_html(self):
        inner = self.indentation + 1
        lines = [self._indent('<script type="text/javascript">'),
                 self._indent('//<![CDATA[', inner)]
        lines.extend(line[INDENT:] for line in self.nested_haml)
        lines.append(self._indent('//]]>', inner))
        lines.append(self._indent('</script>'))
        return '\n'.join(lines)
class CssFilterNode(Node):
    """Wrap the nested raw lines in a <style> tag with a CDATA guard."""

    # Raw text body; not parsed as haml.
    PARSE = False

    def to_html(self):
        # NOTE(review): '//' is not a CSS comment delimiter; Haml's css
        # filter conventionally uses '/*<![CDATA[*/'. Left unchanged here
        # to preserve existing output — confirm before altering.
        inner = self.indentation + 1
        lines = [self._indent('<style type="text/css">'),
                 self._indent('//<![CDATA[', inner)]
        lines.extend(line[INDENT:] for line in self.nested_haml)
        lines.append(self._indent('//]]>', inner))
        lines.append(self._indent('</style>'))
        return '\n'.join(lines)
| |
# -*- coding: utf-8 -*-
"""
sphinx.builders.epub
~~~~~~~~~~~~~~~~~~~~
Build epub files.
Originally derived from qthelp.py.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import time
import codecs
import zipfile
from os import path
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
Image = None
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util.osutil import ensuredir, copyfile, EEXIST
from sphinx.util.smartypants import sphinx_smarty_pants as ssp
from sphinx.util.console import brown
# (Fragment) templates from which the metainfo files content.opf, toc.ncx,
# mimetype, and META-INF/container.xml are created.
# This template section also defines strings that are embedded in the html
# output but that may be customized by (re-)setting module attributes,
# e.g. from conf.py.
_mimetype_template = 'application/epub+zip' # no EOL!
_container_template = u'''\
<?xml version="1.0" encoding="UTF-8"?>
<container version="1.0"
xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
<rootfiles>
<rootfile full-path="content.opf"
media-type="application/oebps-package+xml"/>
</rootfiles>
</container>
'''
_toc_template = u'''\
<?xml version="1.0"?>
<ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">
<head>
<meta name="dtb:uid" content="%(uid)s"/>
<meta name="dtb:depth" content="%(level)d"/>
<meta name="dtb:totalPageCount" content="0"/>
<meta name="dtb:maxPageNumber" content="0"/>
</head>
<docTitle>
<text>%(title)s</text>
</docTitle>
<navMap>
%(navpoints)s
</navMap>
</ncx>
'''
_navpoint_template = u'''\
%(indent)s <navPoint id="%(navpoint)s" playOrder="%(playorder)d">
%(indent)s <navLabel>
%(indent)s <text>%(text)s</text>
%(indent)s </navLabel>
%(indent)s <content src="%(refuri)s" />
%(indent)s </navPoint>'''
_navpoint_indent = ' '
_navPoint_template = 'navPoint%d'
_content_template = u'''\
<?xml version="1.0" encoding="UTF-8"?>
<package xmlns="http://www.idpf.org/2007/opf" version="2.0"
unique-identifier="%(uid)s">
<metadata xmlns:opf="http://www.idpf.org/2007/opf"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<dc:language>%(lang)s</dc:language>
<dc:title>%(title)s</dc:title>
<dc:creator opf:role="aut">%(author)s</dc:creator>
<dc:publisher>%(publisher)s</dc:publisher>
<dc:rights>%(copyright)s</dc:rights>
<dc:identifier id="%(uid)s" opf:scheme="%(scheme)s">%(id)s</dc:identifier>
<dc:date>%(date)s</dc:date>
</metadata>
<manifest>
<item id="ncx" href="toc.ncx" media-type="application/x-dtbncx+xml" />
%(files)s
</manifest>
<spine toc="ncx">
%(spine)s
</spine>
<guide>
%(guide)s
</guide>
</package>
'''
_cover_template = u'''\
<meta name="cover" content="%(cover)s"/>
'''
_coverpage_name = u'epub-cover.html'
_file_template = u'''\
<item id="%(id)s"
href="%(href)s"
media-type="%(media_type)s" />'''
_spine_template = u'''\
<itemref idref="%(idref)s" />'''
_guide_template = u'''\
<reference type="%(type)s" title="%(title)s" href="%(uri)s" />'''
_toctree_template = u'toctree-l%d'
_link_target_template = u' [%(uri)s]'
_footnote_label_template = u'#%d'
_footnotes_rubric_name = u'Footnotes'
_css_link_target_class = u'link-target'
# XXX These strings should be localized according to epub_language
_guide_titles = {
'toc': u'Table of Contents',
'cover': u'Cover'
}
_media_types = {
'.html': 'application/xhtml+xml',
'.css': 'text/css',
'.png': 'image/png',
'.gif': 'image/gif',
'.svg': 'image/svg+xml',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.otf': 'application/x-font-otf',
'.ttf': 'application/x-font-ttf',
}
_vector_graphics_extensions = ('.svg',)
# Regular expression to match colons only in local fragment identifiers.
# If the URI contains a colon before the #,
# it is an external link that should not change.
_refuri_re = re.compile("([^#:]*#)(.*)")
# The epub publisher
class EpubBuilder(StandaloneHTMLBuilder):
    """
    Builder that outputs epub files.
    It creates the metainfo files container.opf, toc.ncx, mimetype, and
    META-INF/container.xml. Afterwards, all necessary files are zipped to an
    epub file.
    """
    name = 'epub'
    # don't copy the reST source
    copysource = False
    # image formats the generated epub is allowed to reference
    supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
                             'image/jpeg']
    # don't add links
    add_permalinks = False
    # don't add sidebar etc.
    embedded = True
    def init(self):
        """Initialize the HTML machinery plus the epub-specific state."""
        StandaloneHTMLBuilder.init(self)
        # the output files for epub must be .html only
        self.out_suffix = '.html'
        # running counters used while building the NCX table of contents
        self.playorder = 0
        self.tocid = 0
def get_theme_config(self):
return self.config.epub_theme, self.config.epub_theme_options
# generic support functions
def make_id(self, name, id_cache={}):
# id_cache is intentionally mutable
"""Return a unique id for name."""
id = id_cache.get(name)
if not id:
id = 'epub-%d' % self.env.new_serialno('epub')
id_cache[name] = id
return id
def esc(self, name):
"""Replace all characters not allowed in text an attribute values."""
# Like cgi.escape, but also replace apostrophe
name = name.replace('&', '&')
name = name.replace('<', '<')
name = name.replace('>', '>')
name = name.replace('"', '"')
name = name.replace('\'', ''')
return name
def get_refnodes(self, doctree, result):
"""Collect section titles, their depth in the toc and the refuri."""
# XXX: is there a better way than checking the attribute
# toctree-l[1-8] on the parent node?
if isinstance(doctree, nodes.reference) and 'refuri' in doctree:
refuri = doctree['refuri']
if refuri.startswith('http://') or refuri.startswith('https://') \
or refuri.startswith('irc:') or refuri.startswith('mailto:'):
return result
classes = doctree.parent.attributes['classes']
for level in range(8, 0, -1): # or range(1, 8)?
if (_toctree_template % level) in classes:
result.append({
'level': level,
'refuri': self.esc(refuri),
'text': ssp(self.esc(doctree.astext()))
})
break
else:
for elem in doctree.children:
result = self.get_refnodes(elem, result)
return result
def get_toc(self):
"""Get the total table of contents, containing the master_doc
and pre and post files not managed by sphinx.
"""
doctree = self.env.get_and_resolve_doctree(self.config.master_doc,
self, prune_toctrees=False,
includehidden=True)
self.refnodes = self.get_refnodes(doctree, [])
master_dir = path.dirname(self.config.master_doc)
if master_dir:
master_dir += '/' # XXX or os.sep?
for item in self.refnodes:
item['refuri'] = master_dir + item['refuri']
self.toc_add_files(self.refnodes)
def toc_add_files(self, refnodes):
"""Add the master_doc, pre and post files to a list of refnodes.
"""
refnodes.insert(0, {
'level': 1,
'refuri': self.esc(self.config.master_doc + '.html'),
'text': ssp(self.esc(
self.env.titles[self.config.master_doc].astext()))
})
for file, text in reversed(self.config.epub_pre_files):
refnodes.insert(0, {
'level': 1,
'refuri': self.esc(file),
'text': ssp(self.esc(text))
})
for file, text in self.config.epub_post_files:
refnodes.append({
'level': 1,
'refuri': self.esc(file),
'text': ssp(self.esc(text))
})
def fix_fragment(self, prefix, fragment):
"""Return a href/id attribute with colons replaced by hyphens."""
return prefix + fragment.replace(':', '-')
def fix_ids(self, tree):
"""Replace colons with hyphens in href and id attributes.
Some readers crash because they interpret the part as a
transport protocol specification.
"""
for node in tree.traverse(nodes.reference):
if 'refuri' in node:
m = _refuri_re.match(node['refuri'])
if m:
node['refuri'] = self.fix_fragment(m.group(1), m.group(2))
if 'refid' in node:
node['refid'] = self.fix_fragment('', node['refid'])
for node in tree.traverse(addnodes.desc_signature):
ids = node.attributes['ids']
newids = []
for id in ids:
newids.append(self.fix_fragment('', id))
node.attributes['ids'] = newids
    def add_visible_links(self, tree, show_urls='inline'):
        """Add visible link targets for external links.

        show_urls is one of 'no' (leave links untouched), 'inline' (append
        the URI in brackets after the link text) or 'footnote' (collect the
        URIs as auto-numbered footnotes at the end of the document).
        """
        def make_footnote_ref(doc, label):
            """Create a footnote_reference node with children"""
            footnote_ref = nodes.footnote_reference('[#]_')
            footnote_ref.append(nodes.Text(label))
            doc.note_autofootnote_ref(footnote_ref)
            return footnote_ref
        def make_footnote(doc, label, uri):
            """Create a footnote node with children"""
            footnote = nodes.footnote(uri)
            para = nodes.paragraph()
            para.append(nodes.Text(uri))
            footnote.append(para)
            footnote.insert(0, nodes.label('', label))
            doc.note_autofootnote(footnote)
            return footnote
        def footnote_spot(tree):
            """Find or create a spot to place footnotes.
            The function returns the tuple (parent, index)."""
            # The code uses the following heuristic:
            # a) place them after the last existing footnote
            # b) place them after an (empty) Footnotes rubric
            # c) create an empty Footnotes rubric at the end of the document
            fns = tree.traverse(nodes.footnote)
            if fns:
                fn = fns[-1]
                return fn.parent, fn.parent.index(fn) + 1
            for node in tree.traverse(nodes.rubric):
                if len(node.children) == 1 and \
                        node.children[0].astext() == _footnotes_rubric_name:
                    return node.parent, node.parent.index(node) + 1
            doc = tree.traverse(nodes.document)[0]
            rub = nodes.rubric()
            rub.append(nodes.Text(_footnotes_rubric_name))
            doc.append(rub)
            return doc, doc.index(rub) + 1
        if show_urls == 'no':
            return
        if show_urls == 'footnote':
            doc = tree.traverse(nodes.document)[0]
            fn_spot, fn_idx = footnote_spot(tree)
            nr = 1
        for node in tree.traverse(nodes.reference):
            uri = node.get('refuri', '')
            # Only annotate external links whose URI is not already part
            # of the visible link text.
            if (uri.startswith('http:') or uri.startswith('https:') or
                    uri.startswith('ftp:')) and uri not in node.astext():
                idx = node.parent.index(node) + 1
                if show_urls == 'inline':
                    uri = _link_target_template % {'uri': uri}
                    link = nodes.inline(uri, uri)
                    link['classes'].append(_css_link_target_class)
                    node.parent.insert(idx, link)
                elif show_urls == 'footnote':
                    label = _footnote_label_template % nr
                    nr += 1
                    footnote_ref = make_footnote_ref(doc, label)
                    node.parent.insert(idx, footnote_ref)
                    footnote = make_footnote(doc, label, uri)
                    fn_spot.insert(fn_idx, footnote)
                    # Cross-link the reference and the footnote both ways.
                    footnote_ref['refid'] = footnote['ids'][0]
                    footnote.add_backref(footnote_ref['ids'][0])
                    fn_idx += 1
    def write_doc(self, docname, doctree):
        """Write one document file.
        This method is overwritten in order to fix fragment identifiers
        and to add visible external links.
        """
        # Mutate the doctree first, then delegate to the HTML writer.
        self.fix_ids(doctree)
        self.add_visible_links(doctree, self.config.epub_show_urls)
        return StandaloneHTMLBuilder.write_doc(self, docname, doctree)
def fix_genindex(self, tree):
"""Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
for key, columns in tree:
for entryname, (links, subitems) in columns:
for (i, (ismain, link)) in enumerate(links):
m = _refuri_re.match(link)
if m:
links[i] = (ismain,
self.fix_fragment(m.group(1), m.group(2)))
for subentryname, subentrylinks in subitems:
for (i, (ismain, link)) in enumerate(subentrylinks):
m = _refuri_re.match(link)
if m:
subentrylinks[i] = (ismain,
self.fix_fragment(m.group(1), m.group(2)))
def is_vector_graphics(self, filename):
"""Does the filename extension indicate a vector graphic format?"""
ext = path.splitext(filename)[-1]
return ext in _vector_graphics_extensions
def copy_image_files_pil(self):
"""Copy images using the PIL.
The method tries to read and write the files with the PIL,
converting the format and resizing the image if necessary/possible.
"""
ensuredir(path.join(self.outdir, self.imagedir))
for src in self.app.status_iterator(self.images, 'copying images... ',
brown, len(self.images)):
dest = self.images[src]
try:
img = Image.open(path.join(self.srcdir, src))
except IOError:
if not self.is_vector_graphics(src):
self.warn('cannot read image file %r: copying it instead' %
(path.join(self.srcdir, src), ))
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, self.imagedir, dest))
except (IOError, OSError) as err:
self.warn('cannot copy image file %r: %s' %
(path.join(self.srcdir, src), err))
continue
if self.config.epub_fix_images:
if img.mode in ('P',):
# See PIL documentation for Image.convert()
img = img.convert()
if self.config.epub_max_image_width > 0:
(width, height) = img.size
nw = self.config.epub_max_image_width
if width > nw:
nh = (height * nw) / width
img = img.resize((nw, nh), Image.BICUBIC)
try:
img.save(path.join(self.outdir, self.imagedir, dest))
except (IOError, OSError) as err:
self.warn('cannot write image file %r: %s' %
(path.join(self.srcdir, src), err))
def copy_image_files(self):
"""Copy image files to destination directory.
This overwritten method can use the PIL to convert image files.
"""
if self.images:
if self.config.epub_fix_images or self.config.epub_max_image_width:
if not Image:
self.warn('PIL not found - copying image files')
super(EpubBuilder, self).copy_image_files()
else:
self.copy_image_files_pil()
else:
super(EpubBuilder, self).copy_image_files()
    def handle_page(self, pagename, addctx, templatename='page.html',
                    outfilename=None, event_arg=None):
        """Create a rendered page.
        This method is overwritten for genindex pages in order to fix href link
        attributes.
        """
        # Only the general index pages carry genindexentries to sanitize.
        if pagename.startswith('genindex'):
            self.fix_genindex(addctx['genindexentries'])
        StandaloneHTMLBuilder.handle_page(self, pagename, addctx, templatename,
                                          outfilename, event_arg)
    # Finish by building the epub file
    def handle_finish(self):
        """Create the metainfo files and finally the epub."""
        # Order matters: build_content() records self.files/ignored_files,
        # which build_toc() and build_epub() rely on afterwards.
        self.get_toc()
        self.build_mimetype(self.outdir, 'mimetype')
        self.build_container(self.outdir, 'META-INF/container.xml')
        self.build_content(self.outdir, 'content.opf')
        self.build_toc(self.outdir, 'toc.ncx')
        self.build_epub(self.outdir, self.config.epub_basename + '.epub')
def build_mimetype(self, outdir, outname):
"""Write the metainfo file mimetype."""
self.info('writing %s file...' % outname)
f = codecs.open(path.join(outdir, outname), 'w', 'utf-8')
try:
f.write(_mimetype_template)
finally:
f.close()
def build_container(self, outdir, outname):
"""Write the metainfo file META-INF/cointainer.xml."""
self.info('writing %s file...' % outname)
fn = path.join(outdir, outname)
try:
os.mkdir(path.dirname(fn))
except OSError as err:
if err.errno != EEXIST:
raise
f = codecs.open(path.join(outdir, outname), 'w', 'utf-8')
try:
f.write(_container_template)
finally:
f.close()
def content_metadata(self, files, spine, guide):
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
metadata = {}
metadata['title'] = self.esc(self.config.epub_title)
metadata['author'] = self.esc(self.config.epub_author)
metadata['uid'] = self.esc(self.config.epub_uid)
metadata['lang'] = self.esc(self.config.epub_language)
metadata['publisher'] = self.esc(self.config.epub_publisher)
metadata['copyright'] = self.esc(self.config.epub_copyright)
metadata['scheme'] = self.esc(self.config.epub_scheme)
metadata['id'] = self.esc(self.config.epub_identifier)
metadata['date'] = self.esc(time.strftime('%Y-%m-%d'))
metadata['files'] = files
metadata['spine'] = spine
metadata['guide'] = guide
return metadata
    def build_content(self, outdir, outname):
        """Write the metainfo file content.opf It contains bibliographic data,
        a file list and the spine (the reading order).
        """
        self.info('writing %s file...' % outname)
        # files
        # Walk the finished output tree and collect every file with a known
        # media type into the manifest; remember them in self.files for
        # build_epub() later.
        if not outdir.endswith(os.sep):
            outdir += os.sep
        olen = len(outdir)
        projectfiles = []
        self.files = []
        self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',
                              'toc.ncx', 'META-INF/container.xml',
                              self.config.epub_basename + '.epub'] + \
            self.config.epub_exclude_files
        for root, dirs, files in os.walk(outdir):
            for fn in files:
                filename = path.join(root, fn)[olen:]
                if filename in self.ignored_files:
                    continue
                ext = path.splitext(filename)[-1]
                if ext not in _media_types:
                    # we always have JS and potentially OpenSearch files, don't
                    # always warn about them
                    if ext not in ('.js', '.xml'):
                        self.warn('unknown mimetype for %s, ignoring' % filename)
                    continue
                filename = filename.replace(os.sep, '/')
                projectfiles.append(_file_template % {
                    'href': self.esc(filename),
                    'id': self.esc(self.make_id(filename)),
                    'media_type': self.esc(_media_types[ext])
                })
                self.files.append(filename)
        # spine
        # The spine is the linear reading order; fragment-only refuris and
        # ignored files are skipped.
        spine = []
        for item in self.refnodes:
            if '#' in item['refuri']:
                continue
            if item['refuri'] in self.ignored_files:
                continue
            spine.append(_spine_template % {
                'idref': self.esc(self.make_id(item['refuri']))
            })
        for info in self.domain_indices:
            spine.append(_spine_template % {
                'idref': self.esc(self.make_id(info[0] + self.out_suffix))
            })
        if self.get_builder_config('use_index', 'epub'):
            spine.append(_spine_template % {
                'idref': self.esc(self.make_id('genindex' + self.out_suffix))
            })
        # add the optional cover
        # Splice a <meta name="cover"> element just before </metadata> and,
        # when a cover page template is given, render the cover page and put
        # it first in the spine.
        content_tmpl = _content_template
        html_tmpl = None
        if self.config.epub_cover:
            image, html_tmpl = self.config.epub_cover
            image = image.replace(os.sep, '/')
            mpos = content_tmpl.rfind('</metadata>')
            cpos = content_tmpl.rfind('\n', 0, mpos) + 1
            content_tmpl = content_tmpl[:cpos] + \
                _cover_template % {'cover': self.esc(self.make_id(image))} + \
                content_tmpl[cpos:]
            if html_tmpl:
                spine.insert(0, _spine_template % {
                    'idref': self.esc(self.make_id(_coverpage_name))})
                if _coverpage_name not in self.files:
                    ext = path.splitext(_coverpage_name)[-1]
                    self.files.append(_coverpage_name)
                    projectfiles.append(_file_template % {
                        'href': self.esc(_coverpage_name),
                        'id': self.esc(self.make_id(_coverpage_name)),
                        'media_type': self.esc(_media_types[ext])
                    })
                ctx = {'image': self.esc(image), 'title': self.config.project}
                self.handle_page(
                    path.splitext(_coverpage_name)[0], ctx, html_tmpl)
        # Guide section: reader landmarks, either from epub_guide config or
        # auto-added for the cover and first toc entry.
        guide = []
        auto_add_cover = True
        auto_add_toc = True
        if self.config.epub_guide:
            for type, uri, title in self.config.epub_guide:
                file = uri.split('#')[0]
                if file not in self.files:
                    self.files.append(file)
                if type == 'cover':
                    auto_add_cover = False
                if type == 'toc':
                    auto_add_toc = False
                guide.append(_guide_template % {
                    'type': self.esc(type),
                    'title': self.esc(title),
                    'uri': self.esc(uri)
                })
        if auto_add_cover and html_tmpl:
            guide.append(_guide_template % {
                'type': 'cover',
                'title': _guide_titles['cover'],
                'uri': self.esc(_coverpage_name)
            })
        if auto_add_toc and self.refnodes:
            guide.append(_guide_template % {
                'type': 'toc',
                'title': _guide_titles['toc'],
                'uri': self.esc(self.refnodes[0]['refuri'])
            })
        projectfiles = '\n'.join(projectfiles)
        spine = '\n'.join(spine)
        guide = '\n'.join(guide)
        # write the project file
        f = codecs.open(path.join(outdir, outname), 'w', 'utf-8')
        try:
            f.write(content_tmpl %
                    self.content_metadata(projectfiles, spine, guide))
        finally:
            f.close()
def new_navpoint(self, node, level, incr=True):
"""Create a new entry in the toc from the node at given level."""
# XXX Modifies the node
if incr:
self.playorder += 1
self.tocid += 1
node['indent'] = _navpoint_indent * level
node['navpoint'] = self.esc(_navPoint_template % self.tocid)
node['playorder'] = self.playorder
return _navpoint_template % node
def insert_subnav(self, node, subnav):
"""Insert nested navpoints for given node.
The node and subnav are already rendered to text.
"""
nlist = node.rsplit('\n', 1)
nlist.insert(-1, subnav)
return '\n'.join(nlist)
    def build_navpoints(self, nodes):
        """Create the toc navigation structure.
        Subelements of a node are nested inside the navpoint. For nested nodes
        the parent node is reinserted in the subnav.
        """
        # navstack holds the partially rendered navlists of the enclosing
        # levels while we descend; level tracks the current toc depth.
        navstack = []
        navlist = []
        level = 1
        lastnode = None
        for node in nodes:
            if not node['text']:
                continue
            file = node['refuri'].split('#')[0]
            if file in self.ignored_files:
                continue
            if node['level'] > self.config.epub_tocdepth:
                continue
            if node['level'] == level:
                # Sibling at the current depth.
                navlist.append(self.new_navpoint(node, level))
            elif node['level'] == level + 1:
                # One level deeper: push the current list and start a subtoc.
                navstack.append(navlist)
                navlist = []
                level += 1
                if lastnode and self.config.epub_tocdup:
                    # Insert starting point in subtoc with same playOrder
                    navlist.append(self.new_navpoint(lastnode, level, False))
                navlist.append(self.new_navpoint(node, level))
            else:
                # Shallower node: unwind, splicing each finished subtoc into
                # its parent's last navpoint.
                while node['level'] < level:
                    subnav = '\n'.join(navlist)
                    navlist = navstack.pop()
                    navlist[-1] = self.insert_subnav(navlist[-1], subnav)
                    level -= 1
                navlist.append(self.new_navpoint(node, level))
            lastnode = node
        # Unwind any remaining open levels back to the top.
        while level != 1:
            subnav = '\n'.join(navlist)
            navlist = navstack.pop()
            navlist[-1] = self.insert_subnav(navlist[-1], subnav)
            level -= 1
        return '\n'.join(navlist)
def toc_metadata(self, level, navpoints):
"""Create a dictionary with all metadata for the toc.ncx file
properly escaped.
"""
metadata = {}
metadata['uid'] = self.config.epub_uid
metadata['title'] = self.config.epub_title
metadata['level'] = level
metadata['navpoints'] = navpoints
return metadata
def build_toc(self, outdir, outname):
"""Write the metainfo file toc.ncx."""
self.info('writing %s file...' % outname)
if self.config.epub_tocscope == 'default':
doctree = self.env.get_and_resolve_doctree(self.config.master_doc,
self, prune_toctrees=False,
includehidden=False)
refnodes = self.get_refnodes(doctree, [])
self.toc_add_files(refnodes)
else:
# 'includehidden'
refnodes = self.refnodes
navpoints = self.build_navpoints(refnodes)
level = max(item['level'] for item in self.refnodes)
level = min(level, self.config.epub_tocdepth)
f = codecs.open(path.join(outdir, outname), 'w', 'utf-8')
try:
f.write(_toc_template % self.toc_metadata(level, navpoints))
finally:
f.close()
def build_epub(self, outdir, outname):
"""Write the epub file.
It is a zip file with the mimetype file stored uncompressed as the first
entry.
"""
self.info('writing %s file...' % outname)
projectfiles = ['META-INF/container.xml', 'content.opf', 'toc.ncx'] \
+ self.files
epub = zipfile.ZipFile(path.join(outdir, outname), 'w',
zipfile.ZIP_DEFLATED)
epub.write(path.join(outdir, 'mimetype'), 'mimetype',
zipfile.ZIP_STORED)
for file in projectfiles:
fp = path.join(outdir, file)
epub.write(fp, file, zipfile.ZIP_DEFLATED)
epub.close()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Pysilhouette.
#
# Copyright (c) 2009-2010 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
@author: Kei Funagayama <kei@karesansui-project.info>
"""
import time
import os
import sys
import math
import subprocess
import signal
import logging
import pysilhouette.log
from pysilhouette import PROCERROR
from pysilhouette.util import astrftime
from pysilhouette.util import kill_proc
def observer(opts, cf):
    """scheduler and performer manage and monitor.
    @param opts: command options
    @type opts: dict(OptionParser)
    @param cf: Configuration info
    @type cf: dict
    @rtype: int
    @return: exit code
    """
    # Each helper below spawns one child interpreter running the
    # corresponding pysilhouette component, forwarding the parent's
    # command-line flags (plus a per-component pid file in daemon mode).
    def scheduler():
        cmd = [cf['observer.target.python'], cf['observer.target.scheduler']]
        if cmd_args:
            cmd.extend(cmd_args)
        if opts.daemon is True:
            cmd.extend(['-p', os.path.abspath(os.path.dirname(opts.pidfile)) + '/schedulerd.pid'])
        logger.debug('scheduler:popen - cmd=%s' % cmd)
        return subprocess.Popen(args=cmd,
                                close_fds=True,
                                env=this_env,
                                shell=False)
    def performer():
        cmd = [cf['observer.target.python'], cf['observer.target.performer']]
        if cmd_args:
            cmd.extend(cmd_args)
        if opts.daemon is True:
            cmd.extend(['-p', os.path.abspath(os.path.dirname(opts.pidfile)) + '/performerd.pid'])
        logger.debug('performer:popen - cmd=%s' % cmd)
        return subprocess.Popen(args=cmd,
                                close_fds=True,
                                env=this_env,
                                shell=False)
    def asynscheduler():
        cmd = [cf['observer.target.python'], cf['observer.target.asynscheduler']]
        if cmd_args:
            cmd.extend(cmd_args)
        if opts.daemon is True:
            cmd.extend(['-p', os.path.abspath(os.path.dirname(opts.pidfile)) + '/asynschedulerd.pid'])
        logger.debug('asynscheduler:popen - cmd=%s' % cmd)
        return subprocess.Popen(args=cmd,
                                close_fds=True,
                                env=this_env,
                                shell=False)
    def asynperformer():
        cmd = [cf['observer.target.python'], cf['observer.target.asynperformer']]
        if cmd_args:
            cmd.extend(cmd_args)
        if opts.daemon is True:
            cmd.extend(['-p', os.path.abspath(os.path.dirname(opts.pidfile)) + '/asynperformerd.pid'])
        logger.debug('asynperformer:popen - cmd=%s' % cmd)
        return subprocess.Popen(args=cmd,
                                close_fds=True,
                                env=this_env,
                                shell=False)
    # Write "count/default" to the status file when forced or when count
    # differs from the caller-supplied snapshot.
    # NOTE(review): fp is only closed on the failure path — the success
    # path leaks the handle; consider a finally block.
    # NOTE(review): rebinding the local 'status' has no effect on the
    # caller's status_count, so the change-detection always compares
    # against the initial value — confirm whether that is intended.
    def status(count, status, default, force=False):
        try:
            if (force is True) or (status != count):
                status = count
                fp = open(cf["observer.status.path"], "w")
                try:
                    logger.debug("%d/%d" % (count, default))
                    fp.write("%d/%d" % (count, default))
                except:
                    fp.close()
            else:
                pass
        except IOError, ioe:
            logger.error("Failed to write status. file=%s - %s" \
                         % (cf["observer.status.path"], str(ioe.args)))
    ##
    logger = logging.getLogger('pysilhouette.observer')
    # environment
    this_env = os.environ
    cmd_args = ['-c', opts.config]
    if opts.verbose is True:
        cmd_args.append('-v')
    if opts.daemon is True:
        cmd_args.append('-d')
    # Restart accounting: 'count' is decremented on every child restart and
    # compared against the configured budget below.
    spoint = time.time()
    default_count = cf['observer.restart.count'] # default
    status_count = default_count # status
    count = default_count # now
    sd = pf = None
    pf = performer() # start!!
    logger.info('performer : [start] - pid=%s, count=%s/%s'
                % (pf.pid, count, cf['observer.restart.count']))
    sd = scheduler() # start!!
    logger.info('scheduler : [start] - pid=%s, count=%s/%s'
                % (sd.pid, count, cf['observer.restart.count']))
    asynpf = asynperformer() # start!!
    # NOTE(review): the two log lines below reuse pf.pid/sd.pid —
    # asynpf.pid/asynsd.pid were probably intended.
    logger.info('asynperformer : [start] - pid=%s, count=%s/%s'
                % (pf.pid, count, cf['observer.restart.count']))
    asynsd = asynscheduler() # start!!
    logger.info('asynscheduler : [start] - pid=%s, count=%s/%s'
                % (sd.pid, count, cf['observer.restart.count']))
    status(count, status_count, default_count, True)
    try:
        # Monitor loop: restart any child that has exited; each restart
        # consumes one unit of the restart budget.
        while True:
            simple_log = []
            # Performer
            if not pf.poll() is None:
                logger.debug('return code=%d' % pf.returncode)
                logger.info('performer : [stop] - pid=%s, count=%s/%s'
                            % (pf.pid, count, cf['observer.restart.count']))
                pf = performer() # restart
                count -= 1
                logger.info('performer : [start] - pid=%s, count=%s/%s'
                            % (pf.pid, count, cf['observer.restart.count']))
            else:
                simple_log.append('performer (running) - count=%s/%s' % (count, cf['observer.restart.count']))
                logger.debug('performer [running] - pid=%s, count=%s/%s'
                             % (pf.pid, count, cf['observer.restart.count']))
            # Scheduler
            if not sd.poll() is None:
                logger.debug('return code=%d' % sd.returncode)
                logger.info('scheduler : [stop] - pid=%s, count=%s/%s'
                            % (sd.pid, count, cf['observer.restart.count']))
                sd = scheduler() # restart
                count -= 1
                logger.info('scheduler : [start] - pid=%s, count=%s/%s'
                            % (sd.pid, count, cf['observer.restart.count']))
            else:
                simple_log.append('scheduler (running) - count=%s/%s' % (count, cf['observer.restart.count']))
                logger.debug('scheduler [running] - pid=%s, count=%s/%s'
                             % (sd.pid, count, cf['observer.restart.count']))
            # AsynPerformer
            if not asynpf.poll() is None:
                logger.debug('return code=%d' % asynpf.returncode)
                logger.info('asynperformer : [stop] - pid=%s, count=%s/%s'
                            % (asynpf.pid, count, cf['observer.restart.count']))
                asynpf = asynperformer() # restart
                count -= 1
                logger.info('asynperformer : [start] - pid=%s, count=%s/%s'
                            % (asynpf.pid, count, cf['observer.restart.count']))
            else:
                simple_log.append('asynperformer (running) - count=%s/%s' % (count, cf['observer.restart.count']))
                logger.debug('asynperformer [running] - pid=%s, count=%s/%s'
                             % (asynpf.pid, count, cf['observer.restart.count']))
            # AsynScheduler
            if not asynsd.poll() is None:
                logger.debug('return code=%d' % asynsd.returncode)
                logger.info('asynscheduler : [stop] - pid=%s, count=%s/%s'
                            % (asynsd.pid, count, cf['observer.restart.count']))
                asynsd = asynscheduler() # restart
                count -= 1
                logger.info('asynscheduler : [start] - pid=%s, count=%s/%s'
                            % (asynsd.pid, count, cf['observer.restart.count']))
            else:
                simple_log.append('asynscheduler (running) - count=%s/%s' % ( count, cf['observer.restart.count']))
                logger.debug('asynscheduler [running] - pid=%s, count=%s/%s'
                             % (asynsd.pid, count, cf['observer.restart.count']))
            logger.info(str(simple_log)[1:-1])
            # status output
            status(count, status_count, default_count, False)
            # Budget exhausted: exit if the restarts happened within the
            # configured window, otherwise reset the counter and carry on.
            if ( 0 < cf['observer.restart.count.clear.time'] ) and (count <= 0):
                epoint = time.time()
                interval = int(math.ceil(epoint) - math.floor(spoint))
                logger.error('observer restart count reached the value specified in config. Checking interval time. observer.restart.count=%s interval=%d/%s'
                             % (cf['observer.restart.count'], interval, cf['observer.restart.count.clear.time']))
                if interval < cf['observer.restart.count.clear.time']:
                    # Failed 'observer.restart.count' times in 'observer.restart.count.clear.time' seconds.
                    logger.error('observer restarted %s times in count.clear.time seconds interval. Recognizing as failure. Exiting.'
                                 % cf['observer.restart.count'])
                    break
                else:
                    # Failed 'observer.restart.count' times in an interval longer than
                    # 'observer.restart.count.clear.time' seconds. Clearing counter.
                    spoint = time.time()
                    count = cf['observer.restart.count']
                    logger.info('observer restarted %s times, but in not short time. Clearing count. start time %s'
                                % (cf['observer.restart.count'], astrftime(spoint)))
            time.sleep(cf['observer.check.interval'])
        # -- end while
    finally:
        # destroy
        # Best-effort teardown of all four children on the way out.
        # scheduler
        if not sd is None:
            if kill_proc(sd) is True:
                logger.info('KILL %d: killing scheduler succeeded.' % sd.pid)
            else:
                logger.info('KILL %d: killing scheduler failed.' % sd.pid)
        # performer
        if not pf is None:
            if kill_proc(pf) is True:
                logger.info('KILL %d: killing performer succeeded.' % pf.pid)
            else:
                logger.info('KILL %d: killing performer failed.' % pf.pid)
        # asynscheduler
        if not asynsd is None:
            if kill_proc(asynsd) is True:
                logger.info('KILL %d: killing asynscheduler succeeded.' % asynsd.pid)
            else:
                logger.info('KILL %d: killing asynscheduler failed.' % asynsd.pid)
        if not asynpf is None:
            if kill_proc(asynpf) is True:
                logger.info('KILL %d: killing asynperformer succeeded.' % asynpf.pid)
            else:
                logger.info('KILL %d: killing asynperformer failed.' % asynpf.pid)
    return PROCERROR
# -- daemon
def daemonize(stdin, stdout, stderr, pidfile):
    """Detach the current process and turn it into a daemon (double fork).

    stdin/stdout/stderr are file paths the daemon's standard streams are
    redirected to; pidfile receives the daemon's pid as text.  Returns the
    daemon's pid.  The intermediate processes exit with status 0; on any
    failure this exits the process with status 1.  (Python 2 syntax.)
    """
    logger = logging.getLogger('pysilhouette.daemonize')
    # First fork: parent returns to the shell, child continues.
    try:
        pid = os.fork()
        if pid > 0: sys.exit(0)
    except OSError, e:
        print >>sys.stderr, 'fork #1 failed: (%d) %s\n' % (e.errno, e.strerror)
        logger.error('fork #1 failed: (%d) %s\n' % (e.errno, e.strerror))
        sys.exit(1)
    # Detach from the original working dir / umask and start a new session
    # so the daemon has no controlling terminal.
    os.chdir('/')
    os.umask(0)
    os.setsid()
    # Second fork: guarantees the daemon can never reacquire a controlling tty.
    try:
        pid = os.fork()
        if pid > 0: sys.exit(0)
    except OSError, e:
        print >>sys.stderr, 'fork #2 failed: (%d) %s\n' % (e.errno, e.strerror)
        logger.error('fork #2 failed: (%d) %s\n' % (e.errno, e.strerror))
        sys.exit(1)
    # Write pid.
    # NOTE: pid starts as '' so the error message below is printable even
    # when file() fails before os.getpid() is reached.
    pid=''
    try:
        f = file(pidfile, 'w')
        pid = os.getpid()
        f.write('%d' % pid)
        f.close()
    except IOError:
        print >>sys.stderr, 'file=%s - daemonize: failed to write pid to %s' % (pidfile , pid)
        logger.error('file=%s - daemonize: failed to write pid to %s' % (pidfile , pid))
        sys.exit(1)
    # Flush, then redirect the standard streams to the given files.
    for f in sys.stdout, sys.stderr: f.flush()
    sin = file(stdin, 'r')
    sout = file(stdout, 'a+')
    serr = file(stderr, 'a+')
    os.dup2(sin.fileno(), sys.stdin.fileno())
    os.dup2(sout.fileno(), sys.stdout.fileno())
    os.dup2(serr.fileno(), sys.stderr.fileno())
    return pid
if __name__ == '__main__':
    # Import-only module: the daemon is started by callers of daemonize().
    pass
| |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from contextlib import contextmanager
from traits.api import Instance
# ============= standard library imports ========================
import shutil
import os
import yaml
# ============= local library imports ==========================
from pychron.core.helpers.filetools import unique_path2, add_extension
from pychron.loggable import Loggable
from pychron.paths import paths
class AutoMFTable(Loggable):
    """Automated construction of the magnet field table (mftable) and of the
    detector deflection calibrations.

    Workflow (``do_auto_mftable``): run an extraction script to prepare the
    system, peak-center every configured isotope on every detector to build
    the mftable, then step each detector through a set of deflection values
    and record the resulting DAC values in the deflection file.
    """

    ion_optics_manager = Instance(
        "pychron.spectrometer.ion_optics_manager.IonOpticsManager"
    )
    el_manager = Instance(
        "pychron.extraction_line.extraction_line_manager.ExtractionLineManager"
    )
    pyscript_task = Instance("pychron.pyscript.tasks.pyscript_task.PyScriptTask")
    spectrometer_manager = Instance(
        "pychron.spectrometer.base_spectrometer_manager.BaseSpectrometerManager"
    )

    def do_auto_mftable(self, path=None):
        """Run the full automated mftable/deflection procedure.

        path: YAML config file with ``extraction``, ``detectors``,
        ``isotopes``, ``reference_isotope`` and ``deflections`` keys.
        """
        yd = self._load_config(path)
        if yd:
            if not self._prepare(yd["extraction"]):
                self.warning("Failed preparing system")
                return
            with self._ctx():
                dets = yd["detectors"]
                refiso = yd["reference_isotope"]
                if self._construct_mftable(dets, yd["isotopes"]):
                    self._backup_deflection(reset=True)
                    defls = yd["deflections"]
                    self._construct_deflection(dets, defls, refiso)
        else:
            self.debug("Failed loading configuration")

    def _set_config_deflections(self):
        """Restore deflections to the configured values.

        Currently only logs; actual restoration is not implemented (TODO).
        """
        self.debug("setting deflections to config values")

    def _do_peak_center(self, detector, isotope, save=True):
        """Run a peak center for isotope on detector.

        Returns the peak center result (DAC value) or a falsy value on
        failure.  The peak-center graph is closed only on success.
        """
        ion = self.ion_optics_manager
        pc = ion.setup_peak_center(detector=[detector], isotope=isotope, new=True)
        ion.do_peak_center(
            new_thread=False, save=save, message="automated run peakcenter"
        )
        pcr = ion.peak_center_result
        if pcr:
            pc.close_graph()
        return pcr

    def _construct_deflection(self, dets, defls, refiso):
        """Calibrate deflection vs DAC for each detector using ``refiso``.

        defls: mapping detector name -> deflection value or tuple of values.
        A deflection of 0 means "reuse the value already in the mftable".
        Detectors missing from ``defls`` fall back to a single value of 100.
        """
        for di in dets:
            try:
                defli = defls[di]
                if not isinstance(defli, tuple):
                    defli = (defli,)
            except KeyError:
                self.warning("No deflection for {}. using 100 as default".format(di))
                defli = (100,)
            for de in defli:
                if de == 0:
                    self._update_deflection_file_from_mftable(di, refiso)
                    self.info("Deflection=0. Using mftable value for {}".format(di))
                else:
                    self.info(
                        "calculating peak center for {} on {}. deflection={}".format(
                            refiso, di, de
                        )
                    )
                    self._set_deflection(di, de)
                    pc = self._do_peak_center(di, refiso, save=False)
                    if pc:
                        self._update_deflection(di, de, pc)

    def _construct_mftable(self, dets, isos):
        """Peak center every (detector, isotope) pair; return True on success.

        The first pair is the reference; if its peak center fails the whole
        procedure is aborted (returns None).
        """
        pc = self._do_peak_center(dets[0], isos[0])
        if not pc:
            self.warning("Failed reference peak center")
            return
        # If the reference detector's deflection is already 0 the first pair
        # was effectively just measured and can be skipped in the loop below.
        skipref = self._current_deflection(dets[0]) == 0
        for i, di in enumerate(dets):
            for j, iso in enumerate(isos):
                if i == 0 and j == 0 and skipref:
                    continue
                self._set_deflection(di, 0)
                self._do_peak_center(di, iso)
        # BUG FIX: previously fell through returning None, so
        # do_auto_mftable never ran the deflection-construction step.
        return True

    def _current_deflection(self, det):
        """Return the current deflection for ``det`` from the spectrometer."""
        self.debug("get deflection for {}".format(det))
        # BUG FIX: the declared trait is `spectrometer_manager`;
        # `spectrometer_man` raised AttributeError.
        return self.spectrometer_manager.get_deflection(det)

    def _set_deflection(self, det, defl):
        """Set ``det``'s deflection on the spectrometer."""
        self.debug("setting deflection. det={}, defl={}".format(det, defl))
        # BUG FIX: use the declared `spectrometer_manager` trait.
        self.spectrometer_manager.set_deflection(det, defl)

    def _update_deflection_file_from_mftable(self, di, refiso):
        """Record the mftable DAC for ``di`` at deflection 0."""
        # NOTE(review): `mftable` is not declared on this class; presumably
        # supplied by a collaborator/subclass -- confirm.
        dac = self.mftable.get_dac(di, refiso)
        self._update_deflection(di, 0, dac)

    def _update_deflection(self, di, defl, dac):
        """Append a (deflection, dac) pair to detector ``di``'s entry in the
        deflection file (comma-separated string lists)."""
        self.debug("Update deflection det={},defl={} dac={}".format(di, defl, dac))
        p = paths.deflection
        with open(p, "r") as rfile:
            # safe_load: the deflection file is plain YAML; avoids arbitrary
            # object construction.
            yd = yaml.safe_load(rfile)
        dd = yd[di]
        defls = dd["deflections"].split(",")
        # BUG FIX: str() both values -- ",".join() below requires strings and
        # previously raised TypeError on the numeric defl/dac.
        defls.append(str(defl))
        dacs = dd["dacs"].split(",")
        dacs.append(str(dac))
        # BUG FIX: write back to the detector's entry (dd), not to new
        # top-level keys of the file (previously yd["deflections"]).
        dd["deflections"] = ",".join(defls)
        dd["dacs"] = ",".join(dacs)
        with open(p, "w") as wfile:
            yaml.dump(yd, wfile, default_flow_style=False)

    def _backup_mftable(self):
        """Copy the mftable next to itself with a `~` prefix."""
        src = paths.mftable
        head, tail = os.path.split(src)
        dst = os.path.join(head, "~{}".format(tail))
        self.debug("backing up {} to {}".format(src, dst))
        shutil.copyfile(src, dst)

    def _backup_deflection(self, reset=False):
        """Copy the deflection file into the backup dir; optionally reset it
        to empty per-detector entries."""
        src = paths.deflection
        if src:
            # NOTE(review): unique_path2 may return a (path, count) tuple in
            # some pychron versions -- confirm dst is a plain path.
            dst = unique_path2(paths.backup_deflection_dir)
            self.debug("backing up {} to {}".format(src, dst))
            shutil.copyfile(src, dst)
        if reset:
            with open(src, "r") as rfile:
                yd = yaml.safe_load(rfile)
            nd = {k: {"deflections": "", "dacs": ""} for k in yd}
            with open(src, "w") as wfile:
                yaml.dump(nd, wfile, default_flow_style=False)

    def _load_config(self, path):
        """Load and normalize the YAML config at ``path``.

        ``detectors`` and ``isotopes`` are comma-separated strings in the
        file and are split into stripped lists here.  Parsing errors are
        logged and the partially-normalized dict is returned.
        """
        with open(path, "r") as rfile:
            yd = yaml.safe_load(rfile)
        try:
            yd["detectors"] = [di.strip() for di in yd["detectors"].split(",")]
            yd["isotopes"] = [iso.strip() for iso in yd["isotopes"].split(",")]
            # SECURITY: eval() executes arbitrary code from the config file;
            # only use config files from trusted sources.
            yd["deflections"] = eval(yd["deflections"])
        except BaseException as e:
            self.debug("failed parsing config file {}. exception={}".format(path, e))
        return yd

    def _prepare(self, extraction_script):
        """Run the configured extraction script; return its success flag."""
        extraction_script = add_extension(extraction_script)
        task = self.pyscript_task
        root, name = os.path.split(extraction_script)
        ctx = {"analysis_type": "blank" if "blank" in name else "unknown"}
        ret = task.execute_script(name, root, new_thread=False, context=ctx)
        self.info(
            "Extraction script {} {}".format(
                name, "completed successfully" if ret else "failed"
            )
        )
        return ret

    @contextmanager
    def _ctx(self):
        """Back up the mftable on entry; restore deflections on exit."""
        self._backup_mftable()
        yield
        # return to original deflections
        self._set_config_deflections()
# ============= EOF =============================================
| |
#!/usr/bin/python
"""
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import gzip
import json
import logging
import math
import os
import struct
import time
# Globals: parsed command-line options (argparse.Namespace), populated in
# main() and read by Pcap.Print().
options = None
########################################################################################################################
# Pcap processing
########################################################################################################################
class Pcap():
def __init__(self):
self.start_seconds = None
self.start_time = None
self.local_ethernet_mac = None
self.slices = {'in': [], 'out': [], 'in_dup': []}
self.bytes = {'in': 0, 'out': 0, 'in_dup': 0}
self.streams = {}
return
def SaveStats(self, out):
file_name, ext = os.path.splitext(out)
if ext.lower() == '.gz':
f = gzip.open(out, 'wb')
else:
f = open(out, 'wb')
try:
result = {"bytes": self.bytes}
json.dump(result, f)
logging.info('Result stats written to {0}'.format(out))
except:
logging.critical('Error writing result stats to {0}'.format(out))
f.close()
def SaveDetails(self, out):
file_name, ext = os.path.splitext(out)
if ext.lower() == '.gz':
f = gzip.open(out, 'wb')
else:
f = open(out, 'wb')
try:
json.dump(self.slices, f)
logging.info('Result details written to {0}'.format(out))
except:
logging.critical('Error writing result details to {0}'.format(out))
f.close()
def Print(self):
global options
if options.json:
print(json.dumps(self.bytes, indent=2))
else:
print "Bytes Out: {0:d}".format(self.bytes['out'])
print "Bytes In: {0:d}".format(self.bytes['in'])
print "Duplicate Bytes In: {0:d}".format(self.bytes['in_dup'])
def Process(self, pcap):
f = None
self.__init__() #Reset state if called multiple times
try:
file_name, ext = os.path.splitext(pcap)
if ext.lower() == '.gz':
f = gzip.open(pcap, 'rb')
else:
f = open(pcap, 'rb')
bytes = f.read(24)
# File header:
# Magic Number - 4 bytes - 0xa1b2c3d4
# Major Version - 2 bytes
# Minor version - 2 bytes
# Tz offset - 4 bytes (always 0)
# Timestamp accuracy - 4 bytes (always 0)
# Snapshot length - 4 bytes
# Link layer header type - 4 bytes
#
# unpack constants:
# L - unsigned long (4 byte)
# H - unsigned short (2 byte)
# B - unsigned char (1 byte int)
file_header = struct.unpack("=LHHLLLL", bytes)
# ignore byte order reversals for now
if file_header[0] == 0xa1b2c3d4:
ok = True
self.linktype = file_header[6]
if self.linktype == 1:
self.linklen = 12
elif self.linktype == 113:
self.linklen = 14
else:
logging.critical("Unknown link layer header type: {0:d}".format(self.linktype))
ok = False
# Packet header:
# Time stamp (seconds) - 4 bytes
# Time stamp (microseconds value) - 4 bytes
# Captured data length - 4 bytes
# Original length - 4 bytes
while ok:
bytes = f.read(16)
if not bytes or len(bytes) < 16:
break
(seconds, useconds, captured_length, packet_length) = struct.unpack("=LLLL", bytes)
if self.start_seconds is None:
self.start_seconds = seconds
seconds -= self.start_seconds
if packet_length and captured_length <= packet_length:
packet_time = float(seconds) + float(useconds) / 1000000.0
packet_info = {}
packet_info['time'] = packet_time
packet_info['length'] = packet_length
packet_info['captured_length'] = captured_length
packet_info['valid'] = False
if captured_length:
packet_data = f.read(captured_length)
else:
packet_data = None
if len(packet_data) >= self.linklen:
try:
self.ProcessPacket(packet_data, packet_info)
except Exception as e:
print(e)
else:
logging.critical("Invalid pcap file " + pcap)
except:
logging.critical("Error processing pcap " + pcap)
if f is not None:
f.close()
return
def ProcessPacket(self, packet_data, packet_info):
if self.linktype == 1:
# Ethernet:
# dst1: 2 bytes
# dst2: 2 bytes
# dst3: 2 bytes
# src1: 2 bytes
# src2: 2 bytes
# src3: 2 bytes
ethernet_header = struct.unpack("!HHHHHH", packet_data[0:self.linklen])
# Ignore broadcast traffic
packet_info['ethernet_dst'] = [ethernet_header[0], ethernet_header[1], ethernet_header[2]]
packet_info['ethernet_src'] = [ethernet_header[3], ethernet_header[4], ethernet_header[5]]
dst = packet_info['ethernet_dst']
if dst[0] != 0xFFFF or dst[1] != 0xFFFF or dst[2] != 0xFFFF:
packet_info['valid'] = True
elif self.linktype == 113:
# Linux cooked capture
# Packet Type: 2 bytes
# aprhrd type: 2 bytes
# Address length: 2 bytes
# Address part 1: 4 bytes
# Address part 2: 4 bytes
cooked_header = struct.unpack("!HHHLL", packet_data[0:self.linklen])
if cooked_header[0] == 0:
packet_info['valid'] = True
packet_info['direction'] = 'in'
if cooked_header[0] == 4:
packet_info['valid'] = True
packet_info['direction'] = 'out'
protocol = struct.unpack("!H", packet_data[self.linklen:self.linklen + 2])[0]
if packet_info['valid'] and protocol == 0x800: # Only handle IPv4 for now
self.ProcessIPv4Packet(packet_data[self.linklen + 2:], packet_info)
if packet_info['valid'] and self.start_time:
if self.local_ethernet_mac is not None:
local = self.local_ethernet_mac
src = packet_info['ethernet_src']
if src[0] == local[0] and src[1] == local[1] and src[2] == local[2]:
packet_info['direction'] = 'out'
else:
packet_info['direction'] = 'in'
if 'direction' in packet_info:
self.ProcessPacketInfo(packet_info)
return
def ProcessIPv4Packet(self, ip_packet, packet_info):
# IP Header:
# Version/len: 1 Byte (4 bits each)
# dscp/ecn: 1 Byte
# Total Length: 2 Bytes
# Identification: 2 Bytes
# Flags/Fragment: 2 Bytes
# TTL: 1 Byte
# Protocol: 1 Byte
# Header Checksum: 2 Bytes
# Source Address: 4 Bytes
# Dest Address: 4 Bytes
if len(ip_packet) > 20:
ip_header = struct.unpack("!BBHHHBBHLL", ip_packet[0:20])
header_length = (ip_header[0] & 0x0F) * 4
total_length = ip_header[2]
payload_length = total_length - header_length
packet_info['ip_payload_length'] = payload_length
packet_info['ip_protocol'] = ip_header[6]
packet_info['ip_src'] = ip_header[8]
addr = struct.unpack("BBBB", ip_packet[12:16])
packet_info['ip_src_str'] = '{0:d}.{1:d}.{2:d}.{3:d}'.format(addr[0], addr[1], addr[2], addr[3])
packet_info['ip_dst'] = ip_header[9]
addr = struct.unpack("BBBB", ip_packet[16:20])
packet_info['ip_dst_str'] = '{0:d}.{1:d}.{2:d}.{3:d}'.format(addr[0], addr[1], addr[2], addr[3])
if payload_length > 0:
payload = ip_packet[header_length:]
if packet_info['ip_protocol'] == 6:
self.ProcessTCPPacket(payload, packet_info)
elif packet_info['ip_protocol'] == 17:
self.ProcessUDPPacket(payload, packet_info)
else:
packet_info['valid'] = False
else:
packet_info['valid'] = False
def ProcessTCPPacket(self, payload, packet_info):
# TCP Packet Header
# Source Port: 2 bytes
# Dest Port: 2 bytes
# Sequence number: 4 bytes
# Ack number: 4 bytes
# Header len: 1 byte (masked)
if len(payload) > 8:
tcp_header = struct.unpack("!HHLLB", payload[0:13])
header_length = (tcp_header[4] >> 4 & 0x0F) * 4
packet_info['tcp_payload_length'] = packet_info['ip_payload_length'] - header_length
packet_info['src_port'] = tcp_header[0]
packet_info['dst_port'] = tcp_header[1]
packet_info['tcp_sequence'] = tcp_header[2]
packet_info['stream_id'] = '{0}:{1:d}->{2}:{3:d}'.format(packet_info['ip_src_str'], packet_info['src_port'],
packet_info['ip_dst_str'], packet_info['dst_port'])
# If DNS didn't trigger a start yet and we see outbound TCP traffic, use that to identify the starting point.
# Outbound can be explicit (if we have a cooked capture like android) or implicit if dest port is 80, 443, 1080.
if self.start_time is None:
is_outbound = False
if 'direction' in packet_info and packet_info['direction'] == 'out':
is_outbound = True
elif packet_info['dst_port'] == 80 or packet_info['dst_port'] == 443 or packet_info['dst_port'] == 1080:
is_outbound = True
if is_outbound:
self.start_time = packet_info['time']
if 'ethernet_src' in packet_info and self.local_ethernet_mac is None:
self.local_ethernet_mac = packet_info['ethernet_src']
else:
packet_info['valid'] = False
def ProcessUDPPacket(self, payload, packet_info):
# UDP Packet header:
# Source Port: 2 bytes
# Dest Port: 2 bytes
# Length (including header): 2 bytes
# Checksum: 2 bytes
if len(payload) > 8:
udp_header = struct.unpack("!HHHH", payload[0:8])
packet_info['src_port'] = udp_header[0]
packet_info['dst_port'] = udp_header[1]
if packet_info['dst_port'] == 53:
self.ProcessDNSRequest(payload[8:], packet_info)
else:
packet_info['valid'] = False
def ProcessDNSRequest(self, payload, packet_info):
if 'ethernet_src' in packet_info and self.local_ethernet_mac is None:
self.local_ethernet_mac = packet_info['ethernet_src']
if self.start_time is None:
self.start_time = packet_info['time']
def ProcessPacketInfo(self, packet_info):
elapsed = packet_info['time'] - self.start_time
bucket = int(math.floor(elapsed * 10))
# Make sure the time slice lists in both directions are the same size and big enough to include the current bucket
for direction in ['in', 'out', 'in_dup']:
length = len(self.slices[direction])
if length <= bucket:
need = bucket - length + 1
self.slices[direction] += [0] * need
# Update the actual accounting
bytes = packet_info['length']
direction = packet_info['direction']
self.bytes[direction] += bytes
self.slices[direction][bucket] += bytes
# If it is a tcp stream, keep track of the sequence numbers and see if any of the data overlaps with previous
# ranges on the same connection.
if direction == 'in' and\
'stream_id' in packet_info and\
'tcp_sequence' in packet_info and\
'tcp_payload_length' in packet_info and\
packet_info['tcp_payload_length'] > 0:
stream = packet_info['stream_id']
data_len = packet_info['tcp_payload_length']
stream_start = packet_info['tcp_sequence']
stream_end = stream_start + data_len
if stream not in self.streams:
self.streams[stream] = []
# Loop through all of the existing packets on the stream to see if the data is duplicate (a spurious retransmit)
duplicate_bytes = 0
for start, end in self.streams[stream]:
overlap = max(0, min(end, stream_end) - max(start, stream_start))
if overlap > duplicate_bytes:
duplicate_bytes = overlap
# If the entire payload is duplicate then the whole packet is duplicate
if duplicate_bytes >= data_len:
duplicate_bytes = packet_info['length']
if duplicate_bytes > 0:
self.bytes['in_dup'] += duplicate_bytes
self.slices['in_dup'][bucket] += duplicate_bytes
# Keep track of the current packet byte range
self.streams[stream].append([stream_start, stream_end])
########################################################################################################################
# Main Entry Point
########################################################################################################################
def main():
    """Command-line entry point: parse args, process the pcap, emit results."""
    global options
    import argparse
    parser = argparse.ArgumentParser(description='WebPageTest pcap parser.',
                                     prog='pcap-parser')
    # BUG FIX: default=0 -- with action='count' and no default, the attribute
    # is None when -v is absent, making the >= comparison below ill-defined
    # (TypeError under Python 3). Behavior with 0 is identical.
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Increase verbosity (specify multiple times for more). -vvvv for full debug output.")
    parser.add_argument('-i', '--input', help="Input pcap file.")
    parser.add_argument('-s', '--stats', help="Output bandwidth information file.")
    parser.add_argument('-d', '--details', help="Output bandwidth details file (time sliced bandwidth data).")
    parser.add_argument('-j', '--json', action='store_true', default=False, help="Set output format to JSON")
    options = parser.parse_args()
    # Set up logging: CRITICAL by default, one level lower per -v.
    log_level = logging.CRITICAL
    if options.verbose == 1:
        log_level = logging.ERROR
    elif options.verbose == 2:
        log_level = logging.WARNING
    elif options.verbose == 3:
        log_level = logging.INFO
    elif options.verbose >= 4:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level, format="%(asctime)s.%(msecs)03d - %(message)s", datefmt="%H:%M:%S")
    if not options.input:
        parser.error("Input trace file is not specified.")
    start = time.time()
    pcap = Pcap()
    pcap.Process(options.input)
    if options.stats:
        pcap.SaveStats(options.stats)
    if options.details:
        pcap.SaveDetails(options.details)
    pcap.Print()
    end = time.time()
    elapsed = end - start
    logging.debug("Elapsed Time: {0:0.4f}".format(elapsed))
if '__main__' == __name__:
    main()
| |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import ValidationError, fields, validate, validates_schema
from polyaxon.auxiliaries import (
PolyaxonCleanerSchema,
PolyaxonInitContainerSchema,
PolyaxonNotifierSchema,
PolyaxonSidecarContainerSchema,
)
from polyaxon.auxiliaries.default_scheduling import DefaultSchedulingSchema
from polyaxon.deploy.schemas.auth import AuthSchema
from polyaxon.deploy.schemas.deployment_types import DeploymentCharts, DeploymentTypes
from polyaxon.deploy.schemas.email import EmailSchema
from polyaxon.deploy.schemas.ingress import IngressSchema
from polyaxon.deploy.schemas.intervals import IntervalsSchema
from polyaxon.deploy.schemas.operators import OperatorsSchema
from polyaxon.deploy.schemas.proxy import ProxySchema
from polyaxon.deploy.schemas.rbac import RBACSchema
from polyaxon.deploy.schemas.root_user import RootUserSchema
from polyaxon.deploy.schemas.security_context import SecurityContextSchema
from polyaxon.deploy.schemas.service import (
AgentServiceSchema,
ApiServiceSchema,
DeploymentServiceSchema,
ExternalServicesSchema,
HooksSchema,
OperatorServiceSchema,
PostgresqlSchema,
RabbitmqSchema,
RedisSchema,
WorkerServiceSchema,
)
from polyaxon.deploy.schemas.service_types import ServiceTypes
from polyaxon.deploy.schemas.ssl import SSLSchema
from polyaxon.deploy.schemas.ui import UISchema
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.types import ConnectionTypeSchema
from polyaxon.utils.signal_decorators import check_partial
def validate_connections(artifacts_store, connections):
    """Ensure the artifacts store and every connection carry unique names.

    Raises ValidationError on the first duplicate name encountered.
    """
    seen = {artifacts_store.name} if artifacts_store else set()
    for connection in connections or []:
        if connection.name in seen:
            raise ValidationError(
                "A connection with name `{}` must be unique.".format(connection.name)
            )
        seen.add(connection.name)
def check_postgres(postgresql, external_services):
    """Require a postgresql instance: either in-cluster or external.

    Raises ValidationError when the in-cluster postgresql is explicitly
    disabled and no external instance is configured.
    """
    disabled_in_cluster = bool(postgresql) and postgresql.enabled is False
    external = external_services.postgresql if external_services else None
    if disabled_in_cluster and not external:
        raise ValidationError(
            "A postgresql instance is required, "
            "please enable the in-cluster postgresql, "
            "or provide an external instance."
        )
def check_rabbitmq(rabbitmq, external_services, broker):
    """Validate the rabbitmq configuration.

    Returns a truthy value when rabbitmq (in-cluster or external) is in use.
    Raises ValidationError when both are configured, or when rabbitmq is
    enabled while the broker backend is redis.
    """
    in_cluster = rabbitmq and rabbitmq.enabled
    external = external_services.rabbitmq if external_services else None
    if in_cluster and external:
        raise ValidationError(
            "You can either enable the in-cluster rabbitmq or use an external instance, "
            "not both!"
        )
    used = in_cluster or external
    if used and broker == "redis":
        raise ValidationError(
            "rabbitmq is enabled but you are using a different broker backend!"
        )
    return used
def check_redis(redis, external_services, broker):
    """Validate the redis configuration.

    Returns (redis_used, redis_is_broker). Raises ValidationError when both
    in-cluster and external redis are configured, or when redis is enabled
    as a broker while another broker backend is selected.
    """
    enabled = redis and redis.enabled
    non_broker = redis and redis.non_broker
    external = external_services.redis if external_services else None
    if enabled and external:
        raise ValidationError(
            "You can either enable the in-cluster redis or use an external instance, "
            "not both!"
        )
    used = enabled or external
    if used and broker != "redis" and not non_broker:
        raise ValidationError(
            "redis is enabled but you are using a different broker backend!"
        )
    return used, not non_broker
def broker_is_required(services):
    """Return True when at least one of the given services is enabled."""
    return any(s and s.enabled for s in services)
def wrong_agent_deployment_keys(**kwargs):
    """Reject config keys that must not be set for an agent deployment.

    Any kwarg with a non-None value is reported; raises ValidationError
    listing the offending keys.
    """
    error_keys = [key for key, value in kwargs.items() if value is not None]
    if error_keys:
        raise ValidationError(
            "Agent deployment received some keys that are not required.\n"
            "Please remove these config keys from your config file:\n"
            "{}".format(error_keys)
        )
def validate_platform_deployment(
    postgresql,
    redis,
    rabbitmq,
    broker,
    scheduler,
    compiler,
    worker,
    beat,
    external_services,
):
    """Cross-validate the platform backing services and broker selection.

    Delegates to check_postgres/check_redis/check_rabbitmq, then verifies
    that at most one broker is deployed and that a broker exists whenever a
    celery-backed service is enabled.
    """
    check_postgres(postgresql, external_services)
    redis_used, redis_is_broker = check_redis(redis, external_services, broker)
    rabbitmq_used = check_rabbitmq(rabbitmq, external_services, broker)
    if rabbitmq_used and redis_used and redis_is_broker:
        raise ValidationError(
            "You only need to enable rabbitmq or redis for the broker, "
            "you don't need to deploy both!"
        )
    celery_services = [scheduler, compiler, worker, beat]
    if broker_is_required(celery_services) and not (rabbitmq_used or redis_used):
        raise ValidationError(
            "You enabled some services that require a broker, please set redis or rabbitmq!"
        )
def validate_deployment_chart(
    deployment_chart,
    agent,
    environment,
):
    """Check that the `agent` key is consistent with the selected chart.

    The agent chart requires an `agent` section; the platform chart rejects
    one (except in the staging environment).
    """
    if deployment_chart == DeploymentCharts.AGENT and not agent:
        raise ValidationError(
            "Agent deployment requires a valid `agent` key configuration."
        )
    is_platform = deployment_chart == DeploymentCharts.PLATFORM
    if is_platform and agent and environment != "staging":
        raise ValidationError("Platform deployment received an unexpected `agent` key.")
def validate_gateway(gateway):
    """Validate the gateway's service type against the supported values."""
    if not (gateway and gateway.service):
        return
    service_type = gateway.service.get("type")
    if not service_type:
        return
    if service_type not in ServiceTypes.VALUES:
        raise ValidationError(
            "Received an invalid gateway service type: {}".format(service_type)
        )
class DeploymentSchema(BaseCamelSchema):
    """Marshmallow schema for a full Polyaxon deployment config file.

    Field names are exposed in camelCase on the wire (via BaseCamelSchema).
    Cross-field constraints are enforced by the `validates_schema` hooks at
    the bottom of the class.
    """
    # -- Chart / version selection
    deployment_type = fields.Str(
        allow_none=True, validate=validate.OneOf(DeploymentTypes.VALUES)
    )
    deployment_chart = fields.Str(
        allow_none=True,
        validate=validate.OneOf(DeploymentCharts.VALUES),
    )
    deployment_version = fields.Str(allow_none=True)
    release_name = fields.Str(allow_none=True)
    namespace = fields.Str(allow_none=True)
    # -- Security, secrets and identity
    rbac = fields.Nested(RBACSchema, allow_none=True)
    polyaxon_secret = fields.Str(allow_none=True)
    internal_token = fields.Str(allow_none=True)
    password_length = fields.Int(allow_none=True)
    ssl = fields.Nested(SSLSchema, allow_none=True)
    encryption_secret = fields.Str(allow_none=True)
    platform_secret = fields.Str(allow_none=True)
    agent_secret = fields.Str(allow_none=True)
    timezone = fields.Str(allow_none=True)
    environment = fields.Str(allow_none=True)
    ingress = fields.Nested(IngressSchema, allow_none=True)
    user = fields.Nested(RootUserSchema, allow_none=True)
    # -- Pod scheduling (core components and celery components separately)
    node_selector = fields.Dict(allow_none=True)
    tolerations = fields.List(fields.Dict(allow_none=True), allow_none=True)
    affinity = fields.Dict(allow_none=True)
    celery_node_selector = fields.Dict(allow_none=True)
    celery_tolerations = fields.List(fields.Dict(allow_none=True), allow_none=True)
    celery_affinity = fields.Dict(allow_none=True)
    limit_resources = fields.Bool(allow_none=True)
    global_replicas = fields.Int(allow_none=True)
    global_concurrency = fields.Int(allow_none=True)
    # -- Platform services
    gateway = fields.Nested(ApiServiceSchema, allow_none=True)
    api = fields.Nested(ApiServiceSchema, allow_none=True)
    streams = fields.Nested(ApiServiceSchema, allow_none=True)
    scheduler = fields.Nested(WorkerServiceSchema, allow_none=True)
    compiler = fields.Nested(WorkerServiceSchema, allow_none=True)
    worker = fields.Nested(WorkerServiceSchema, allow_none=True)
    beat = fields.Nested(DeploymentServiceSchema, allow_none=True)
    agent = fields.Nested(AgentServiceSchema, allow_none=True)
    operator = fields.Nested(OperatorServiceSchema, allow_none=True)
    # -- Auxiliary containers
    init = fields.Nested(PolyaxonInitContainerSchema, allow_none=True)
    sidecar = fields.Nested(PolyaxonSidecarContainerSchema, allow_none=True)
    notifier = fields.Nested(PolyaxonNotifierSchema, allow_none=True)
    cleaner = fields.Nested(PolyaxonCleanerSchema, allow_none=True)
    default_scheduling = fields.Nested(DefaultSchedulingSchema, allow_none=True)
    default_image_pull_secrets = fields.List(fields.Str(), allow_none=True)
    # -- Hooks
    tables_hook = fields.Nested(HooksSchema, allow_none=True)
    clean_hooks = fields.Nested(HooksSchema, allow_none=True)
    api_hooks = fields.Nested(HooksSchema, allow_none=True)
    hooks = fields.Nested(HooksSchema, allow_none=True)
    flower = fields.Nested(DeploymentServiceSchema, allow_none=True)
    # -- Backing stores and broker (see the broker checks in
    # validate_platform_deployment)
    postgresql = fields.Nested(PostgresqlSchema, allow_none=True)
    redis = fields.Nested(RedisSchema, allow_none=True)
    rabbitmq = fields.Nested(RabbitmqSchema, data_key="rabbitmq-ha", allow_none=True)
    broker = fields.Str(allow_none=True, validate=validate.OneOf(["redis", "rabbitmq"]))
    # -- Integrations and misc settings
    email = fields.Nested(EmailSchema, allow_none=True)
    ldap = fields.Raw(allow_none=True)
    metrics = fields.Raw(allow_none=True)
    image_pull_secrets = fields.List(fields.Str(), allow_none=True)
    host_name = fields.Str(allow_none=True)
    allowed_hosts = fields.List(fields.Str(), allow_none=True)
    include_host_ips = fields.Bool(allow_none=True)
    intervals = fields.Nested(IntervalsSchema, allow_none=True)
    artifacts_store = fields.Nested(ConnectionTypeSchema, allow_none=True)
    connections = fields.List(fields.Nested(ConnectionTypeSchema), allow_none=True)
    log_level = fields.Str(allow_none=True)
    security_context = fields.Nested(SecurityContextSchema, allow_none=True)
    external_services = fields.Nested(ExternalServicesSchema, allow_none=True)
    debug_mode = fields.Bool(allow_none=True)
    organization_key = fields.Str(allow_none=True)
    auth = fields.Nested(AuthSchema, allow_none=True)
    proxy = fields.Nested(ProxySchema, allow_none=True)
    ui = fields.Nested(UISchema, allow_none=True)
    include_chart_revision = fields.Bool(allow_none=True)
    operators = fields.Nested(OperatorsSchema, allow_none=True)
    istio = fields.Dict(allow_none=True)
    # Pending validation
    dns = fields.Raw(allow_none=True)
    @staticmethod
    def schema_config():
        """Return the config class this schema deserializes into."""
        return DeploymentConfig
    @validates_schema
    @check_partial
    def validate_connections(self, data, **kwargs):
        """Ensure connection names (incl. the artifacts store) are unique."""
        validate_connections(data.get("artifacts_store"), data.get("connections"))
    @validates_schema
    @check_partial
    def validate_deployment(self, data, **kwargs):
        """Run the module-level cross-field deployment validations."""
        validate_deployment_chart(
            deployment_chart=data.get("deployment_chart"),
            agent=data.get("agent"),
            environment=data.get("environment"),
        )
        validate_platform_deployment(
            postgresql=data.get("postgresql"),
            redis=data.get("redis"),
            rabbitmq=data.get("rabbitmq"),
            broker=data.get("broker"),
            scheduler=data.get("scheduler"),
            compiler=data.get("compiler"),
            worker=data.get("worker"),
            beat=data.get("beat"),
            external_services=data.get("external_services"),
        )
        validate_gateway(data.get("gateway"))
        # Agent chart: platform-only keys must not be present.
        if data.get("deployment_chart") == DeploymentCharts.AGENT:
            wrong_agent_deployment_keys(
                polyaxon_secret=data.get("polyaxon_secret"),
                internal_token=data.get("internal_token"),
                password_length=data.get("password_length"),
                user=data.get("user"),
                global_replicas=data.get("global_replicas"),
                global_concurrency=data.get("global_concurrency"),
                api=data.get("api"),
                scheduler=data.get("scheduler"),
                worker=data.get("worker"),
                beat=data.get("beat"),
                tables_hook=data.get("tables_hook"),
                api_hooks=data.get("api_hooks"),
                hooks=data.get("hooks"),
                flower=data.get("flower"),
                postgresql=data.get("postgresql"),
                redis=data.get("redis"),
                rabbitmq=data.get("rabbitmq"),
                broker=data.get("broker"),
                email=data.get("email"),
                ldap=data.get("ldap"),
                intervals=data.get("intervals"),
                metrics=data.get("metrics"),
                organization_key=data.get("organization_key"),
                ui=data.get("ui"),
            )
class DeploymentConfig(BaseConfig):
    """Typed configuration object for a deployment.

    Mirrors ``DeploymentSchema``: every schema field has a matching
    keyword argument, and the same cross-field validations run in
    ``__init__`` so the object is also safe to construct directly
    (i.e. without going through schema deserialization).
    """
    SCHEMA = DeploymentSchema
    def __init__(
        self,
        deployment_type=None,
        deployment_chart=None,
        deployment_version=None,
        release_name=None,
        namespace=None,
        rbac=None,
        polyaxon_secret=None,
        internal_token=None,
        password_length=None,
        ssl=None,
        encryption_secret=None,
        platform_secret=None,
        agent_secret=None,
        timezone=None,
        environment=None,
        ingress=None,
        user=None,
        node_selector=None,
        tolerations=None,
        affinity=None,
        celery_node_selector=None,
        celery_tolerations=None,
        celery_affinity=None,
        limit_resources=None,
        global_replicas=None,
        global_concurrency=None,
        gateway=None,
        api=None,
        streams=None,
        scheduler=None,
        compiler=None,
        worker=None,
        beat=None,
        agent=None,
        operator=None,
        init=None,
        sidecar=None,
        notifier=None,
        cleaner=None,
        default_scheduling=None,
        default_image_pull_secrets=None,
        tables_hook=None,
        clean_hooks=None,
        api_hooks=None,
        hooks=None,
        flower=None,
        postgresql=None,
        redis=None,
        rabbitmq=None,
        broker=None,
        email=None,
        ldap=None,
        metrics=None,
        image_pull_secrets=None,
        host_name=None,
        allowed_hosts=None,
        include_host_ips=None,
        intervals=None,
        artifacts_store=None,
        connections=None,
        log_level=None,
        security_context=None,
        external_services=None,
        debug_mode=None,
        auth=None,
        proxy=None,
        organization_key=None,
        dns=None,
        ui=None,
        operators=None,
        istio=None,
        include_chart_revision=None,
    ):
        # Same validations as DeploymentSchema.validate_deployment; they
        # raise before any attribute is assigned on invalid input.
        validate_deployment_chart(
            deployment_chart=deployment_chart,
            agent=agent,
            environment=environment,
        )
        validate_platform_deployment(
            postgresql=postgresql,
            redis=redis,
            rabbitmq=rabbitmq,
            broker=broker,
            scheduler=scheduler,
            compiler=compiler,
            worker=worker,
            beat=beat,
            external_services=external_services,
        )
        validate_gateway(gateway)
        self.deployment_type = deployment_type
        # Full platform chart is the default when no chart is specified.
        self.deployment_chart = deployment_chart or DeploymentCharts.PLATFORM
        self.deployment_version = deployment_version
        self.release_name = release_name
        self.namespace = namespace
        self.rbac = rbac
        self.polyaxon_secret = polyaxon_secret
        self.internal_token = internal_token
        self.password_length = password_length
        self.ssl = ssl
        self.dns = dns
        self.encryption_secret = encryption_secret
        self.platform_secret = platform_secret
        self.agent_secret = agent_secret
        self.timezone = timezone
        self.environment = environment
        self.ingress = ingress
        self.user = user
        self.node_selector = node_selector
        self.tolerations = tolerations
        self.affinity = affinity
        self.celery_node_selector = celery_node_selector
        self.celery_tolerations = celery_tolerations
        self.celery_affinity = celery_affinity
        self.limit_resources = limit_resources
        self.global_replicas = global_replicas
        self.global_concurrency = global_concurrency
        self.gateway = gateway
        self.api = api
        self.streams = streams
        self.scheduler = scheduler
        self.compiler = compiler
        self.worker = worker
        self.beat = beat
        self.agent = agent
        self.operator = operator
        self.init = init
        self.sidecar = sidecar
        self.notifier = notifier
        self.cleaner = cleaner
        self.default_scheduling = default_scheduling
        self.default_image_pull_secrets = default_image_pull_secrets
        self.tables_hook = tables_hook
        self.clean_hooks = clean_hooks
        self.api_hooks = api_hooks
        self.hooks = hooks
        self.flower = flower
        self.postgresql = postgresql
        self.redis = redis
        self.rabbitmq = rabbitmq
        self.broker = broker
        self.email = email
        self.ldap = ldap
        self.metrics = metrics
        self.image_pull_secrets = image_pull_secrets
        self.host_name = host_name
        self.allowed_hosts = allowed_hosts
        self.include_host_ips = include_host_ips
        self.intervals = intervals
        self.artifacts_store = artifacts_store
        self.connections = connections
        self.log_level = log_level
        self.security_context = security_context
        self.external_services = external_services
        self.debug_mode = debug_mode
        self.auth = auth
        self.proxy = proxy
        self.organization_key = organization_key
        self.ui = ui
        self.operators = operators
        self.istio = istio
        self.include_chart_revision = include_chart_revision
        # Agent deployments must not carry platform-only keys.
        if self.deployment_chart == DeploymentCharts.AGENT:
            wrong_agent_deployment_keys(
                polyaxon_secret=polyaxon_secret,
                internal_token=internal_token,
                password_length=password_length,
                platform_secret=platform_secret,
                encryption_secret=encryption_secret,
                user=user,
                global_replicas=global_replicas,
                global_concurrency=global_concurrency,
                api=api,
                scheduler=scheduler,
                compiler=compiler,
                worker=worker,
                beat=beat,
                tables_hook=tables_hook,
                api_hooks=api_hooks,
                hooks=hooks,
                flower=flower,
                postgresql=postgresql,
                redis=redis,
                rabbitmq=rabbitmq,
                broker=broker,
                email=email,
                ldap=ldap,
                intervals=intervals,
                metrics=metrics,
                organization_key=organization_key,
                ui=ui,
            )
| |
# -*- coding: utf-8 -*-
"""
sale
"""
from trytond.model import fields
from trytond.transaction import Transaction
from trytond.pool import Pool, PoolMeta
from trytond.pyson import Eval, Or, Bool
__all__ = ['Sale']
__metaclass__ = PoolMeta
class Sale:
    """Sale extended with multi-channel support.

    Adds a required ``channel`` to each sale, channel-driven defaults for
    company/warehouse/price list/payment term/invoice and shipment methods,
    and per-channel exception tracking.
    """
    __name__ = 'sale.sale'

    channel = fields.Many2One(
        'sale.channel', 'Channel', required=True, select=True, domain=[
            ('id', 'in', Eval('context', {}).get('allowed_read_channels', [])),
        ],
        states={
            # Channel is frozen once the sale is saved or has lines.
            'readonly': Or(
                (Eval('id', default=0) > 0),
                Bool(Eval('lines', default=[])),
            )
        }, depends=['id']
    )
    channel_type = fields.Function(
        fields.Char('Channel Type'), 'on_change_with_channel_type'
    )
    has_channel_exception = fields.Function(
        fields.Boolean('Has Channel Exception ?'), 'get_has_channel_exception',
        searcher='search_has_channel_exception'
    )
    exceptions = fields.One2Many(
        "channel.exception", "origin", "Exceptions"
    )

    @classmethod
    def search_has_channel_exception(cls, name, clause):
        """
        Returns domain for sale with exceptions
        """
        if clause[2]:
            return [('exceptions.is_resolved', '=', False)]
        else:
            return [
                'OR',
                [('exceptions', '=', None)],
                [('exceptions.is_resolved', '=', True)],
            ]

    def get_channel_exceptions(self, name=None):
        """Return ids of this sale's channel exceptions, unresolved first."""
        ChannelException = Pool().get('channel.exception')

        return map(
            int, ChannelException.search([
                ('origin', '=', '%s,%s' % (self.__name__, self.id)),
                ('channel', '=', self.channel.id),
            ], order=[('is_resolved', 'desc')])
        )

    @classmethod
    def set_channel_exceptions(cls, exceptions, name, value):
        # Function-field setter: exceptions are managed from the
        # channel.exception side, so writing here is a no-op.
        pass

    def get_has_channel_exception(self, name):
        """
        Returns True if sale has an unresolved channel exception
        """
        ChannelException = Pool().get('channel.exception')

        return bool(
            ChannelException.search([
                ('origin', '=', '%s,%s' % (self.__name__, self.id)),
                ('channel', '=', self.channel.id),
                ('is_resolved', '=', False)
            ])
        )

    @classmethod
    def __setup__(cls):
        super(Sale, cls).__setup__()
        cls._error_messages.update({
            'channel_missing': (
                'Go to user preferences and select a current_channel ("%s")'
            ),
            'channel_change_not_allowed': (
                'Cannot change channel'
            ),
            'not_create_channel': (
                'You cannot create order under this channel because you do not '
                'have required permissions'
            ),
        })

    @classmethod
    def default_channel(cls):
        """Default to the context channel, else the user's current channel."""
        User = Pool().get('res.user')
        user = User(Transaction().user)
        channel_id = Transaction().context.get('current_channel')
        if channel_id:
            return channel_id
        return user.current_channel and \
            user.current_channel.id  # pragma: nocover

    @staticmethod
    def default_company():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')

        channel_id = Sale.default_channel()
        if channel_id:
            return Channel(channel_id).company.id
        return Transaction().context.get('company')  # pragma: nocover

    @staticmethod
    def default_invoice_method():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')
        Config = Pool().get('sale.configuration')

        channel_id = Sale.default_channel()
        if not channel_id:  # pragma: nocover
            config = Config(1)
            return config.sale_invoice_method
        return Channel(channel_id).invoice_method

    @staticmethod
    def default_shipment_method():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')
        Config = Pool().get('sale.configuration')

        channel_id = Sale.default_channel()
        if not channel_id:  # pragma: nocover
            config = Config(1)
            # Fixed copy-paste bug: this previously returned
            # config.sale_invoice_method.
            return config.sale_shipment_method
        return Channel(channel_id).shipment_method

    @staticmethod
    def default_warehouse():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')
        Location = Pool().get('stock.location')

        channel_id = Sale.default_channel()
        if not channel_id:  # pragma: nocover
            return Location.search([('type', '=', 'warehouse')], limit=1)[0].id
        else:
            return Channel(channel_id).warehouse.id

    @staticmethod
    def default_price_list():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')

        channel_id = Sale.default_channel()
        if channel_id:
            return Channel(channel_id).price_list.id
        return None  # pragma: nocover

    @staticmethod
    def default_payment_term():
        Sale = Pool().get('sale.sale')
        Channel = Pool().get('sale.channel')

        channel_id = Sale.default_channel()
        if channel_id:
            return Channel(channel_id).payment_term.id
        return None  # pragma: nocover

    @fields.depends('channel', 'party')
    def on_change_channel(self):
        """Propagate channel defaults onto the sale when channel changes."""
        if not self.channel:
            return {}  # pragma: nocover
        res = {}
        for fname in ('company', 'warehouse', 'currency', 'payment_term'):
            fvalue = getattr(self.channel, fname)
            if fvalue:
                res[fname] = fvalue.id
        # The party's own price list wins over the channel's.
        if (not self.party or not self.party.sale_price_list):
            res['price_list'] = self.channel.price_list.id  # pragma: nocover
        if self.channel.invoice_method:
            res['invoice_method'] = self.channel.invoice_method
        if self.channel.shipment_method:
            res['shipment_method'] = self.channel.shipment_method

        # Update AR record
        for key, value in res.iteritems():
            if '.' not in key:
                setattr(self, key, value)
        return res

    @fields.depends('channel')
    def on_change_party(self):  # pragma: nocover
        res = super(Sale, self).on_change_party()
        channel = self.channel

        if channel:
            # Fall back to channel price list / payment term when the party
            # did not provide its own.
            if not res.get('price_list') and res.get('invoice_address'):
                res['price_list'] = channel.price_list.id
                res['price_list.rec_name'] = channel.price_list.rec_name
            if not res.get('payment_term') and res.get('invoice_address'):
                res['payment_term'] = channel.payment_term.id
                res['payment_term.rec_name'] = \
                    self.channel.payment_term.rec_name

        # Update AR record
        for key, value in res.iteritems():
            setattr(self, key, value)
        return res

    @fields.depends('channel')
    def on_change_with_channel_type(self, name=None):
        """
        Returns the source of the channel
        """
        if self.channel:
            return self.channel.source

    def check_create_access(self, silent=False):
        """
        Check sale creation in channel.

        :param silent: when True, return False instead of raising on denial.
        """
        User = Pool().get('res.user')
        user = User(Transaction().user)

        if user.id == 0:
            return  # pragma: nocover

        if self.channel not in user.allowed_create_channels:
            if silent:
                return False
            self.raise_user_error('not_create_channel')
        return True

    @classmethod
    def write(cls, sales, values, *args):
        """
        Forbid any change of channel on existing sales
        """
        if 'channel' in values:
            # Channel cannot be changed at any cost.
            cls.raise_user_error('channel_change_not_allowed')

        super(Sale, cls).write(sales, values, *args)

    @classmethod
    def create(cls, vlist):
        """
        Check if user is allowed to create sale in channel
        """
        User = Pool().get('res.user')
        user = User(Transaction().user)

        for values in vlist:
            if 'channel' not in values and not cls.default_channel():
                cls.raise_user_error(
                    'channel_missing', (user.rec_name,)
                )  # pragma: nocover
        sales = super(Sale, cls).create(vlist)
        for sale in sales:
            sale.check_create_access()
        return sales

    @classmethod
    def copy(cls, sales, default=None):
        """
        Duplicating records
        """
        if default is None:
            default = {}

        for sale in sales:
            # Fall back to the user's default channel when the user may not
            # create sales in the source sale's channel.
            if not sale.check_create_access(True):
                default['channel'] = cls.default_channel()

        return super(Sale, cls).copy(sales, default=default)
| |
import demistomock as demisto
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
from CommonServerUserPython import * # noqa: E402 lgtm [py/polluting-import]
import requests
from typing import Dict, Tuple, List, Union
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# GUIDs of the built-in Fidelis Endpoint script packages, grouped by action
# and target OS; passed as `packageId`/`scriptId` to /jobs/createTask.
# List processes
LIST_PROCESSES_WINDOWS = '2d32a530-0716-4542-afdc-8da3bd47d8bf' # disable-secrets-detection
LIST_PROCESSES_LINUX = '5e58a0e9-450d-4394-8360-159d5e38c280' # disable-secrets-detection
LIST_PROCESSES_MACOS = '020114c2-d000-4876-91b0-97f41a83b067' # disable-secrets-detection
# Kill processes
KILL_PROCESS_WINDOWS = '8d379688-dde1-451d-8fa2-4f29c84baf97' # disable-secrets-detection
KILL_PROCESS_MAC_LINUX = '76577d3a-c1d7-4d10-af9e-5825c3f9d016' # disable-secrets-detection
# Delete file
DELETE_FILE_WINDOWS = '11cb4fae-5516-4391-8a3c-eb09793cd5dd' # disable-secrets-detection
DELETE_FILE_MAC_LINUX = 'bead9799-401d-4b9e-adca-cf41b20c9118' # disable-secrets-detection
# Network isolation
NETWORK_ISOLATION_WINDOWS = '1d01cc84-753d-4060-89a7-463567552a62' # disable-secrets-detection
NETWORK_ISOLATION_MAC_LINUX = 'fd09996a-ef56-49fb-b811-0e5da4bd07ca' # disable-secrets-detection
# Remove network isolation
REMOVE_NETWORK_ISOLATION_WINDOWS = '99bbaea5-df18-40cc-8759-b5fb61527d5a' # disable-secrets-detection
REMOVE_NETWORK_ISOLATION_MAC_LINUX = '5e252298-4c50-4cdd-94c0-d6997b79157c' # disable-secrets-detection
class Client(BaseClient):
    """
    Client to use in the Fidelis Endpoint integration. Overrides BaseClient.

    All methods return the decoded JSON response. Script-execution endpoints
    share a single `/jobs/createTask` payload built by `_create_task`, and the
    event queries delegate to `query_events`.
    """

    def __init__(self, server_url: str, username: str, password: str, verify: bool, proxy: bool):
        super().__init__(base_url=server_url, verify=verify, proxy=proxy)
        # Authenticate once; the bearer token is attached to every request.
        token = self._generate_token(username, password)
        self._headers = {'Authorization': f'Bearer {token}'}

    def _generate_token(self, username: str, password: str) -> str:
        """Generate a token
        Arguments:
            username {str} -- Fidelis username to retrieve token with
            password {str} -- Fidelis password to retrieve token with
        Returns:
            token valid for 10 minutes
        """
        params = {
            'username': username,
            'password': password
        }
        response = self._http_request('GET', '/authenticate', params=params)
        if response.get('error'):
            raise Exception(response.get('error'))
        token = response.get('data', {}).get('token', '')
        return token

    def test_module_request(self):
        """Performs basic GET request to check if the API is reachable and authentication is successful.
        Returns:
            Response content
        """
        suffix = '/alerts/getalertsV2'
        self._http_request('GET', suffix, params={'take': 1})

    def list_alerts(self, limit: str = None, sort: str = None, start_date=None, end_date=None) -> Dict:
        """Fetch alerts, optionally limited, sorted and date-bounded."""
        url_suffix = '/alerts/getalertsV2'
        params = assign_params(
            take=limit,
            sort=sort,
            startDate=start_date,
            endDate=end_date
        )
        return self._http_request('GET', url_suffix, params=params)

    def get_host_info(self, host_name: str, ip_address: str) -> Dict:
        """Search endpoints by host name or IP (host name wins when both are given).

        Raises:
            ValueError: when neither host_name nor ip_address is provided
                (previously this fell through to a NameError).
        """
        url_suffix = '/endpoints/v2/0/100/hostname Ascending'
        if host_name:
            field_name = 'HostName'
            value = host_name
        elif ip_address:
            field_name = 'IpAddress'
            value = ip_address
        else:
            raise ValueError('Either host_name or ip_address must be provided.')
        params = {
            'accessType': '3',
            'search': json.dumps({
                'searchFields': [{
                    'fieldName': field_name,
                    'values': [{
                        'value': value
                    }]
                }]
            })
        }
        return self._http_request('GET', url_suffix, params=params)

    def search_file(self, host=None, md5=None, file_extension=None, file_path=None, file_size=None) -> Dict:
        """Start a file-search job across hosts; returns job/jobResult ids."""
        url_suffix = '/files/search'
        body = assign_params(
            hosts=host,
            md5Hashes=md5,
            fileExtensions=file_extension,
            filePathHints=file_path,
            fileSize=file_size
        )
        return self._http_request('POST', url_suffix, json_data=body)

    def file_search_status(self, job_id: str = None, job_result_id: str = None) -> Dict:
        """Poll the status of a file-search job."""
        url_suffix = f'/jobs/getjobstatus/{job_id}/{job_result_id}'
        return self._http_request('GET', url_suffix)

    def file_search_results_metadata(self, job_id: str = None, job_result_id: str = None) -> Dict:
        """Fetch metadata (names, hashes, paths) of a finished file search."""
        url_suffix = f'/jobs/{job_id}/jobresults/{job_result_id}'
        return self._http_request('GET', url_suffix)

    def get_file(self, file_id: str = None) -> Union[str, bytes]:
        """Download a collected file's raw content."""
        url_suffix = f'/files/{file_id}'
        return self._http_request('GET', url_suffix, resp_type='content')

    def delete_job(self, job_id: str = None) -> Dict:
        """Delete a job by id."""
        url_suffix = f'/jobs/{job_id}'
        return self._http_request('DELETE', url_suffix)

    def list_scripts(self) -> Dict:
        """List the available script packages."""
        url_suffix = '/packages'
        return self._http_request('GET', url_suffix)

    def script_manifest(self, script_id: str = None) -> Dict:
        """Fetch a script package's manifest (parameters/questions)."""
        url_suffix = f'/packages/{script_id}?type=Manifest'
        return self._http_request('GET', url_suffix)

    def _create_task(self, script_id, endpoints, time_out, questions) -> Dict:
        """POST the `/jobs/createTask` payload shared by all script executions.

        Args:
            script_id: package/script GUID to run.
            endpoints: endpoint ids or IPs the task targets.
            time_out: task timeout in seconds (None for server default).
            questions: list of {'paramNumber': n, 'answer': value} dicts.
        """
        url_suffix = '/jobs/createTask'
        body = {
            'queueExpirationInhours': None,
            'wizardOverridePassword': False,
            'impersonationUser': None,
            'impersonationPassword': None,
            'priority': None,
            'timeoutInSeconds': time_out,
            'packageId': script_id,
            'endpoints': endpoints,
            'isPlaybook': False,
            'taskOptions': [
                {
                    'integrationOutputFormat': None,
                    'scriptId': script_id,
                    'questions': questions
                }
            ]
        }
        return self._http_request('POST', url_suffix, json_data=body)

    def execute_script(self, script_id: str = None, endpoint_ip: str = None, answer: Union[str, int] = None,
                       time_out: int = None, additional_answer: Union[None, str] = None) -> Dict:
        """Run an arbitrary script with up to two answers."""
        return self._create_task(script_id, endpoint_ip, time_out, [
            {
                'paramNumber': 1,
                'answer': answer
            },
            {
                'paramNumber': 2,
                'answer': additional_answer
            }
        ])

    def convert_ip_to_endpoint_id(self, ip: list = None) -> Dict:
        """Resolve endpoint ids from a list of IP addresses."""
        url_suffix = '/endpoints/endpointidsbyip'
        body = ip
        return self._http_request('POST', url_suffix, json_data=body)

    def convert_name_to_endpoint_id(self, endpoint_name: list = None) -> Dict:
        """Resolve endpoint ids from a list of endpoint names."""
        url_suffix = '/endpoints/endpointidsbyname'
        body = endpoint_name
        return self._http_request('POST', url_suffix, json_data=body)

    def list_process(self, script_id: str = None, time_out: int = None, endpoint_id: str = None) -> Dict:
        """Run the list-processes script (all three boolean options enabled)."""
        return self._create_task(script_id, endpoint_id, time_out, [
            {
                'paramNumber': 1,
                'answer': True
            },
            {
                'paramNumber': 2,
                'answer': True
            },
            {
                'paramNumber': 3,
                'answer': True
            }
        ])

    def script_job_results(self, job_id: str = None) -> Dict:
        """Fetch the results of a script job."""
        url_suffix = f'/jobresults/{job_id}'
        return self._http_request('POST', url_suffix)

    def kill_process(self, script_id: str = None, pid: int = None, time_out: int = None,
                     endpoint_ip=None) -> Dict:
        """Run the kill-process script against a PID."""
        return self._create_task(script_id, endpoint_ip, time_out, [
            {
                'paramNumber': 1,
                'answer': pid
            }
        ])

    def delete_file(self, script_id: str = None, file_path: str = None, time_out: int = None, endpoint_ip=None) -> Dict:
        """Run the delete-file script against a file path."""
        return self._create_task(script_id, endpoint_ip, time_out, [
            {
                'paramNumber': 1,
                'answer': file_path
            }
        ])

    def network_isolation(self, script_id: str = None, allowed_server: str = None, time_out: int = None,
                          endpoint_ip=None) -> Dict:
        """Isolate endpoints from the network, keeping one allowed server."""
        return self._create_task(script_id, endpoint_ip, time_out, [
            {
                'paramNumber': 1,
                'answer': allowed_server
            }
        ])

    def remove_network_isolation(self, script_id: str = None, time_out: int = None, endpoint_ip: list = None) -> Dict:
        """Lift network isolation (script takes no parameters)."""
        return self._create_task(script_id, endpoint_ip, time_out, [
            {}
        ])

    def get_script_job_status(self, job_result_id: str = None) -> Dict:
        """Poll per-endpoint status of a script job."""
        url_suffix = f'/jobs/getjobtargets/{job_result_id}'
        return self._http_request('GET', url_suffix)

    def query_file_by_hash(self, limit: str = None, start_time: str = None, end_time: str = None, logic: str = None,
                           file_hash: str = None) -> Dict:
        """Query file events matching a hash (delegates to query_events)."""
        return self.query_events(limit=limit, start_time=start_time, end_time=end_time, logic=logic,
                                 column='hash', value=file_hash, entity_type='file', operator='=')

    def query_by_process_name(self, limit: str = None, start_time: str = None,
                              end_time: str = None, logic: str = None,
                              process_name: str = None) -> Dict:
        """Query process events by process name (delegates to query_events)."""
        return self.query_events(limit=limit, start_time=start_time, end_time=end_time, logic=logic,
                                 column='name', value=process_name, entity_type='process', operator='=')

    def query_by_remote_ip(self, limit: str = None, start_time: str = None,
                           end_time: str = None, logic: str = None, remote_ip: str = None) -> Dict:
        """Query network events by remote IP, returning connection columns."""
        network_fields = ['endpointName', 'eventType', 'endpointId', 'parentName', 'ppid', 'user', 'localIP',
                          'localPort', 'remoteIP', 'remotePort', 'processStartTime', 'firstEventTime',
                          'lastEventTime', 'protocol', 'parentHashSHA1']
        return self.query_events(limit=limit, start_time=start_time, end_time=end_time, logic=logic,
                                 column='remoteIP', value=remote_ip, entity_type='network', operator='=',
                                 result_fields=network_fields)

    def query_by_dns_request(self, limit: str = None, start_time: str = None,
                             end_time: str = None, logic: str = None, url: str = None) -> Dict:
        """Query DNS events whose question matches (substring, '=~') a URL."""
        return self.query_events(limit=limit, start_time=start_time, end_time=end_time, logic=logic,
                                 column='dnsQuestion', value=url, entity_type='dns', operator='=~',
                                 result_fields=['endpointName'])

    def query_by_dns_server_ip(self, limit: str = None, start_time: str = None,
                               end_time: str = None, logic: str = None,
                               remote_ip: str = None) -> Dict:
        """Query DNS events by the DNS server (remote) IP."""
        return self.query_events(limit=limit, start_time=start_time, end_time=end_time, logic=logic,
                                 column='remoteIP', value=remote_ip, entity_type='dns', operator='=',
                                 result_fields=['endpointName'])

    def query_by_dns_source_ip(self, limit: str = None, start_time: str = None,
                               end_time: str = None, logic: str = None, source_ip: str = None,
                               domain: str = None) -> Dict:
        """Query DNS events by domain AND source (local) IP."""
        source_ip_filter = {
            'filterType': 'criteria',
            'column': 'localIP',
            'operator': '=',
            'value': source_ip
        }
        return self.query_events(limit=limit, start_time=start_time, end_time=end_time, logic=logic,
                                 column='dnsQuestion', value=domain, entity_type='dns', operator='=~',
                                 additional_filter=source_ip_filter, result_fields=['endpointName'])

    def query_events(self, limit: str = None, start_time: str = None,
                     end_time: str = None, logic: str = None, column: str = None,
                     value: str = None, entity_type: str = None, operator: str = None,
                     additional_filter: Dict = None, result_fields: List = None) -> Dict:
        """Generic `/v2/events` query.

        Args:
            limit: page size.
            start_time / end_time: date range bounds.
            logic: composite filter logic ('and'/'or').
            column / operator / value: primary criteria filter.
            entity_type: event entity ('file', 'process', 'network', 'dns', ...).
            additional_filter: extra criteria dict appended to the filters list.
            result_fields: columns to return; defaults to the process/file set
                (new optional parameter — existing callers are unaffected).

        Raises:
            Exception: when the API response carries an error.
        """
        url_suffix = '/v2/events'
        params = assign_params(pageSize=limit)
        if result_fields is None:
            result_fields = ['endpointName', 'eventType', 'processStartTime', 'parentName', 'pid', 'name', 'path',
                             'user', 'hash', 'parameters']
        body = {
            'dateRange': {
                'start': start_time,
                'end': end_time
            },
            'resultFields': result_fields,
            'criteriaV3': {
                'relationshipFilter': None,
                'entityType': entity_type,
                'filter': {
                    'filterType': 'composite',
                    'logic': logic,
                    'filters': [
                        {
                            'filterType': 'criteria',
                            'column': column,
                            'operator': operator,
                            'value': value
                        }
                    ]
                }
            }
        }
        if additional_filter:
            body['criteriaV3']['filter']['filters'].append(additional_filter)  # type: ignore
        response = self._http_request('POST', url_suffix, params=params, json_data=body)
        if response.get('error'):
            raise Exception(response.get('error'))
        return response
def get_endpoint_id(client: Client, endpoint_ip: list = None, endpoint_name: list = None):
    """Resolve endpoint ids from exactly one of endpoint_ip / endpoint_name."""
    if endpoint_name and endpoint_ip:
        raise Exception('You must provide only one of the arguments endpoint_ip or endpoint_name')
    if not (endpoint_ip or endpoint_name):
        raise Exception('You must provide either endpoint_ip or endpoint_name')
    if endpoint_ip:
        response = client.convert_ip_to_endpoint_id(endpoint_ip)
    else:
        response = client.convert_name_to_endpoint_id(endpoint_name)
    return response.get('data')
def test_module(client: Client, fetch_limit: str, *_) -> Tuple[str, Dict, Dict]:
    """
    Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
    """
    client.test_module_request()
    if demisto.params().get('isFetch'):
        # Fetch requires a sane minimum page size.
        if int(fetch_limit) < 5:
            # Fixed user-facing typo: "at lest" -> "at least".
            return 'Fetch limit must be at least 5', {}, {}
    return 'ok', {}, {}
def list_alerts_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """List Fidelis Endpoint alerts.

    Builds the full context entry once per alert and derives the markdown
    table rows from it, instead of duplicating the field mapping (the table
    fields were an exact subset of the context fields).
    """
    limit = args.get('limit', '50')
    sort = args.get('sort')
    start_date = args.get('start_date')
    end_date = args.get('end_date')
    headers = ['ID', 'Name', 'EndpointName', 'EndpointID', 'Source', 'ArtifactName', 'IntelName', 'Severity',
               'CreateDate', 'AlertDate']
    # Fields shown in the markdown table; the context keeps the full record.
    summary_keys = ('Name', 'ID', 'EndpointName', 'EndpointID', 'Source', 'ArtifactName', 'IntelName', 'Severity',
                    'CreateDate')
    contents = []
    context = []
    response = client.list_alerts(limit, sort, start_date, end_date)
    if not response.get('success'):
        raise Exception(response.get('error'))
    alerts = response.get('data', {}).get('entities', [])
    if not alerts:
        return 'No alerts were found.', {}, {}
    for alert in alerts:
        entry = {
            'Name': alert.get('name'),
            'ID': alert.get('id'),
            'EndpointName': alert.get('endpointName'),
            'EndpointID': alert.get('endpointId'),
            'Source': alert.get('source'),
            'ArtifactName': alert.get('artifactName'),
            'IntelName': alert.get('intelName'),
            'Severity': alert.get('severity'),
            'CreateDate': alert.get('createDate'),
            'HasJob': alert.get('hasJob'),
            'Description': alert.get('description'),
            'IntelID': alert.get('intelId'),
            'SourceType': alert.get('sourceType'),
            'ValidatedDate': alert.get('validatedDate'),
            'EventID': alert.get('eventId'),
            'ActionsTaken': alert.get('actionsTaken'),
            'EventTime': alert.get('eventTime'),
            'ParentEventID': alert.get('parentEventId'),
            'EventType': alert.get('eventType'),
            'EventIndex': alert.get('eventIndex'),
            'Telemetry': alert.get('telemetry'),
            'ReportID': alert.get('reportId'),
            'InsertionDate': alert.get('insertionDate'),
            'AgentTag': alert.get('agentTag')
        }
        context.append(entry)
        contents.append({key: entry[key] for key in summary_keys})
    entry_context = {'FidelisEndpoint.Alert(val.AlertID && val.AlertID === obj.AlertID)': context}
    human_readable = tableToMarkdown('Fidelis Endpoint Alerts', contents, headers, removeNull=True)
    return human_readable, entry_context, response
def host_info_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Fetch endpoint details for a given host name or IP address."""
    ip_address = args.get('ip_address', '')
    host = args.get('host', '')
    if not host and not ip_address:
        raise Exception('You must provide either ip_address or host')
    headers = ['ID', 'HostName', 'IpAddress', 'OS', 'MacAddress', 'Isolated', 'LastContactDate', 'AgentInstalled',
               'AgentVersion', 'OnNetwork', 'AV_Enabled', 'Groups', 'ProcessorName']
    response = client.get_host_info(host, ip_address)
    if not response.get('success'):
        raise Exception(response.get('error'))
    hosts = response.get('data', {})
    if not hosts:
        return 'No hosts was found', {}, {}
    host_info = hosts.get('entities', [])
    if not host_info:
        return 'No entities were found for the host', {}, {}
    contents = []
    context_standards = []
    # `entity` (not `host`) to avoid shadowing the `host` argument above.
    for entity in host_info:
        contents.append({
            'Hostname': entity.get('hostName'),
            'ID': entity.get('id'),
            'IPAddress': entity.get('ipAddress'),
            'OS': entity.get('os'),
            'MacAddress': entity.get('macAddress'),
            'LastContactDate': entity.get('lastContactDate'),
            'AgentInstalled': entity.get('agentInstalled'),
            'AgentVersion': entity.get('agentVersion'),
            'AV_Enabled': entity.get('aV_Enabled'),
            'Isolated': entity.get('isolated'),
            'OnNetwork': entity.get('onNetwork'),
            'Groups': entity.get('groups'),
            'ProcessorName': entity.get('processorName')
        })
        context_standards.append({
            'Hostname': entity.get('hostName'),
            'ID': entity.get('id'),
            'IPAddress': entity.get('ipAddress'),
            'OS': entity.get('os'),
            'MACAddress': entity.get('macAddress'),
            'Processor': entity.get('processorName')
        })
    entry_context = {
        'FidelisEndpoint.Host(val.ID && val.ID === obj.ID)': contents,
        'Endpoint(val.ID && val.ID === obj.ID)': context_standards
    }
    human_readable = tableToMarkdown('Fidelis Endpoint Host Info', contents, headers=headers, removeNull=True)
    return human_readable, entry_context, response
def file_search(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """ Search for files on multiple hosts, using file hash, extension, file size, and other search criteria."""
    host = argToList(args.get('host', ['']))
    md5 = argToList(args.get('md5'))
    file_extension = argToList(args.get('file_extension'))
    file_path = argToList(args.get('file_path'))
    file_size_arg = args.get('file_size')
    try:
        file_size = {
            'value': int(file_size_arg),  # type: ignore
            'quantifier': 'greaterThan'
        }
    except (TypeError, ValueError):
        # The previous `except Exception as e: raise Exception(e)` re-wrap
        # surfaced the bare TypeError/ValueError text; fail with an
        # actionable message instead.
        raise Exception(f'file_size must be an integer, got: {file_size_arg}')
    response = client.search_file(host, md5, file_extension, file_path, file_size)
    if not response.get('success'):
        raise Exception(response.get('error'))
    data = response.get('data', {})
    contents = {
        'JobID': data.get('jobId'),
        'JobResultID': data.get('jobResultId')
    }
    entry_context = {'FidelisEndpoint.FileSearch(val.JobID && val.JobID === obj.JobID)': contents}
    human_readable = tableToMarkdown('Fidelis Endpoint file search', contents)
    return human_readable, entry_context, response
def file_search_status(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Return the current status of a previously started file-search job."""
    job_id = args.get('job_id')
    job_result_id = args.get('job_result_id')
    response = client.file_search_status(job_id, job_result_id)
    if not response.get('success'):
        raise Exception(response.get('error'))
    data = response.get('data', {})
    if not data:
        return 'Could not find any data for this Job ID', {}, {}
    # Context falls back to 'Unclassified'; the readable line reports the raw value.
    contents = {
        'JobID': job_id,
        'JobResultID': job_result_id,
        'Status': data.get('status', 'Unclassified')
    }
    entry_context = {'FidelisEndpoint.FileSearch(val.JobID && val.JobID === obj.JobID)': contents}
    human_readable = f"Fidelis Endpoint file search status is: {data.get('status')}"
    return human_readable, entry_context, response
def file_search_reasult_metadata(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Get the job results metadata for a file-search job.

    Bug fix: ``contents`` and ``file_standards`` used to be single dicts that were
    overwritten on every collected file, so only the last file of the last host ever
    reached the context. They now accumulate one entry per collected file.

    Args:
        client: Fidelis Endpoint API client.
        args: Command arguments with ``job_id`` and ``job_result_id``.

    Returns:
        Tuple of (human readable markdown, entry context, raw API response).
    """
    job_id = args.get('job_id')
    job_result_id = args.get('job_result_id')
    headers = ['ID', 'FileName', 'FilePath', 'MD5Hash', 'FileSize', 'HostName', 'HostIP', 'AgentID']
    response = client.file_search_results_metadata(job_id, job_result_id)
    if not response.get('success'):
        return 'Could not find results for this job ID.', {}, {}
    data = response.get('data', {}).get('jobResultInfos', [])
    if not data:
        return 'No results found.\nCheck the job status, it might be still running.', {}, {}
    contents = []
    file_standards = []
    for item in data:
        # Hosts with no collected files contribute nothing.
        for obj in item.get('collectedFiles') or []:
            contents.append({
                'FileName': obj.get('name'),
                'ID': obj.get('id'),
                'MD5Hash': obj.get('mD5Hash'),
                'FilePath': obj.get('filePath'),
                'FileSize': obj.get('fileSize'),
                'HostName': item.get('hostName'),
                'HostIP': item.get('hostIP'),
                'AgentID': item.get('agentId')
            })
            file_standards.append({
                'Name': obj.get('name'),
                'MD5': obj.get('mD5Hash'),
                'Path': obj.get('filePath'),
                'Size': obj.get('fileSize'),
                'Hostname': item.get('hostName')
            })
    entry_context = {
        'FidelisEndpoint.File(val.ID && val.ID === obj.ID)': contents,
        outputPaths['file']: file_standards
    }
    human_readable = tableToMarkdown('Fidelis Endpoint file results metadata', contents, headers=headers, removeNull=True)
    return human_readable, entry_context, response
def get_file_command(client: Client, args: dict):
    """Download a collected file and wrap it as a war-room file entry."""
    file_name: str = args.get('file_name', '')
    file_id: str = args.get('file_id', '')
    raw_file = client.get_file(file_id)
    return fileResult(file_name, raw_file)
def delete_file_search_job_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Delete a file-search job by its job ID."""
    response = client.delete_job(args.get('job_id'))
    if response.get('success'):
        return 'The job was successfully deleted', {}, response
    raise Exception(response.get('error'))
def list_scripts_command(client: Client, *_) -> Tuple[str, Dict, Dict]:
    """List the script packages available on the Fidelis Endpoint server."""
    response = client.list_scripts()
    if not response.get('success'):
        raise Exception(response.get('error'))
    scripts = response.get('data', {}).get('scripts', [])
    if not scripts:
        return 'No scripts were found.', {}, {}
    contents = [
        {
            'ID': script.get('id'),
            'Name': script.get('name'),
            'Description': script.get('description')
        }
        for script in scripts
    ]
    entry_context = {'FidelisEndpoint.Script(val.ID && val.ID === obj.ID)': contents}
    human_readable = tableToMarkdown('Fidelis Endpoint scripts', contents, ['ID', 'Name', 'Description'])
    return human_readable, entry_context, response
def script_manifest_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Get the manifest (metadata and wizard questions) of a single script."""
    response = client.script_manifest(args.get('script_id'))
    if not response.get('success'):
        raise Exception(response.get('error'))
    data = response.get('data', {})
    # Output key -> API response field.
    field_map = {
        'ID': 'id',
        'Name': 'name',
        'Description': 'description',
        'Priority': 'priority',
        'ResultColumns': 'resultColumns',
        'TimeoutSeconds': 'timeoutSeconds',
        'ImpersonationUser': 'impersonationUser',
        'ImpersonationPassword': 'impersonationPassword',
        'Command': 'command',
        'WizardOverridePassword': 'wizardOverridePassword',
        'Questions': 'questions',
    }
    contents = {out_key: data.get(api_key) for out_key, api_key in field_map.items()}
    # Only platforms whose flag is truthy are reported.
    contents['Platform'] = [name for name, enabled in data.get('platforms', {}).items() if enabled]
    headers = ['ID', 'Name', 'Description', 'Platform', 'Command', 'Questions', 'Priority', 'TimeoutSeconds',
               'ResultColumns', 'ImpersonationUser', 'ImpersonationPassword', 'WizardOverridePassword']
    entry_context = {'FidelisEndpoint.Script(val.ID && val.ID === obj.ID)': contents}
    human_readable = tableToMarkdown('Fidelis Endpoint script manifest', contents, headers, removeNull=True)
    return human_readable, entry_context, response
def execute_script_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Run a script package on the endpoint(s) matching the given IP/name."""
    script_id = args.get('script_id')
    endpoint_id = get_endpoint_id(client,
                                  argToList(args.get('endpoint_ip')),
                                  argToList(args.get('endpoint_name')))
    response = client.execute_script(
        script_id,
        endpoint_id,
        args.get('answer'),
        args.get('time_out'),
        args.get('additional_answer', ''),
    )
    if not response.get('success'):
        raise Exception(response.get('error'))
    job_id = response.get('data')
    entry_context = {
        'FidelisEndpoint.Script(val.ID && val.ID === obj.ID)': {'ID': script_id, 'JobID': job_id}
    }
    return f'The job has been executed successfully. \n Job ID: {job_id}', entry_context, response
def list_process_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Launch the canned 'list processes' script on the requested endpoint."""
    endpoint_id = get_endpoint_id(client,
                                  argToList(args.get('endpoint_ip')),
                                  argToList(args.get('endpoint_name')))
    # Each operating system has its own pre-defined script package.
    os_to_script = {
        'Windows': LIST_PROCESSES_WINDOWS,
        'Linux': LIST_PROCESSES_LINUX,
        'macOS': LIST_PROCESSES_MACOS,
    }
    script_id = os_to_script.get(args.get('operating_system'), '')
    response = client.list_process(script_id, args.get('time_out'), endpoint_id)
    if not response.get('success'):
        raise Exception(response.get('error'))
    job_id = response.get('data')
    entry_context = {
        'FidelisEndpoint.Process(val.ID && val.ID === obj.ID)': {'ID': script_id, 'JobID': job_id}
    }
    return f'The job has been executed successfully. \n Job ID: {job_id}', entry_context, response
def get_script_result(client: Client, args: dict):
    """Fetch the per-process results produced by a script job."""
    response = client.script_job_results(args.get('job_id'))
    if not response.get('success'):
        raise Exception(response.get('error'))
    hits = response.get('data', {}).get('hits', {}).get('hits', [])
    if not hits:
        return 'No results were found', {}, {}
    # Output key -> field name inside each hit's _source document.
    shared_fields = {
        'Path': 'Path',
        'User': 'User',
        'SHA1': 'SHA1',
        'WorkingDirectory': 'Working Directory',
        'EndpointID': '_EndpointId',
        'PID': 'PID',
        'StartTime': 'Start Time',
        'EndpointName': '_EndpointName',
        'Name': 'Name',
        'MD5': 'MD5',
    }
    # Fields surfaced only in the context output (not the readable table).
    context_only_fields = {
        'IsHidden': 'Is Hidden',
        'ParentPID': 'Parent PID',
        'CommandLine': 'Command Line',
        'GroupID': '_GroupID',
        'Matches': 'Matches',
    }
    contents = []
    context = []
    for hit in hits:
        source_ = hit.get('_source', {})
        row = {out_key: source_.get(field) for out_key, field in shared_fields.items()}
        row['ID'] = hit.get('_id')
        contents.append(row)
        ctx_row = dict(row)
        ctx_row.update({out_key: source_.get(field) for out_key, field in context_only_fields.items()})
        ctx_row['Tags'] = hit.get('tags')
        context.append(ctx_row)
    headers = ['ID', 'Name', 'EndpointID', 'EndpointName', 'PID', 'User', 'SHA1', 'MD5', 'Path', 'WorkingDirectory',
               'StartTime']
    entry_context = {'FidelisEndpoint.ScriptResult(val.ID && val.ID === obj.ID)': context}
    human_readable = tableToMarkdown('Fidelis Endpoint script job results', contents, headers, removeNull=True)
    return human_readable, entry_context, response
def kill_process_by_pid(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Run the kill-process script for a given PID on a specific endpoint."""
    endpoint_id = get_endpoint_id(client,
                                  argToList(args.get('endpoint_ip')),
                                  argToList(args.get('endpoint_name')))
    operating_system = args.get('operating_system')
    if operating_system == 'Windows':
        script_id = KILL_PROCESS_WINDOWS
    elif operating_system in ('Linux', 'macOS'):
        # Linux and macOS share one script package.
        script_id = KILL_PROCESS_MAC_LINUX
    else:
        script_id = ''
    response = client.kill_process(script_id, args.get('pid'), args.get('time_out'), endpoint_id)
    if not response.get('success'):
        raise Exception(response.get('error'))
    job_id = response.get('data')
    entry_context = {
        'FidelisEndpoint.Process(val.ID && val.ID === obj.ID)': {'ID': script_id, 'JobID': job_id}
    }
    return f'The job has been executed successfully. \n Job ID: {job_id}', entry_context, response
def delete_file_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Run the delete-file script against a path on a specific endpoint."""
    endpoint_id = get_endpoint_id(client,
                                  argToList(args.get('endpoint_ip')),
                                  argToList(args.get('endpoint_name')))
    operating_system = args.get('operating_system')
    if operating_system == 'Windows':
        script_id = DELETE_FILE_WINDOWS
    elif operating_system in ('Linux', 'macOS'):
        # Linux and macOS share one script package.
        script_id = DELETE_FILE_MAC_LINUX
    else:
        script_id = ''
    response = client.delete_file(script_id, args.get('file_path'), args.get('time_out'), endpoint_id)
    if not response.get('success'):
        raise Exception(response.get('error'))
    job_id = response.get('data')
    entry_context = {
        'FidelisEndpoint.Script(val.ID && val.ID === obj.ID)': {'ID': script_id, 'JobID': job_id}
    }
    return f'The job has been executed successfully. \n Job ID: {job_id}', entry_context, response
def network_isolation_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Isolate an endpoint from the network, optionally keeping one allowed server."""
    endpoint_id = get_endpoint_id(client,
                                  argToList(args.get('endpoint_ip')),
                                  argToList(args.get('endpoint_name')))
    operating_system = args.get('operating_system')
    if operating_system == 'Windows':
        script_id = NETWORK_ISOLATION_WINDOWS
    elif operating_system in ('Linux', 'macOS'):
        # Linux and macOS share one script package.
        script_id = NETWORK_ISOLATION_MAC_LINUX
    else:
        script_id = ''
    response = client.network_isolation(script_id, args.get('allowed_server'), args.get('time_out'), endpoint_id)
    if not response.get('success'):
        raise Exception(response.get('error'))
    job_id = response.get('data')
    entry_context = {
        'FidelisEndpoint.Isolation(val.ID && val.ID === obj.ID)': {'ID': script_id, 'JobID': job_id}
    }
    return f'The job has been executed successfully. \n Job ID: {job_id}', entry_context, response
def remove_network_isolation_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Lift network isolation from an endpoint."""
    endpoint_id = get_endpoint_id(client,
                                  argToList(args.get('endpoint_ip')),
                                  argToList(args.get('endpoint_name')))
    operating_system = args.get('operating_system')
    if operating_system == 'Windows':
        script_id = REMOVE_NETWORK_ISOLATION_WINDOWS
    elif operating_system in ('Linux', 'macOS'):
        # Linux and macOS share one script package.
        script_id = REMOVE_NETWORK_ISOLATION_MAC_LINUX
    else:
        script_id = ''
    response = client.remove_network_isolation(script_id, args.get('time_out'), endpoint_id)
    if not response.get('success'):
        raise Exception(response.get('error'))
    job_id = response.get('data')
    entry_context = {
        'FidelisEndpoint.Isolation(val.ID && val.ID === obj.ID)': {'ID': script_id, 'JobID': job_id}
    }
    return f'The job has been executed successfully. \n Job ID: {job_id}', entry_context, response
def script_job_status(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Report the per-target execution status of a script job result."""
    response = client.get_script_job_status(args.get('job_result_id'))
    if not response.get('success'):
        raise Exception(response.get('error'))
    data = response.get('data', {})
    job_name = data.get('jobName')  # identical for every target; looked up once
    contents = [
        {
            'JobResultID': target.get('jobResultId'),
            'Name': target.get('name'),
            'Status': target.get('status'),
            'JobName': job_name
        }
        for target in data.get('targets', [])
    ]
    entry_context = {'FidelisEndpoint.ScriptResult(val.JobResultID && val.JobResultID === obj.JobResultID)': contents}
    human_readable = tableToMarkdown('Fidelis Endpoint script job status', contents, removeNull=True)
    return human_readable, entry_context, response
def query_file_by_hash_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Query endpoint events by file hash (MD5/SHA1/SHA256)."""
    file_hash = args.get('file_hash')
    if get_hash_type(file_hash) == 'Unknown':
        raise Exception('Enter a valid hash format.')
    response = client.query_file_by_hash(args.get('limit'), args.get('start_time'), args.get('end_time'),
                                         args.get('logic'), file_hash)
    if not response.get('success'):
        raise Exception(response.get('error'))
    events = response.get('data', {}).get('events', [])
    if not events:
        return f'No events were found for file_hash {file_hash}', {}, {}
    # Output key -> event field, for each of the three outputs.
    table_map = {
        'EndpointName': 'endpointName', 'EventType': 'eventType', 'ProcessStartTime': 'processStartTime',
        'ParentName': 'parentName', 'PID': 'pid', 'Name': 'name', 'Path': 'path', 'User': 'user',
        'Hash': 'hash', 'Parameters': 'parameters',
    }
    context_map = {
        'EventTime': 'eventTime', 'EndpointName': 'endpointName', 'EventType': 'eventType',
        'ParentID': 'parentId', 'TargetID': 'targetId', 'ParentName': 'parentName', 'Name': 'name',
        'Path': 'path', 'Hash': 'hash', 'Size': 'size', 'FileVersion': 'fileVersion',
        'Signature': 'signature', 'SignedTime': 'signedTime',
        'CertificateSubjectName': 'certificateSubjectName', 'CertificateIssuerName': 'certificateIssuerName',
        'CertificatePublisher': 'certificatePublisher', 'HashSHA1': 'hashSHA1', 'HashSHA256': 'hashSHA256',
        'ProcessStartTime': 'processStartTime', 'EventIndex': 'eventIndex', 'IndexingTime': 'indexingTime',
        'FileExtension': 'fileExtension', 'FileType': 'fileType', 'FileCategory': 'fileCategory',
        'EntityType': 'entityType', 'StartTime': 'startTime',
    }
    file_map = {
        'Name': 'name', 'Size': 'size', 'MD5': 'hash', 'Extension': 'fileExtension', 'Type': 'fileType',
        'Path': 'path', 'Hostname': 'endpointName', 'SHA1': 'hashSHA1', 'SHA256': 'hashSHA256',
        'FileVersion': 'fileVersion',
    }
    contents = [{key: event.get(field) for key, field in table_map.items()} for event in events]
    context = [{key: event.get(field) for key, field in context_map.items()} for event in events]
    file_standards = [{key: event.get(field) for key, field in file_map.items()} for event in events]
    entry_context = {
        'FidelisEndpoint.Query(val.Hash && val.Hash === obj.Hash)': context,
        outputPaths['file']: file_standards
    }
    headers = ['PID', 'EndpointName', 'Name', 'Path', 'User', 'Hash', 'ProcessStartTime', 'Parameters', 'ParentName',
               'EventType']
    human_readable = tableToMarkdown('Fidelis Endpoint file hash query results', contents, headers=headers,
                                     removeNull=True)
    return human_readable, entry_context, response
def query_process_name_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Query endpoint events by process name."""
    process_name = args.get('process_name')
    response = client.query_by_process_name(args.get('limit'), args.get('start_time'), args.get('end_time'),
                                            args.get('logic'), process_name)
    if not response.get('success'):
        raise Exception(response.get('error'))
    events = response.get('data', {}).get('events', [])
    if not events:
        return f'No events were found for the process {process_name}', {}, {}
    # Output key -> event field.
    table_map = {
        'EndpointName': 'endpointName', 'EventType': 'eventType', 'ProcessStartTime': 'processStartTime',
        'ParentName': 'parentName', 'PID': 'pid', 'Name': 'name', 'Path': 'path', 'User': 'user',
        'Hash': 'hash', 'Parameters': 'parameters',
    }
    context_map = {
        'EsIndex': 'esIndex', 'EsDocumentType': 'esDocumentType', 'EventTime': 'eventTime',
        'EndpointName': 'endpointName', 'EventType': 'eventType', 'ParentID': 'parentId',
        'TargetID': 'targetId', 'PID': 'pid', 'ParentName': 'parentName', 'Name': 'name',
        'Path': 'path', 'Hash': 'hash', 'User': 'user', 'ProcessStartTime': 'processStartTime',
        'IndexingTime': 'indexingTime', 'EntityType': 'entityType', 'StartTime': 'startTime',
    }
    contents = [{key: event.get(field) for key, field in table_map.items()} for event in events]
    context = [{key: event.get(field) for key, field in context_map.items()} for event in events]
    entry_context = {'FidelisEndpoint.Query(val.PID && val.PID === obj.PID)': context}
    headers = ['PID', 'EndpointName', 'Name', 'Path', 'User', 'Hash', 'ProcessStartTime', 'Parameters', 'ParentName',
               'EventType']
    human_readable = tableToMarkdown('Fidelis Endpoint process results', contents, headers=headers, removeNull=True)
    return human_readable, entry_context, response
def query_connection_by_remote_ip_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Query network-connection events by remote IP address."""
    remote_ip = args.get('remote_ip')
    response = client.query_by_remote_ip(args.get('limit'), args.get('start_time'), args.get('end_time'),
                                         args.get('logic'), remote_ip)
    if not response.get('success'):
        raise Exception(response.get('error'))
    events = response.get('data', {}).get('events', [])
    if not events:
        return f'No events were found for the IP address {remote_ip}', {}, {}
    # Output key -> event field.
    table_map = {
        'EndpointName': 'endpointName', 'EventType': 'eventType', 'EndpointID': 'endpointId',
        'ProcessStartTime': 'processStartTime', 'ParentName': 'parentName', 'PPID': 'ppid',
        'LocalIP': 'localIP', 'LocalPort': 'localPort', 'RemoteIP': 'remoteIP', 'RemotePort': 'remotePort',
        'FirstEventTime': 'firstEventTime', 'LastEventTime': 'lastEventTime', 'Protocol': 'protocol',
        'ParentHashSHA1': 'parentHashSHA1',
    }
    # NOTE: the lowercase 'parentHashSHA1' context key is kept exactly as the
    # integration has always emitted it, to avoid breaking existing playbooks.
    context_map = {
        'EventTime': 'eventTime', 'EndpointName': 'endpointName', 'EventType': 'eventType',
        'EndpointID': 'endpointId', 'ParentID': 'parentId', 'TargetID': 'targetId', 'PPID': 'ppid',
        'ParentName': 'parentName', 'LocalIP': 'localIP', 'LocalPort': 'localPort',
        'RemoteIP': 'remoteIP', 'RemotePort': 'remotePort', 'ProcessStartTime': 'processStartTime',
        'FirstEventTime': 'firstEventTime', 'LastEventTime': 'lastEventTime', 'Protocol': 'protocol',
        'EventIndex': 'eventIndex', 'NetworkDirection': 'networkDirection', 'EntityType': 'entityType',
        'StartTime': 'startTime', 'parentHashSHA1': 'parentHashSHA1',
    }
    contents = [{key: event.get(field) for key, field in table_map.items()} for event in events]
    context = [{key: event.get(field) for key, field in context_map.items()} for event in events]
    entry_context = {'FidelisEndpoint.Query(val.PPID && val.PPID === obj.PPID)': context}
    headers = ['EndpointID', 'EndpointName', 'PPID', 'LocalIP', 'LocalPort', 'RemoteIP', 'RemotePort',
               'ProcessStartTime', 'FirstEventTime', 'LastEventTime', 'Protocol', 'ParentHashSHA1', 'ParentName',
               'EventType']
    human_readable = tableToMarkdown('Fidelis Endpoint query results for connection by remote IP', contents,
                                     headers=headers, removeNull=True)
    return human_readable, entry_context, response
def query_dns_request_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Query DNS-request events by the requested URL."""
    url = args.get('url')
    response = client.query_by_dns_request(args.get('limit'), args.get('start_time'), args.get('end_time'),
                                           args.get('logic'), url)
    if not response.get('success'):
        raise Exception(response.get('error'))
    events = response.get('data', {}).get('events', [])
    if not events:
        return f'No events were found for the URL {url}', {}, {}
    # Output key -> event field.
    table_map = {
        'EndpointName': 'endpointName', 'EventType': 'eventType', 'DnsAnswer': 'dnsAnswer',
        'ProcessStartTime': 'processStartTime', 'LocalIP': 'localIP', 'LocalPort': 'localPort',
        'RemoteIP': 'remoteIP', 'RemotePort': 'remotePort',
    }
    context_map = {
        'EventTime': 'eventTime', 'EndpointName': 'endpointName', 'EventType': 'eventType',
        'ParentID': 'parentId', 'TargetID': 'targetId', 'LocalIP': 'localIP', 'LocalPort': 'localPort',
        'RemoteIP': 'remoteIP', 'RemotePort': 'remotePort', 'DnsQuestion': 'dnsQuestion',
        'DnsAnswer': 'dnsAnswer', 'ProcessStartTime': 'processStartTime', 'EventIndex': 'eventIndex',
        'IndexingTime': 'indexingTime', 'NetworkDirection': 'networkDirection',
        'EntityType': 'entityType', 'StartTime': 'startTime',
    }
    contents = [{key: event.get(field) for key, field in table_map.items()} for event in events]
    context = [{key: event.get(field) for key, field in context_map.items()} for event in events]
    entry_context = {'FidelisEndpoint.Query(val.ParentID && val.ParentID === obj.ParentID)': context}
    headers = ['EndpointName', 'LocalIP', 'LocalPort', 'RemoteIP', 'RemotePort', 'ProcessStartTime', 'DnsAnswer',
               'EventType']
    human_readable = tableToMarkdown('Fidelis Endpoint query results for the DNS request', contents, headers=headers,
                                     removeNull=True)
    return human_readable, entry_context, response
def query_by_server_ip_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Query DNS-request events by the DNS server IP address."""
    remote_ip = args.get('remote_ip')
    response = client.query_by_dns_server_ip(args.get('limit'), args.get('start_time'), args.get('end_time'),
                                             args.get('logic'), remote_ip)
    if not response.get('success'):
        raise Exception(response.get('error'))
    events = response.get('data', {}).get('events', [])
    if not events:
        return f'No events were found for the IP address {remote_ip}', {}, {}
    # Output key -> event field.
    table_map = {
        'EndpointName': 'endpointName', 'EventType': 'eventType', 'DnsAnswer': 'dnsAnswer',
        'ProcessStartTime': 'processStartTime', 'LocalIP': 'localIP', 'LocalPort': 'localPort',
        'RemoteIP': 'remoteIP', 'RemotePort': 'remotePort',
    }
    context_map = {
        'EventTime': 'eventTime', 'EndpointName': 'endpointName', 'EventType': 'eventType',
        'ParentID': 'parentId', 'TargetID': 'targetId', 'LocalIP': 'localIP', 'LocalPort': 'localPort',
        'RemoteIP': 'remoteIP', 'RemotePort': 'remotePort', 'DnsQuestion': 'dnsQuestion',
        'DnsAnswer': 'dnsAnswer', 'ProcessStartTime': 'processStartTime', 'EventIndex': 'eventIndex',
        'IndexingTime': 'indexingTime', 'NetworkDirection': 'networkDirection',
        'EntityType': 'entityType', 'StartTime': 'startTime',
    }
    contents = [{key: event.get(field) for key, field in table_map.items()} for event in events]
    context = [{key: event.get(field) for key, field in context_map.items()} for event in events]
    entry_context = {'FidelisEndpoint.Query(val.TargetID && val.TargetID === obj.TargetID)': context}
    headers = ['EndpointName', 'LocalIP', 'LocalPort', 'RemoteIP', 'RemotePort', 'ProcessStartTime', 'DnsAnswer',
               'EventType']
    human_readable = tableToMarkdown('Fidelis Endpoint query results for the DNS request by server IP', contents,
                                     headers=headers, removeNull=True)
    return human_readable, entry_context, response
def query_by_source_ip(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Query DNS-request events by the requesting (source) IP, optionally scoped to a domain."""
    response = client.query_by_dns_source_ip(args.get('limit'), args.get('start_time'), args.get('end_time'),
                                             args.get('logic'), args.get('source_ip'), args.get('domain', ''))
    if not response.get('success'):
        raise Exception(response.get('error'))
    events = response.get('data', {}).get('events', [])
    if not events:
        return 'No events were found', {}, {}
    # Output key -> event field.
    table_map = {
        'EndpointName': 'endpointName', 'ProcessStartTime': 'processStartTime', 'LocalIP': 'localIP',
        'LocalPort': 'localPort', 'RemoteIP': 'remoteIP', 'RemotePort': 'remotePort',
        'DnsQuestion': 'dnsQuestion', 'DnsAnswer': 'dnsAnswer',
    }
    context_map = {
        'EventTime': 'eventTime', 'EndpointName': 'endpointName', 'EventType': 'eventType',
        'ParentID': 'parentId', 'TargetID': 'targetId', 'LocalIP': 'localIP', 'LocalPort': 'localPort',
        'RemoteIP': 'remoteIP', 'RemotePort': 'remotePort', 'DnsQuestion': 'dnsQuestion',
        'DnsAnswer': 'dnsAnswer', 'ProcessStartTime': 'processStartTime', 'EventIndex': 'eventIndex',
        'IndexingTime': 'indexingTime', 'NetworkDirection': 'networkDirection',
        'EntityType': 'entityType', 'StartTime': 'startTime',
    }
    contents = [{key: event.get(field) for key, field in table_map.items()} for event in events]
    context = [{key: event.get(field) for key, field in context_map.items()} for event in events]
    entry_context = {'FidelisEndpoint.Query(val.TargetID && val.TargetID === obj.TargetID)': context}
    headers = ['EndpointName', 'LocalIP', 'LocalPort', 'RemoteIP', 'RemotePort', 'ProcessStartTime', 'DnsQuestion',
               'DnsAnswer']
    human_readable = tableToMarkdown('Fidelis Endpoint query results for the DNS request by source IP', contents,
                                     headers=headers, removeNull=True)
    return human_readable, entry_context, response
def query_events_command(client: Client, args: dict) -> Tuple[str, Dict, Dict]:
    """Run a generic event query with an arbitrary column/operator/value filter.

    Fix: ``additional_filter`` used to be split on every whitespace run and rejected
    any value containing spaces (e.g. a file path). Splitting with ``maxsplit=2``
    keeps everything after the operator as the value, while inputs that were valid
    before parse exactly the same.

    Args:
        client: Fidelis Endpoint API client.
        args: Command arguments (``start_time``, ``end_time``, ``logic``, ``entity_type``,
            ``column``, ``value``, ``operator``, ``limit`` and optional
            ``additional_filter`` of the form ``"<column> <operator> <value>"``).

    Returns:
        Tuple of (human readable markdown, entry context, raw API response).

    Raises:
        Exception: On a malformed ``additional_filter`` or an API failure.
    """
    start_time = args.get('start_time')
    end_time = args.get('end_time')
    logic = args.get('logic')
    entity_type = args.get('entity_type')
    column = args.get('column')
    value = args.get('value')
    operator = args.get('operator')
    limit = args.get('limit')
    additional_filter_string = args.get('additional_filter')
    additional_filter = None
    if additional_filter_string:
        # maxsplit=2: first token is the column, second the operator, and the
        # remainder - spaces included - is the value.
        additional_filter_split = additional_filter_string.split(None, 2)
        if len(additional_filter_split) == 3:
            additional_filter = {
                'filterType': 'criteria',
                'column': additional_filter_split[0],
                'operator': additional_filter_split[1],
                'value': additional_filter_split[2]
            }
        else:
            raise Exception('Make sure that the additional_filter argument is in valid format.\n '
                            'For Example: pid = 1234')
    contents = []
    context = []
    headers = ['PID', 'EndpointName', 'User', 'ProcessStartTime', 'LocalIP', 'LocalPort', 'RemoteIP', 'RemotePort',
               'ParentID', 'EventType']
    response = client.query_events(limit, start_time, end_time, logic, column, value, entity_type, operator,
                                   additional_filter)
    if not response.get('success'):
        raise Exception(response.get('error'))
    res = response.get('data', {})
    events = res.get('events', [])
    if not events:
        return 'No events were found', {}, {}
    for event in events:
        contents.append({
            'EndpointName': event.get('endpointName'),
            'EventType': event.get('eventType'),
            'ParentID': event.get('parentId'),
            'PID': event.get('pid'),
            'User': event.get('user'),
            'ProcessStartTime': event.get('processStartTime'),
            'LocalIP': event.get('localIP'),
            'LocalPort': event.get('localPort'),
            'RemoteIP': event.get('remoteIP'),
            'RemotePort': event.get('remotePort')
        })
        context.append({
            'EventTime': event.get('eventTime'),
            'EndpointName': event.get('endpointName'),
            'EventType': event.get('eventType'),
            'ParentID': event.get('parentId'),
            'TargetID': event.get('targetId'),
            'PID': event.get('pid'),
            'ParentName': event.get('parentName'),
            'Name': event.get('name'),
            'Path': event.get('path'),
            'Hash': event.get('hash'),
            'User': event.get('user'),
            'LocalIP': event.get('localIP'),
            'LocalPort': event.get('localPort'),
            'RemoteIP': event.get('remoteIP'),
            'RemotePort': event.get('remotePort'),
            'DnsQuestion': event.get('dnsQuestion'),
            'DnsAnswer': event.get('dnsAnswer'),
            'ProcessStartTime': event.get('processStartTime'),
            'EventIndex': event.get('eventIndex'),
            'IndexingTime': event.get('indexingTime'),
            'EntityType': event.get('entityType'),
            'StartTime': event.get('startTime')
        })
    entry_context = {'FidelisEndpoint.Query(val.PID && val.PID === obj.PID)': context}
    human_readable = tableToMarkdown('Fidelis Endpoint query events result', contents, headers=headers,
                                     removeNull=True)
    return human_readable, entry_context, response
def fetch_incidents(client: Client, fetch_time: str, fetch_limit: str, last_run: Dict) -> Tuple[List, Dict]:
    """Fetch new Fidelis alerts and convert them into XSOAR incidents.

    Fix: the default for ``last_fetched_alert_id`` was ``''``; when a saved last_run
    contained a create time but no alert id, ``int('')`` below raised ValueError.
    Defaulting to ``'0'`` keeps every real alert id (all >= 1) eligible.

    Args:
        client: Fidelis Endpoint API client.
        fetch_time: First-fetch look-back window (e.g. "3 days").
        fetch_limit: Maximum number of alerts to pull per run.
        last_run: Previous run state with ``last_fetched_alert_create_time`` and
            ``last_fetched_alert_id``.

    Returns:
        Tuple of (incidents list, next last_run dict).
    """
    last_fetched_alert_create_time = last_run.get('last_fetched_alert_create_time')
    last_fetched_alert_id = last_run.get('last_fetched_alert_id', '0')
    if not last_fetched_alert_create_time:
        # First run: derive the start of the window from the configured look-back.
        last_fetched_alert_create_time, _ = parse_date_range(fetch_time, date_format='%Y-%m-%dT%H:%M:%S.000Z')
        last_fetched_alert_id = '0'
    latest_alert_create_date = last_fetched_alert_create_time
    latest_alert_id = last_fetched_alert_id
    incidents = []
    response = client.list_alerts(
        limit=fetch_limit,
        sort='createDate Ascending',
        start_date=last_fetched_alert_create_time
    )
    alerts = response.get('data', {}).get('entities', [])
    for alert in alerts:
        alert_id = alert.get('id')
        if alert_id <= int(last_fetched_alert_id):
            # got an alert we already fetched, skipping it
            continue
        alert_id = str(alert_id)
        alert_create_date = alert.get('createDate')
        incident = {
            'name': f'Fidelis Endpoint alert {alert_id}',
            'occurred': alert_create_date,
            'rawJSON': json.dumps(alert)
        }
        incidents.append(incident)
        # Alerts arrive sorted ascending, so the last accepted one is the newest.
        latest_alert_create_date = alert_create_date
        latest_alert_id = alert_id
    return incidents, \
        {'last_fetched_alert_create_time': latest_alert_create_date, 'last_fetched_alert_id': latest_alert_id}
def main():
    """Parse integration params, dispatch the requested command, report errors.

    Reads credentials/URL/proxy settings from the integration parameters,
    builds a Client, and routes demisto.command() to the matching command
    implementation. Any exception is converted to a war-room error (with the
    password scrubbed from the message).
    """
    username = demisto.params().get('credentials').get('identifier')
    password = demisto.params().get('credentials').get('password')
    # get the service API url
    base_url = urljoin(demisto.params().get('url'), '/Endpoint/api')
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)
    LOG(f'Command being called is {demisto.command()}')
    try:
        client = Client(base_url, username=username, password=password, verify=verify_certificate, proxy=proxy)
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            fetch_limit = demisto.params().get('fetch_limit')
            return_outputs(*test_module(client, fetch_limit))
        elif demisto.command() == 'fetch-incidents':
            fetch_time = demisto.params().get('fetch_time', '3 days')
            fetch_limit = demisto.params().get('fetch_limit', '50')
            incidents, last_run = fetch_incidents(client, fetch_time, fetch_limit, last_run=demisto.getLastRun())  # type: ignore
            demisto.incidents(incidents)
            demisto.setLastRun(last_run)
        elif demisto.command() == 'fidelis-endpoint-list-alerts':
            return_outputs(*list_alerts_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-host-info':
            return_outputs(*host_info_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-file-search':
            return_outputs(*file_search(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-file-search-status':
            return_outputs(*file_search_status(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-file-search-result-metadata':
            return_outputs(*file_search_reasult_metadata(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-get-file':
            # get-file returns a file entry, so it goes through demisto.results.
            demisto.results(get_file_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-delete-file-search-job':
            return_outputs(*delete_file_search_job_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-list-scripts':
            return_outputs(*list_scripts_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-get-script-manifest':
            return_outputs(*script_manifest_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-list-processes':
            return_outputs(*list_process_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-get-script-result':
            return_outputs(*get_script_result(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-kill-process':
            return_outputs(*kill_process_by_pid(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-delete-file':
            return_outputs(*delete_file_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-isolate-network':
            return_outputs(*network_isolation_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-remove-network-isolation':
            return_outputs(*remove_network_isolation_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-script-job-status':
            return_outputs(*script_job_status(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-execute-script':
            return_outputs(*execute_script_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-query-file':
            return_outputs(*query_file_by_hash_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-query-process':
            return_outputs(*query_process_name_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-query-connection-by-remote-ip':
            return_outputs(*query_connection_by_remote_ip_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-query-by-dns':
            return_outputs(*query_dns_request_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-query-dns-by-server-ip':
            return_outputs(*query_by_server_ip_command(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-query-dns-by-source-ip':
            return_outputs(*query_by_source_ip(client, demisto.args()))
        elif demisto.command() == 'fidelis-endpoint-query-events':
            return_outputs(*query_events_command(client, demisto.args()))
    # Log exceptions
    except Exception as e:
        err_msg = str(e)
        if 'password=' in err_msg:
            # Never leak the configured password into the war-room error.
            err_msg = re.sub(r'password=([^\s]*)\s', 'password=**** ', err_msg)
        return_error(f'Failed to execute {demisto.command()} command. Error: {err_msg}')


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import calendar
import cgi
import datetime
import gzip
import io
import json
import random
import re
import time
import iso8601
from oslo_config import cfg
from oslo_log import log as logging
import six
LOG = logging.getLogger(__name__)
def init_config_and_logging(opts):
    """Register *opts* with oslo.config and set up oslo logging.

    Side effects: mutates the global cfg.CONF, parses CLI/config files for
    project 'stackalytics', and initializes logging.
    """
    conf = cfg.CONF
    conf.register_cli_opts(opts)
    conf.register_opts(opts)
    logging.register_options(conf)
    logging.set_defaults()
    conf(project='stackalytics')
    logging.setup(conf, 'stackalytics')
    LOG.info('Logging enabled')
    conf.log_opt_values(LOG, logging.DEBUG)
def date_to_timestamp(d):
    """Convert a '%Y-%b-%d' date string (or 'now') to a local-time Unix timestamp.

    Falsy input yields 0; 'now' yields the current time.
    """
    if not d:
        return 0
    if d == 'now':
        return int(time.time())
    parsed = datetime.datetime.strptime(d, '%Y-%b-%d')
    return int(time.mktime(parsed.timetuple()))
def date_to_timestamp_ext(d):
    """Like date_to_timestamp(), but also accept raw numeric input."""
    try:
        result = date_to_timestamp(d)
    except (ValueError, TypeError):
        # Not a recognized date string; assume it is already a timestamp.
        result = int(d)
    return result
def member_date_to_timestamp(d):
    """Convert a member-join date like 'January 02, 2014 ' to a Unix timestamp.

    Falsy input yields 0. Note the trailing space in the expected format.
    """
    if not d:
        return 0
    parsed = datetime.datetime.strptime(d, '%B %d, %Y ')
    return int(time.mktime(parsed.timetuple()))
def iso8601_to_timestamp(s):
    """Convert an ISO 8601 date-time string to a UTC Unix timestamp."""
    return calendar.timegm(iso8601.parse_date(s).utctimetuple())
def timestamp_to_date(timestamp):
    """Render a local-time timestamp as '%Y-%b-%d' (inverse of date_to_timestamp)."""
    return datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%b-%d')


def timestamp_to_week(timestamp):
    # Jan 4th 1970 is the first Sunday in the Epoch, so shift by 3 days
    # before dividing into 7-day buckets.
    return (timestamp - 3 * 24 * 3600) // (7 * 24 * 3600)


def week_to_date(week):
    """Render the start of the given week index as a local date-time string."""
    timestamp = week * 7 * 24 * 3600 + 3 * 24 * 3600
    return datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')


def timestamp_to_day(timestamp):
    """Map a timestamp to its day index (whole days since the Epoch)."""
    return timestamp // (24 * 3600)


def timestamp_to_utc_date(timestamp):
    # NOTE(review): despite the name this formats in local time
    # (fromtimestamp), not UTC — confirm whether utcfromtimestamp was meant.
    return datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')


def round_timestamp_to_day(timestamp):
    """Truncate a timestamp down to the start of its Epoch day."""
    return (int(timestamp) // (24 * 3600)) * (24 * 3600)
def check_email_validity(email):
    """Return a truthy match object when *email* looks like an address.

    Falsy input returns False; a non-matching string returns None.
    The pattern is deliberately permissive — it only checks the general
    user@domain.tld shape.
    """
    if not email:
        return False
    return re.match(r'[\w\d_\.-]+@([\w\d_\.-]+\.)+[\w]+', email)
# Desktop browser User-Agent strings; read_uri() picks one at random so
# automated requests look like ordinary browser traffic.
user_agents = [
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64) Gecko/20100101 Firefox/32.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_6) AppleWebKit/537.78.2',
    'Mozilla/5.0 (Windows NT 6.3; WOW64) Gecko/20100101 Firefox/32.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X) Chrome/37.0.2062.120',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
]
def read_uri(uri):
    """Fetch *uri* and return its body as text, or None on any error.

    A random browser User-Agent header is sent with the request.

    :param uri: URL to fetch
    :return: response body (text), or None if the request failed
    """
    try:
        req = six.moves.urllib.request.Request(
            url=uri, headers={'User-Agent': random.choice(user_agents)})
        fd = six.moves.urllib.request.urlopen(req)
        if six.PY3:
            # urlopen returns a binary stream on PY3; wrap it for text reads.
            fd = io.TextIOWrapper(fd)
        raw = fd.read()
        fd.close()
        return raw
    except Exception as e:
        # warning() is the supported spelling; warn() is a deprecated alias.
        LOG.warning('Error "%(error)s" while reading uri %(uri)s',
                    {'error': e, 'uri': uri})
        return None  # explicit: callers must handle a failed read
def read_json_from_uri(uri):
    """Fetch *uri* and parse it as JSON.

    :return: decoded JSON object, or None when fetching or parsing fails
        (a failed read_uri() returns None, which json.loads rejects and the
        except clause logs).
    """
    try:
        return json.loads(read_uri(uri))
    except Exception as e:
        # warning() is the supported spelling; warn() is a deprecated alias.
        LOG.warning('Error "%(error)s" parsing json from uri %(uri)s',
                    {'error': e, 'uri': uri})
        return None
def gzip_decompress(content):
    """Gunzip *content* and return the decoded text.

    On PY3 *content* is expected to be bytes (result decoded as UTF-8);
    on PY2 it is a str processed via a file-like wrapper.
    """
    if six.PY3:
        return gzip.decompress(content).decode('utf8')
    else:
        # PY2 gzip has no one-shot decompress(); wrap the payload in a
        # file object instead.
        gzip_fd = gzip.GzipFile(fileobj=six.moves.StringIO(content))
        return gzip_fd.read()
def cmp_to_key(mycmp):  # ported from python 3
    """Convert a cmp= function into a key= function (functools backport)."""
    class K(object):
        # Single slot keeps instances small; K objects are created per item
        # during sorting.
        __slots__ = ['obj']

        def __init__(self, obj):
            self.obj = obj

        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0

        def __ne__(self, other):
            return mycmp(self.obj, other.obj) != 0

        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0

        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0

        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0

        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0

        # Comparisons are defined by mycmp, so hashing would be inconsistent.
        __hash__ = None
    return K
def make_range(start, stop, step):
    """Yield ranges of *step* items covering [start, stop).

    The final range is shorter when (stop - start) is not a multiple of step.
    """
    last_full = stop - ((stop - start) % step)
    for chunk_start in six.moves.range(start, last_full, step):
        yield six.moves.range(chunk_start, chunk_start + step)
    if stop > last_full:
        # Trailing partial chunk.
        yield six.moves.range(last_full, stop)
def load_repos(runtime_storage_inst):
    """Return the repo list stored under the 'repos' key (empty list if unset)."""
    return runtime_storage_inst.get_by_key('repos') or []
def unwrap_text(text):
    """Re-join soft-wrapped lines, keeping deliberate breaks.

    A newline is preserved after a line that does not start with a letter
    (e.g. a bullet) or that ends with sentence punctuation; otherwise the
    next line is joined with a space. Blank lines are dropped.
    """
    out = ''
    for line in text.splitlines():
        stripped = line.rstrip()
        if not stripped:
            continue
        out += line
        ends_sentence = stripped[-1] in ['.', '!', '?', '>', ':', ';']
        if (not stripped[0].isalpha()) or ends_sentence:
            out += '\n'
        else:
            out += ' '
    return out.rstrip()
def format_text(s):
    """HTML-escape *s*, collapse blank-line runs, and allow wrapping of paths.

    A zero-width space (U+200B) is inserted after each run of '.'/'/' inside
    path-like tokens so browsers can break long identifiers.
    NOTE(review): cgi.escape was removed in Python 3.8; html.escape(s,
    quote=False) is the modern equivalent, kept as-is here for PY2 support.
    """
    s = cgi.escape(re.sub(re.compile('\n{2,}', flags=re.MULTILINE), '\n', s))

    def replace_dots(match_obj):
        # '\1' is followed by a literal U+200B zero-width space.
        return re.sub(r'([\./]+)', r'\1​', match_obj.group(0))

    s = re.sub(r'((?:\w+[\./]+)+\w+)', replace_dots, s)
    return s
def make_age_string(seconds):
    """Render a duration in seconds as 'D days and H hours'.

    Uses floor division so the result is correct on both Python 2 and 3.
    With true division on Python 3 the hours term always collapsed to ~0,
    because (seconds / 86400) * 24 equals seconds / 3600 exactly.
    """
    days = seconds // (3600 * 24)
    hours = (seconds // 3600) - (days * 24)
    return '%d days and %d hours' % (days, hours)
def merge_records(original, new):
    """Copy changed key/values from *new* into *original* in place.

    :return: True when at least one key was added or changed.
    """
    need_update = False
    # dict.items() iterates identically on Python 2 and 3; the six
    # indirection is unnecessary for this helper.
    for key, value in new.items():
        if original.get(key) != value:
            need_update = True
            original[key] = value
    return need_update
def get_blueprint_id(module, name):
    """Qualified blueprint id: '<module>:<name>'."""
    return ':'.join([module, name])


def make_bug_id(bug_id, module, release=None):
    """Qualified bug id: '<module>[/<release>]/<bug_id>'."""
    parts = [module, release, bug_id] if release else [module, bug_id]
    return '/'.join(parts)


def get_patch_id(review_id, patch_number):
    """Qualified patch id: '<review>:<patch_number>'."""
    return '%s:%s' % (review_id, patch_number)
def add_index(sequence, start=1, item_filter=lambda x: True):
    """Number items accepted by *item_filter* with an 'index' key.

    Rejected items get an empty-string index. Items are mutated in place
    and the same sequence is returned.
    """
    counter = start
    for item in sequence:
        if not item_filter(item):
            item['index'] = ''
            continue
        item['index'] = counter
        counter += 1
    return sequence
def safe_encode(s):
    """Percent-encode a unicode string for safe inclusion in a URL."""
    encoded = s.encode('utf-8')
    return six.moves.urllib.parse.quote(encoded)


def keep_safe_chars(s):
    """Drop every character outside printable ASCII (0x21-0x7e) and 0x80-0xff.

    Note: spaces and control characters are removed as well.
    """
    return re.sub(r'[^\x21-\x7e\x80-\xff]+', '', s)
def make_module_group(module_group_id, name=None, modules=None, tag='module'):
    """Build a module-group record.

    *name* and *modules* default to the group id itself when not provided.
    """
    return {
        'id': module_group_id,
        'module_group_name': name or module_group_id,
        'modules': modules or [module_group_id],
        'tag': tag,
    }
# Legal-form suffixes stripped from company names before normalization.
BAD_NAME_SUFFIXES = ['Ltd', 'Pvt', 'Inc', 'GmbH', 'AG', 'Corporation', 'Corp',
                     'Company', 'Co', 'Group', 'Srl', 'Limited', 'LLC', 'IT']

# Suffixes containing dots need different boundary handling ('\b' does not
# work around '.').
BAD_NAME_SUFFIXES_WITH_STOPS = ['S.p.A.', 's.r.o.', 'L.P.', 'B.V.', 'K.K.',
                                'd.o.o.']


def normalize_company_name(name):
    """Lower-case *name*, dropping legal suffixes and non-alphanumerics."""
    # Plain suffixes match on word boundaries; dotted suffixes must follow
    # whitespace or the start of the string.
    regex = '(\\b(' + '|'.join(BAD_NAME_SUFFIXES) + ')\\b)'
    regex += '|' + '((^|\\s)(' + '|'.join(BAD_NAME_SUFFIXES_WITH_STOPS) + '))'
    name = re.sub(re.compile(regex, re.IGNORECASE), '', name)
    return ''.join(c.lower() for c in name if c.isalnum())


def normalize_company_draft(name):
    """Replace commas with spaces and collapse whitespace runs."""
    name = re.sub(',', ' ', name)
    return re.sub(r'\s+', ' ', name)
def validate_lp_display_name(lp_profile):
    """Fix Launchpad profiles whose display name is the hidden-email marker.

    When 'display_name' equals the placeholder '<email address hidden>',
    replace it with the profile's login name. Mutates *lp_profile* in
    place; falsy profiles are ignored.
    """
    if not lp_profile:
        return
    if lp_profile['display_name'] == "<email address hidden>":
        lp_profile['display_name'] = lp_profile['name']
| |
import os
import sys
import time
import unittest
from amfast.remoting.connection_manager import NotConnectedError, \
SessionAttrError, MemoryConnectionManager
class ConnectionTestCase(unittest.TestCase):
    """Shared test suite for amfast connection managers.

    Sub-classes create ``self.manager`` in their own ``setUp`` before
    delegating to this class's ``setUp``. Written for Python 2
    (``assertEquals``, ``xrange``).
    """

    class TestChannel(object):
        # Minimal channel stub; the manager only reads ``name`` here.
        def __init__(self):
            self.name = 'test'

    def setUp(self):
        # ``self.manager`` must already be set by the sub-class.
        self.manager.reset()
        self.channel = self.TestChannel()

    def _testConnectionProps(self, connection):
        """Checks newly inited props."""
        self.assertEquals(36, len(connection.id))
        self.assertEquals(self.channel.name, connection.channel_name)
        self.assertTrue(connection.connected)
        self.assertFalse(connection.authenticated)
        self.assertEquals(None, connection.flex_user)
        self.assertTrue(connection.last_active > 0)
        # last_active appears to be in milliseconds (compared to time()*1000).
        self.assertTrue(time.time() * 1000 > connection.last_active)

    def _testCompare(self, connection, connection_2):
        # Two handles to the same connection must agree on every attribute.
        self.assertEquals(connection.id, connection_2.id)
        self.assertEquals(connection.channel_name, connection_2.channel_name)
        self.assertEquals(connection.timeout, connection_2.timeout)
        self.assertEquals(connection.connected, connection_2.connected)
        self.assertEquals(connection.authenticated, connection_2.authenticated)
        self.assertEquals(connection.flex_user, connection_2.flex_user)
        self.assertEquals(connection.last_active, connection_2.last_active)
        self.assertEquals(connection.last_polled, connection_2.last_polled)

    def testCreateConnection(self):
        connection = self.manager.createConnection(self.channel)
        self._testConnectionProps(connection)

    def testGetConnection(self):
        connection = self.manager.createConnection(self.channel)
        last_active = connection.last_active
        # touch=True must bump last_active...
        new_connection = self.manager.getConnection(connection.id, touch=True)
        self._testConnectionProps(new_connection)
        self.assertTrue(last_active < new_connection.last_active)
        # ...and touch=False must leave it unchanged.
        last_active = new_connection.last_active
        new_connection = self.manager.getConnection(connection.id, touch=False)
        self.assertEquals(last_active, new_connection.last_active)
        self._testCompare(connection, new_connection)

    def testGetConnectionRaisesNotConnectedError(self):
        self.assertRaises(NotConnectedError, self.manager.getConnection, 'not_connected')

    def testDeleteConnection(self):
        connection = self.manager.createConnection(self.channel)
        self.manager.deleteConnection(connection)
        self.assertFalse(connection.connected)
        self.assertRaises(NotConnectedError, self.manager.getConnection, connection.id)

    def testConnectConnection(self):
        connection = self.manager.createConnection(self.channel)
        connection.disconnect()
        self.assertFalse(connection.connected)
        connection.connect()
        self.assertTrue(connection.connected)

    def testTouch(self):
        connection = self.manager.createConnection(self.channel)
        last_active = connection.last_active
        connection.touch()
        self.assertTrue(connection.last_active > last_active)

    def testTouchPoll(self):
        connection = self.manager.createConnection(self.channel)
        last_polled = connection.last_polled
        connection.touchPolled()
        self.assertTrue(connection.last_polled > last_polled)

    def testAuthenticate(self):
        connection = self.manager.createConnection(self.channel)
        user = 'tester'
        connection.authenticate(user)
        self.assertTrue(connection.authenticated)
        self.assertEquals(user, connection.flex_user)
        connection.unAuthenticate()
        self.assertFalse(connection.authenticated)
        self.assertEquals(None, connection.flex_user)

    def testNotifyFunc(self):
        def notify():
            return True
        connection = self.manager.createConnection(self.channel)
        connection.setNotifyFunc(notify)
        self.assertTrue(connection.notify_func())
        connection.unSetNotifyFunc()
        self.assertEquals(None, connection.notify_func)

    def testSessionAttrs(self):
        connection = self.manager.createConnection(self.channel)
        key_1 = 'key_1'
        key_2 = 'key_2'
        val_1 = 'val_1'
        val_2 = 'val_2'
        connection.setSessionAttr(key_1, val_1)
        connection.setSessionAttr(key_2, val_2)
        self.assertEquals(val_1, connection.getSessionAttr(key_1))
        self.assertEquals(val_2, connection.getSessionAttr(key_2))
        # Attributes must survive a round-trip through the manager.
        connection = self.manager.getConnection(connection.id)
        self.assertEquals(val_1, connection.getSessionAttr(key_1))
        self.assertEquals(val_2, connection.getSessionAttr(key_2))
        connection.delSessionAttr(key_1)
        self.assertEquals(val_2, connection.getSessionAttr(key_2))
        # A deleted attribute must raise after a fresh lookup too.
        connection = self.manager.getConnection(connection.id)
        self.assertRaises(SessionAttrError, connection.getSessionAttr, key_1)

    def testReset(self):
        connection = self.manager.createConnection(self.channel)
        self.manager.reset()
        self.assertRaises(NotConnectedError, self.manager.getConnection, connection.id)

    def testChannelCount(self):
        count = 5
        ids = []
        for i in xrange(count):
            connection = self.manager.createConnection(self.channel)
            ids.append(connection.id)
        self.assertEquals(count, self.manager.getConnectionCount(self.channel.name))
        for connection_id in ids:
            connection = self.manager.getConnection(connection_id)
            connection.delete()
        self.assertEquals(0, self.manager.getConnectionCount(self.channel.name))
class MemoryTestCase(ConnectionTestCase):
    """Runs the shared suite against the in-memory connection manager."""
    def setUp(self):
        self.manager = MemoryConnectionManager()
        ConnectionTestCase.setUp(self)


class GaeTestCase(ConnectionTestCase):
    """Runs the shared suite against the Google App Engine manager."""
    def setUp(self):
        # Imported lazily: only available inside a GAE environment.
        from amfast.remoting.gae_connection_manager import GaeConnectionManager
        self.manager = GaeConnectionManager()
        ConnectionTestCase.setUp(self)

    def testNotifyFunc(self):
        # Notify functions are skipped for this backend.
        pass


class MemcacheTestCase(ConnectionTestCase):
    """Runs the shared suite against the memcache-backed manager."""
    def setUp(self):
        # Imported lazily: requires memcache support to be installed.
        from amfast.remoting.memcache_connection_manager import MemcacheConnectionManager
        self.manager = MemcacheConnectionManager()
        ConnectionTestCase.setUp(self)

    def testNotifyFunc(self):
        # Notify functions are skipped for this backend.
        pass


class SaTestCase(ConnectionTestCase):
    """Runs the shared suite against the SQLAlchemy-backed manager."""
    def setUp(self):
        # Imported lazily: requires SQLAlchemy to be installed.
        import sqlalchemy as sa
        from amfast.remoting.sa_connection_manager import SaConnectionManager
        engine = sa.create_engine('sqlite:///sa_test_case.db', echo=False)
        metadata = sa.MetaData()
        self.manager = SaConnectionManager(engine, metadata)
        self.manager.createTables()
        ConnectionTestCase.setUp(self)
def suite():
tests = [
unittest.TestLoader().loadTestsFromTestCase(MemoryTestCase)
]
print "\n---- Optional Connection Tests ----"
try:
from amfast.remoting.gae_connection_manager import GaeConnectionManager
except Exception:
# Skip if we're not in Gae environment.
print "Skipping GAE test."
else:
print "Running GAE test."
tests.append(unittest.TestLoader().loadTestsFromTestCase(GaeTestCase))
try:
import sqlalchemy as sa
from amfast.remoting.sa_connection_manager import SaConnectionManager
except Exception:
# Skip if SQLAlchemy is not installed.
print "Skipping SA test."
else:
print "Running SA test."
tests.append(unittest.TestLoader().loadTestsFromTestCase(SaTestCase))
try:
from amfast.remoting.memcache_connection_manager import MemcacheConnectionManager
# Check connection
manager = MemcacheConnectionManager()
if manager.mc.set("test", True) is not True:
print "Memcache set failed."
raise Error("Memcache connection failed.")
if manager.mc.get("test") != True:
print "Memcache get failed."
raise Error("Memcache connection failed.")
except Exception:
# Skip if memcache support is not installed.
print "Skipping Memcache test."
else:
print "Running Memcache test."
tests.append(unittest.TestLoader().loadTestsFromTestCase(MemcacheTestCase))
print "--------"
return unittest.TestSuite(tests)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| |
import yaml
import os
import re
import shutil
import subprocess
from copy import deepcopy
import CreateSectionTable
# Template record for a generated test case; parse_one_test_case() overlays
# the per-case fields on a deepcopy of this dict.
TEST_CASE_PATTERN = {
    "initial condition": "UTINIT1",
    "SDK": "ESP32_IDF",
    "level": "Unit",
    "execution time": 0,
    "Test App": "UT",
    "auto test": "Yes",
    "category": "Function",
    "test point 1": "basic function",
    "version": "v1 (2016-12-06)",
    "test environment": "UT_T1_1",
    "expected result": "1. set succeed"
}

# Skeleton of a per-job CI config file; dump_ci_config() fills in DUT and
# the Filter ID list before dumping to YAML.
CONFIG_FILE_PATTERN = {
    "Config": {"execute count": 1, "execute order": "in order"},
    "DUT": [],
    "Filter": [{"Add": {"ID": []}}]
}
class Parser(object):
    """ parse unit test cases from build files and create files for test bench """
    # A tag body is "name" or "name=value" (value optional).
    TAG_PATTERN = re.compile("([^=]+)(=)?(.+)?")
    # A description is a sequence of "[...]" groups.
    DESCRIPTION_PATTERN = re.compile("\[([^]\[]+)\]")

    def __init__(self, idf_path=os.getenv("IDF_PATH")):
        self.test_env_tags = {}
        self.unit_jobs = {}
        self.file_name_cache = {}
        self.idf_path = idf_path
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; these are trusted in-repo files, but
        # yaml.safe_load would be the better choice.
        self.tag_def = yaml.load(open(os.path.join(idf_path, "tools", "unit-test-app", "tools",
                                                   "TagDefinition.yml"), "r"))
        self.module_map = yaml.load(open(os.path.join(idf_path, "tools", "unit-test-app", "tools",
                                                      "ModuleDefinition.yml"), "r"))

    def parse_test_cases_from_elf(self, elf_file):
        """
        parse test cases from elf and save test cases to unit test folder
        :param elf_file: elf file path
        """
        subprocess.check_output('xtensa-esp32-elf-objdump -t {} | grep \ test_desc > case_address.tmp'.format(elf_file),
                                shell=True)
        subprocess.check_output('xtensa-esp32-elf-objdump -s {} > section_table.tmp'.format(elf_file), shell=True)

        table = CreateSectionTable.SectionTable("section_table.tmp")
        test_cases = []
        with open("case_address.tmp", "r") as f:
            for line in f:
                # process symbol table like: "3ffb4310 l O .dram0.data 00000018 test_desc_33$5010"
                line = line.split()
                test_addr = int(line[0], 16)
                section = line[3]

                name_addr = table.get_unsigned_int(section, test_addr, 4)
                desc_addr = table.get_unsigned_int(section, test_addr + 4, 4)
                file_name_addr = table.get_unsigned_int(section, test_addr + 12, 4)
                name = table.get_string("any", name_addr)
                desc = table.get_string("any", desc_addr)
                file_name = table.get_string("any", file_name_addr)

                tc = self.parse_one_test_case(name, desc, file_name)
                if tc["CI ready"] == "Yes":
                    # update test env list and the cases of same env list
                    if tc["test environment"] in self.test_env_tags:
                        self.test_env_tags[tc["test environment"]].append(tc["ID"])
                    else:
                        self.test_env_tags.update({tc["test environment"]: [tc["ID"]]})
                    test_cases.append(tc)
        os.remove("section_table.tmp")
        os.remove("case_address.tmp")
        self.dump_test_cases(test_cases)

    def parse_case_properities(self, tags_raw):
        """
        parse test case tags (properities) with the following rules:
        * first tag is always group of test cases, it's mandatory
        * the rest tags should be [type=value].
        * if the type have default value, then [type] equal to [type=default_value].
        * if the type don't don't exist, then equal to [type=omitted_value]
        default_value and omitted_value are defined in TagDefinition.yml
        :param tags_raw: raw tag string
        :return: tag dict
        """
        tags = self.DESCRIPTION_PATTERN.findall(tags_raw)
        assert len(tags) > 0
        # Start from the omitted values; explicit tags override below.
        p = dict([(k, self.tag_def[k]["omitted"]) for k in self.tag_def])
        p["module"] = tags[0]

        # Modules not listed in ModuleDefinition.yml fall back to "misc".
        if p["module"] not in self.module_map:
            p["module"] = "misc"

        # parsing rest tags, [type=value], =value is optional
        for tag in tags[1:]:
            match = self.TAG_PATTERN.search(tag)
            assert match is not None
            tag_type = match.group(1)
            tag_value = match.group(3)

            if match.group(2) == "=" and tag_value is None:
                # [tag_type=] means tag_value is empty string
                tag_value = ""
            if tag_type in p:
                if tag_value is None:
                    p[tag_type] = self.tag_def[tag_type]["default"]
                else:
                    p[tag_type] = tag_value
            else:
                # ignore not defined tag type
                pass
        return p

    def parse_one_test_case(self, name, description, file_name):
        """
        parse one test case
        :param name: test case name (summary)
        :param description: test case description (tag string)
        :param file_name: the file defines this test case
        :return: parsed test case
        """
        prop = self.parse_case_properities(description)

        # Per-file counter keeps IDs unique for multiple cases in one file.
        if file_name in self.file_name_cache:
            self.file_name_cache[file_name] += 1
        else:
            self.file_name_cache[file_name] = 1

        # NOTE(review): hash() of a str is randomized per interpreter run on
        # Python 3, so these IDs are only stable within one invocation.
        tc_id = "UT_%s_%s_%03d%02d" % (self.module_map[prop["module"]]['module abbr'],
                                       self.module_map[prop["module"]]['sub module abbr'],
                                       hash(file_name) % 1000,
                                       self.file_name_cache[file_name])

        test_case = deepcopy(TEST_CASE_PATTERN)
        test_case.update({"module": self.module_map[prop["module"]]['module'],
                          "CI ready": "No" if prop["ignore"] == "Yes" else "Yes",
                          "cmd set": ["IDFUnitTest/UnitTest", [name]],
                          "ID": tc_id,
                          "test point 2": prop["module"],
                          "steps": name,
                          "test environment": prop["test_env"],
                          "sub module": self.module_map[prop["module"]]['sub module'],
                          "summary": name})
        return test_case

    def dump_test_cases(self, test_cases):
        """
        dump parsed test cases to YAML file for test bench input
        :param test_cases: parsed test cases
        """
        # NOTE(review): "wb+" with yaml.dump of str data only works on
        # Python 2; Python 3 would require text mode here.
        with open(os.path.join(self.idf_path, "components", "idf_test", "unit_test", "TestCaseAll.yml"), "wb+") as f:
            yaml.dump({"test cases": test_cases}, f, allow_unicode=True, default_flow_style=False)

    def dump_ci_config(self):
        """ assign test cases and dump to config file to test bench """
        test_cases_by_jobs = self.assign_test_cases()
        ci_config_folder = os.path.join(self.idf_path, "components", "idf_test", "unit_test", "CIConfigs")
        if not os.path.exists(ci_config_folder):
            # BUG FIX: previously created ci_config_folder/"CIConfigs" — a
            # spurious nested folder; create the target folder itself.
            os.makedirs(ci_config_folder)
        for unit_job in self.unit_jobs:
            job = deepcopy(CONFIG_FILE_PATTERN)
            job.update({"DUT": ["UT1"]})
            job.update({"Filter": [{"Add": {"ID": test_cases_by_jobs[unit_job]}}]})
            with open(os.path.join(ci_config_folder, unit_job + ".yml"), "wb+") as f:
                yaml.dump(job, f, allow_unicode=True, default_flow_style=False)

    def assign_test_cases(self):
        """ assign test cases to jobs, splitting each env's cases evenly
        across the jobs tagged with that test environment """
        test_cases_by_jobs = {}

        for job in self.unit_jobs:
            test_cases_by_jobs.update({job: list()})

        for test_env in self.test_env_tags:
            available_jobs = list()
            for job in self.unit_jobs:
                if test_env in self.unit_jobs[job]:
                    available_jobs.append(job)
            for idx, job in enumerate(available_jobs):
                cases = self.test_env_tags[test_env]
                # BUG FIX: use floor division so the slice bounds stay
                # integers on Python 3 ('/' yields floats and breaks slicing).
                test_cases_by_jobs[job] += (cases[idx * len(cases) // len(available_jobs):
                                                  (idx + 1) * len(cases) // len(available_jobs)])
        return test_cases_by_jobs

    def parse_gitlab_ci(self):
        """ parse gitlab ci config file to get pre-defined unit test jobs """
        with open(os.path.join(self.idf_path, ".gitlab-ci.yml"), "r") as f:
            gitlab_ci = yaml.load(f)
            for job_name in gitlab_ci.keys():
                # Unit test jobs are the entries whose name starts with "UT_".
                if re.match("UT_", job_name):
                    self.unit_jobs[job_name] = gitlab_ci[job_name]["tags"]

    def copy_module_def_file(self):
        """ copy module def file to artifact path """
        src = os.path.join(self.idf_path, "tools", "unit-test-app", "tools", "ModuleDefinition.yml")
        dst = os.path.join(self.idf_path, "components", "idf_test", "unit_test")
        shutil.copy(src, dst)
def test_parser():
    """Self-test for Parser tag parsing (requires IDF_PATH to be set)."""
    parser = Parser()
    # test parsing tags
    # parsing module only and module in module list
    prop = parser.parse_case_properities("[esp32]")
    assert prop["module"] == "esp32"
    # module not in module list
    prop = parser.parse_case_properities("[not_in_list]")
    assert prop["module"] == "misc"
    # parsing a default tag, a tag with assigned value
    prop = parser.parse_case_properities("[esp32][ignore][test_env=ABCD][not_support1][not_support2=ABCD]")
    assert prop["ignore"] == "Yes" and prop["test_env"] == "ABCD" \
        and "not_support1" not in prop and "not_supported2" not in prop
    # parsing omitted value
    prop = parser.parse_case_properities("[esp32]")
    assert prop["ignore"] == "No" and prop["test_env"] == "UT_T1_1"
    # parsing with incorrect format
    try:
        parser.parse_case_properities("abcd")
        assert False
    except AssertionError:
        pass
    # skip invalid data parse, [type=] assigns empty string to type
    prop = parser.parse_case_properities("[esp32]abdc aaaa [ignore=]")
    assert prop["module"] == "esp32" and prop["ignore"] == ""
    # skip mis-paired []
    prop = parser.parse_case_properities("[esp32][[ignore=b]][]][test_env=AAA]]")
    assert prop["module"] == "esp32" and prop["ignore"] == "b" and prop["test_env"] == "AAA"
def main():
    """Self-test the parser, then parse the built ELF and emit CI configs.

    Requires the IDF_PATH environment variable and a previously built
    unit-test-app ELF.
    """
    test_parser()
    idf_path = os.getenv("IDF_PATH")
    elf_path = os.path.join(idf_path, "tools", "unit-test-app", "build", "unit-test-app.elf")
    parser = Parser(idf_path)
    parser.parse_test_cases_from_elf(elf_path)
    parser.parse_gitlab_ci()
    parser.dump_ci_config()
    parser.copy_module_def_file()


if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# Ordered (compiled regex, token type) pairs used to lex Pump directives.
# More specific directive patterns precede the catch-all '$id' and '$'
# entries; FindFirstInLine() breaks same-column ties by table order.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
    """Represents a position (line and column) in a text file.

    Orders lexicographically: by line first, then by column.
    """

    def __init__(self, line=-1, column=-1):
        self.line = line
        self.column = column

    def __eq__(self, rhs):
        return (self.line, self.column) == (rhs.line, rhs.column)

    def __ne__(self, rhs):
        return not self == rhs

    def __lt__(self, rhs):
        # Tuple comparison: line first, then column.
        return (self.line, self.column) < (rhs.line, rhs.column)

    def __le__(self, rhs):
        return self < rhs or self == rhs

    def __gt__(self, rhs):
        return rhs < self

    def __ge__(self, rhs):
        return rhs <= self

    def __str__(self):
        if self == Eof():
            return 'EOF'
        # Line is displayed 1-based, column 0-based.
        return '%s(%s)' % (self.line + 1, self.column)

    def __add__(self, offset):
        """Returns a cursor shifted right by *offset* columns."""
        return Cursor(self.line, self.column + offset)

    def __sub__(self, offset):
        """Returns a cursor shifted left by *offset* columns."""
        return Cursor(self.line, self.column - offset)

    def Clone(self):
        """Returns a copy of self."""
        return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
    """Returns the special cursor to denote the end-of-file.

    A fresh sentinel instance is created per call; callers rely on
    Cursor.__eq__ (value equality), not identity.
    """
    return Cursor(-1, -1)
class Token:
    """Represents a token in a Pump source file."""

    def __init__(self, start=None, end=None, value=None, token_type=None):
        # Missing boundaries default to the EOF sentinel cursor.
        self.start = Eof() if start is None else start
        self.end = Eof() if end is None else end
        self.value = value
        self.token_type = token_type

    def __str__(self):
        return 'Token @%s: \'%s\' type=%s' % (
            self.start, self.value, self.token_type)

    def Clone(self):
        """Returns a copy with cloned cursors (value/type are shared)."""
        return Token(self.start.Clone(), self.end.Clone(), self.value,
                     self.token_type)
def StartsWith(lines, pos, string):
    """Returns True iff the given position in lines starts with 'string'."""
    suffix = lines[pos.line][pos.column:]
    return suffix.startswith(string)
def FindFirstInLine(line, token_table):
    """Finds the earliest-starting match of any table regex in *line*.

    Returns (start_column, match_length, token_type) for the match that
    begins first — earlier table entries win same-column ties — or None
    when nothing matches.
    """
    best = None  # (start_column, length, token_type)
    for regex, token_type in token_table:
        m = regex.search(line)
        if m and (best is None or m.start() < best[0]):
            best = (m.start(), m.end() - m.start(), token_type)
    return best
def FindFirst(lines, token_table, cursor):
  """Finds the first occurrence of any string in strings in lines.

  Scans lines from cursor onwards and returns a Token for the earliest
  token_table match, or None when no token occurs anywhere after cursor.
  """
  start = cursor.Clone()
  cur_line_number = cursor.line
  for line in lines[start.line:]:
    if cur_line_number == start.line:
      # Only the first scanned line is truncated at the cursor column.
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      (start_column, length, token_type) = m
      if cur_line_number == start.line:
        # Compensate for the truncation above.
        start_column += start.column
      found_start = Cursor(cur_line_number, start_column)
      found_end = found_start + length
      return MakeToken(lines, found_start, found_end, token_type)
    cur_line_number += 1
  # We failed to find str in lines
  return None
def SubString(lines, start, end):
  """Returns a substring in lines.

  start/end are Cursors; Eof() as end means "to the end of the input".
  Lines are assumed to keep their trailing newlines (splitlines(True)),
  which is why the pieces are joined with ''.
  """
  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))
  if start >= end:
    return ''
  if start.line == end.line:
    return lines[start.line][start.column:end.column]
  result_lines = ([lines[start.line][start.column:]] +
                  lines[start.line + 1:end.line] +
                  [lines[end.line][:end.column]])
  return ''.join(result_lines)
def StripMetaComments(_str):
  """Strip meta comments from each line in the given string."""
  # First, completely remove lines containing nothing but a meta
  # comment, including the trailing \n.  Without re.MULTILINE this only
  # handles a comment-only line at the very start of the string; the
  # second pass below covers all later ones because its \s* consumes
  # the newline *preceding* a comment-only line.
  _str = re.sub(r'^\s*\$\$.*\n', '', _str)
  # Then, remove meta comments from contentful lines.
  return re.sub(r'\s*\$\$.*', '', _str)
def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token spanning [start, end) in lines."""
  return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
  """Parses a token of token_type that must start exactly at pos.

  Exits the program with an error message when regex does not match
  at pos (m.start() must be 0).
  """
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    # Parenthesized print is valid under both Python 2 and Python 3.
    print('ERROR: %s expected at %s.' % (token_type, pos))
    sys.exit(1)
# Precompiled patterns used by the tokenizer helpers below.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
# Everything up to (but excluding) end-of-line or a '$$' meta comment.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
  """Returns pos advanced past a match of regex starting exactly at pos.

  If regex does not match at pos, pos is returned unchanged.  regex may
  be a pattern string or a compiled pattern (re.search accepts both).
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m and not m.start():
    return pos + m.end()
  else:
    return pos
def SkipUntil(lines, pos, regex, token_type):
  """Returns the position of the first regex match at or after pos.

  Only the current line is searched; exits the program with an error
  naming token_type when no match exists on that line.
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    # Parenthesized print is valid under both Python 2 and Python 3.
    print('ERROR: %s expected on line %s after column %s.' %
          (token_type, pos.line + 1, pos.column))
    sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesised expression starting at pos into an 'exp' Token.

  Handles nested parentheses via mutual recursion between ParseInParens
  (one '(...)' group) and Parse (content up to the matching ')').
  """
  def ParseInParens(pos):
    # Skip leading whitespace, the '(', the body, then the ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos
  def Parse(pos):
    # Advance to the next paren; recurse into '(' groups until the
    # closing ')' of the current level is reached.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos
  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns token with one trailing '\\n' removed from its value, if any.

  The original token is never mutated; a new Token is returned when a
  newline is stripped.  Note the end cursor is left unchanged.
  """
  if token.value.endswith('\n'):
    return Token(token.start, token.end, token.value[:-1], token.token_type)
  else:
    return token
def TokenizeLines(lines, pos):
  """Generates the tokens in lines starting at cursor pos.

  Yields plain 'code' tokens interleaved with meta tokens ($var, $for,
  $range, $if/$elif/$else, $(...), ']]', ...) located via TOKEN_TABLE.
  NOTE(review): TOKEN_TABLE is defined elsewhere in this file — assumed
  to be a list of (compiled regex, token_type) pairs.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No further meta token: the remainder of the input is raw code.
      yield MakeToken(lines, pos, Eof(), 'code')
      return
    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      prev_token = MakeToken(lines, pos, found.start, 'code')
      # Directive-introducing tokens drop the preceding newline so that
      # directive lines do not inject blank lines into the output.
      prev_token_rstripped = RStripNewLineFromToken(prev_token)
    if found.token_type == '$var':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')
      if SubString(lines, pos, pos + 2) != '[[':
        # Inline form: '$var id = <rest of line>'.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      # '$range id lower..upper' — split the two bound expressions.
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if not s:
    # Empty input produces no tokens at all.
    return
  lines = s.splitlines(True)
  for token in TokenizeLines(lines, Cursor(0, 0)):
    yield token
class CodeNode:
  """AST node: an ordered sequence of atomic code nodes."""
  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
class VarNode:
  """AST node: a $var definition (identifier bound to code/expression)."""
  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code
class RangeNode:
  """AST node: a $range declaration (identifier, lower exp, upper exp)."""
  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2
class ForNode:
  """AST node: a $for loop (identifier, optional separator, body code)."""
  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code
class ElseNode:
  """AST node for an else branch.

  NOTE(review): appears unused — ParseElseNode returns code/If nodes
  directly rather than wrapping them in ElseNode.
  """
  def __init__(self, else_branch=None):
    self.else_branch = else_branch
class IfNode:
  """AST node: $if with condition, then-branch and optional else-branch."""
  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch
class RawCodeNode:
  """AST node: literal output text (a 'code' token passed through)."""
  def __init__(self, token=None):
    self.raw_code = token
class LiteralDollarNode:
  """AST node: a literal '$' character (escaped as $($) in the source)."""
  def __init__(self, token):
    self.token = token
class ExpNode:
  """AST node: a meta expression plus its eval()-ready Python form."""
  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list (mutates in place)."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list in place (returns None)."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the front token, optionally checking its type.

  Exits the program with an error when token_type is given and the
  popped token's type differs.
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    # Parenthesized print is valid under both Python 2 and Python 3.
    print('ERROR: %s expected at %s' % (token_type, token.start))
    print('ERROR: %s found instead' % (token,))
    sys.exit(1)
  return token
def PeekToken(a_list):
  """Returns the first element of a_list without removing it, or None."""
  return a_list[0] if a_list else None
def ParseExpNode(token):
  """Wraps token in an ExpNode with an eval()-ready Python expression.

  Every identifier in the expression is rewritten to
  self.GetValue("<id>") so Env.EvalExp can evaluate it against the
  current variable environment.
  """
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
def ParseElseNode(tokens):
  """Parses an optional $else / $elif continuation from tokens.

  Returns the else-branch code node, a nested CodeNode([IfNode(...)])
  for $elif, or None when no else-continuation follows.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  _next = PeekToken(tokens)
  # BUG FIX: the guard previously tested the *builtin* 'next' (always
  # truthy) instead of the local '_next', so an exhausted token list
  # fell through and crashed on _next.token_type below.
  if not _next:
    return None
  if _next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif _next.token_type == '$elif':
    # Desugar '$elif cond' into a nested if inside the else branch.
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not _next.value.strip():
    # Whitespace-only code token: skip it and keep looking.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one atomic AST node from the front of tokens.

  Returns the parsed node, or None (after pushing the token back) when
  the next token cannot start an atomic node (e.g. a closing ']]').
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    id_token = Pop('id')
    Pop('=')
    _next = PeekToken(tokens)
    # NOTE(review): assumes at least one token follows '$var id =';
    # _next would be None (and crash) at end-of-input.
    if _next.token_type == 'exp':
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    # Optional separator text appears as a 'code' token before '[['.
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # Strip the leading '$' so the identifier evaluates as an expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not an atomic node: restore the token and let the caller stop.
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Parses consecutive atomic nodes from tokens into one CodeNode."""
  atomic_code_list = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if not node:
      # Next token cannot start an atomic node; stop here.
      break
    atomic_code_list.append(node)
  return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST."""
  return ParseCodeNode(list(Tokenize(pump_src_text)))
class Env:
  """Evaluation environment: stacks of meta variables and meta ranges."""
  def __init__(self):
    self.variables = []
    self.ranges = []
  def Clone(self):
    """Returns a copy; pushes/pops on the clone do not affect self."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone
  def PushVariable(self, var, value):
    """Binds var to value at the innermost scope."""
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]
  def PopVariable(self):
    """Removes the innermost variable binding."""
    self.variables[:1] = []
  def PushRange(self, var, lower, upper):
    """Binds var to the inclusive range (lower, upper)."""
    self.ranges[:0] = [(var, lower, upper)]
  def PopRange(self):
    """Removes the innermost range binding."""
    self.ranges[:1] = []
  def GetValue(self, identifier):
    """Returns the innermost binding of identifier; exits if undefined."""
    for (var, value) in self.variables:
      if identifier == var:
        return value
    print('ERROR: meta variable %s is undefined.' % (identifier,))
    sys.exit(1)
  def EvalExp(self, exp):
    """Evaluates exp.python_exp in this environment; exits on failure."""
    try:
      # NOTE: eval() of template-author code — Pump input is trusted,
      # so this is intentional; never feed untrusted .pump files here.
      result = eval(exp.python_exp)
    except Exception as e:  # 'as' form works on Python 2.6+ and 3.
      print('ERROR: caught exception %s: %s' % (e.__class__.__name__, e))
      print('ERROR: failed to evaluate meta expression %s at %s' %
            (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result
  def GetRange(self, identifier):
    """Returns (lower, upper) for identifier; exits if undefined."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)
    print('ERROR: range %s is undefined.' % (identifier,))
    sys.exit(1)
class Output:
  """Accumulates generated text in a single string buffer."""
  def __init__(self):
    self.string = ''
  def GetLastLine(self):
    """Returns the text after the final newline, or '' if none exists."""
    _, newline, tail = self.string.rpartition('\n')
    if not newline:
      # No newline at all: the original returns '' in this case.
      return ''
    return tail
  def Append(self, s):
    """Appends s to the buffer."""
    self.string = self.string + s
def RunAtomicCode(env, node, output):
  """Executes one AST node in env, appending generated text to output.

  Dispatches on the node's class; exits the program on an unknown node.
  """
  if isinstance(node, VarNode):
    # Evaluate the definition body in a scratch Output, then bind it.
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    (lower, upper) = env.GetRange(identifier)
    # Inclusive iteration; the separator goes between items only.
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    # Parenthesized print is valid under both Python 2 and Python 3.
    print('BAD')
    print(node)
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes every atomic node of code_node in order, in env."""
  for atomic_code in code_node.atomic_code:
    RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
  """Returns True iff cur_line contains a '//' comment marker."""
  return cur_line.find('//') >= 0
def IsInPreprocessorDirective(prev_lines, cur_line):
  """Returns truthy iff cur_line belongs to a preprocessor directive."""
  stripped = cur_line.lstrip()
  if stripped.startswith('#'):
    return True
  # A directive continues onto this line when the previous emitted line
  # ends with a backslash.
  return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a line containing a // comment to 80 columns into output.

  Code before the comment (if any) is emitted on its own line; the
  comment text is re-flowed at word boundaries under a '// ' prefix
  aligned with the original comment/code indentation.
  """
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    # Comment-only line: keep its original column.
    indent = loc
  else:
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into word + trailing-punctuation segments to wrap on.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps a code line to 80 columns, appending wrapped lines to output.

  line_concat is appended to every broken line (' \\' for preprocessor
  directives, '' for plain code); continuation lines are indented four
  extra spaces.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '                    # Prefix of the current line
  max_len = 80 - indent - len(line_concat)   # Maximum length of the current line
  new_prefix = prefix + 4*' '            # Prefix of a continuation line
  new_max_len = max_len - 4              # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
  """Wraps a preprocessor line, continuing broken lines with ' \\'."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps an ordinary code line with no continuation suffix."""
  WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
  """Returns a truthy match iff line holds a '/* IWYU pragma:' marker."""
  pragma_pattern = r'/\* IWYU pragma: '
  return re.search(pragma_pattern, line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Returns truthy iff line is a header guard, #include, or IWYU pragma.

  Such lines are exempt from 80-column wrapping; breaking an IWYU
  pragma would confuse iwyu.py.
  """
  guard = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  if guard:
    return guard
  include = re.match(r'^#include\s', line)
  if include:
    return include
  return re.search(r'// IWYU pragma: ', line)
def WrapLongLine(line, output):
  """Appends line to output, wrapping it to 80 columns when needed.

  Dispatches to comment / preprocessor / plain-code wrapping; header
  guards, includes and IWYU pragmas are deliberately left long.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
  elif IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
  elif IsMultiLineIWYUPragma(line):
    output.append(line)
  else:
    WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace from string."""
  wrapped = []
  for line in string.splitlines():
    WrapLongLine(line, wrapped)
  # Always end the result with exactly one trailing newline.
  return '\n'.join(line.rstrip() for line in wrapped) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  # Pipeline: strip $$ comments -> parse -> execute -> re-wrap output.
  ast = ParseToAST(StripMetaComments(src_text))
  output = Output()
  RunCode(Env(), ast, output)
  return BeautifyCode(output.string)
def main(argv):
  """Expands the .pump file named as the last command-line argument.

  Writes <name> (input name minus '.pump') with a generated-file
  banner, or stdout when the input does not end in '.pump'.
  """
  if len(argv) == 1:
    print(__doc__)
    sys.exit(1)
  file_path = argv[-1]
  # 'with open' closes the handle even on error (the old file() builtin
  # is Python 2 only and leaked the handle).
  with open(file_path, 'r') as source_file:
    output_str = ConvertFromPumpSource(source_file.read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    # No trailing newline is added, matching the old "print x," form.
    sys.stdout.write(output_str)
  else:
    with open(output_file_path, 'w') as output_file:
      output_file.write('// This file was GENERATED by command:\n')
      output_file.write('// %s %s\n' %
                        (os.path.basename(__file__), os.path.basename(file_path)))
      output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
      output_file.write(output_str)
# Script entry point: expand the .pump file named on the command line.
if __name__ == '__main__':
  main(sys.argv)
# ---------------------------------------------------------------------------
"""Undocumented Module"""
__all__ = ['DirectGuiBase', 'DirectGuiWidget']
from panda3d.core import *
from panda3d.direct import get_config_showbase
from . import DirectGuiGlobals as DGG
from .OnscreenText import *
from .OnscreenGeom import *
from .OnscreenImage import *
from direct.directtools.DirectUtil import ROUND_TO
from direct.showbase import DirectObject
from direct.task import Task
# PStats collector used to time creation/management of GUI objects.
guiObjectCollector = PStatCollector("Client::GuiObjects")
"""
Base class for all Direct Gui items. Handles composite widgets and
command line argument parsing.
"""
"""
Code Overview:
1) Each widget defines a set of options (optiondefs) as a list of tuples
of the form ('name', defaultValue, handler).
'name' is the name of the option (used during construction of configure)
handler can be: None, method, or INITOPT. If a method is specified,
it will be called during widget construction (via initialiseoptions),
if the Handler is specified as an INITOPT, this is an option that can
only be set during widget construction.
2) DirectGuiBase.defineoptions is called.  defineoptions creates:
self._constructorKeywords = { keyword: [value, useFlag] }
a dictionary of the keyword options specified as part of the constructor
keywords can be of the form 'component_option', where component is
the name of a widget's component, a component group or a component alias
self._dynamicGroups, a list of group names for which it is permissible
to specify options before components of that group are created.
If a widget is a derived class the order of execution would be:
foo.optiondefs = {}
foo.defineoptions()
fooParent()
fooParent.optiondefs = {}
fooParent.defineoptions()
3) addoptions is called. This combines options specified as keywords to
the widget constructor (stored in self._constructorKeywords)
with the default options (stored in optiondefs). Results are stored in
self._optionInfo = { keyword: [default, current, handler] }
If a keyword is of the form 'component_option' it is left in the
self._constructorKeywords dictionary (for use by component constructors),
otherwise it is 'used', and deleted from self._constructorKeywords.
Notes: - constructor keywords override the defaults.
- derived class default values override parent class defaults
- derived class handler functions override parent class functions
4) Superclass initialization methods are called (resulting in nested calls
to define options (see 2 above)
5) Widget components are created via calls to self.createcomponent.
User can specify aliases and groups for each component created.
Aliases are alternate names for components, e.g. a widget may have a
component with a name 'entryField', which itself may have a component
named 'entry', you could add an alias 'entry' for the 'entryField_entry'
These are stored in self.__componentAliases. If an alias is found,
all keyword entries which use that alias are expanded to their full
form (to avoid conversion later)
Groups allow option specifications that apply to all members of the group.
If a widget has components: 'text1', 'text2', and 'text3' which all belong
to the 'text' group, they can be all configured with keywords of the form:
'text_keyword' (e.g. text_font = 'comic.rgb'). A component's group
is stored as the fourth element of its entry in self.__componentInfo
Note: the widget constructors have access to all remaining keywords in
_constructorKeywords (those not transferred to _optionInfo by
define/addoptions). If a component defines an alias that applies to
one of the keywords, that keyword is replaced with a new keyword with
the alias expanded.
If a keyword (or substituted alias keyword) is used during creation of the
component, it is deleted from self._constructorKeywords. If a group
keyword applies to the component, that keyword is marked as used, but is
not deleted from self._constructorKeywords, in case it applies to another
component. If any constructor keywords remain at the end of component
construction (and initialisation), an error is raised.
6) initialiseoptions is called.  This method calls any option handlers to
respond to any keyword/default values, then checks to see if any keywords
are left unused. If so, an error is raised.
"""
class DirectGuiBase(DirectObject.DirectObject):
    def __init__(self):
        """Initialises per-instance bookkeeping for components and aliases.

        Option bookkeeping (_optionInfo, _constructorKeywords,
        _dynamicGroups) is created earlier, in defineoptions(), which
        subclass constructors call before this base constructor runs.
        """
        # Default id of all gui object, subclasses should override this
        self.guiId = 'guiObject'
        # List of all post initialization functions
        self.postInitialiseFuncList = []
        # To avoid doing things redundantly during initialisation
        self.fInit = 1
        # Mapping from each megawidget option to a list of information
        # about the option
        #   - default value
        #   - current value
        #   - function to call when the option is initialised in the
        #     call to initialiseoptions() in the constructor or
        #     modified via configure().  If this is INITOPT, the
        #     option is an initialisation option (an option that can
        #     be set by the call to the constructor but can not be
        #     used with configure).
        # This mapping is not initialised here, but in the call to
        # defineoptions() which precedes construction of this base class.
        #
        # self._optionInfo = {}
        # Mapping from each component name to a tuple of information
        # about the component.
        #   - component widget instance
        #   - configure function of widget instance
        #   - the class of the widget (Frame, EntryField, etc)
        #   - cget function of widget instance
        #   - the name of the component group of this component, if any
        self.__componentInfo = {}
        # Mapping from alias names to the names of components or
        # sub-components.
        self.__componentAliases = {}
        # Contains information about the keywords provided to the
        # constructor.  It is a mapping from the keyword to a tuple
        # containing:
        #    - value of keyword
        #    - a boolean indicating if the keyword has been used.
        # A keyword is used if, during the construction of a megawidget,
        #    - it is defined in a call to defineoptions() or addoptions(), or
        #    - it references, by name, a component of the megawidget, or
        #    - it references, by group, at least one component
        # At the end of megawidget construction, a call is made to
        # initialiseoptions() which reports an error if there are
        # unused options given to the constructor.
        #
        # self._constructorKeywords = {}
        # List of dynamic component groups.  If a group is included in
        # this list, then it not an error if a keyword argument for
        # the group is given to the constructor or to configure(), but
        # no components with this group have been created.
        # self._dynamicGroups = ()
    def defineoptions(self, keywords, optionDefs, dynamicGroups = ()):
        """ defineoptions(keywords, optionDefs, dynamicGroups = ())

        Merge constructor keyword arguments with this widget's default
        option definitions.  Must be called before the base-class
        constructor so derived-class defaults override base-class ones.
        """
        # Create options, providing the default value and the method
        # to call when the value is changed.  If any option created by
        # base classes has the same name as one in <optionDefs>, the
        # base class's value and function will be overriden.
        # keywords is a dictionary of keyword/value pairs from the constructor
        # optionDefs is a dictionary of default options for the widget
        # dynamicGroups is a tuple of component groups for which you can
        # specify options even though no components of this group have
        # been created
        # This should be called before the constructor of the base
        # class, so that default values defined in the derived class
        # override those in the base class.
        if not hasattr(self, '_constructorKeywords'):
            # First (most-derived) call: snapshot the constructor keywords.
            tmp = {}
            for option, value in keywords.items():
                tmp[option] = [value, 0]
            self._constructorKeywords = tmp
            self._optionInfo = {}
        # Initialize dictionary of dynamic groups
        if not hasattr(self, '_dynamicGroups'):
            self._dynamicGroups = ()
        self._dynamicGroups = self._dynamicGroups + tuple(dynamicGroups)
        # Reconcile command line and default options
        self.addoptions(optionDefs, keywords)
    def addoptions(self, optionDefs, optionkeywords):
        """ addoptions(optionDefs) - add option def to option info

        Folds (name, default, handler) triples into self._optionInfo,
        letting constructor keywords override defaults.  Entries named
        'component_option' are routed to _constructorKeywords instead.
        """
        # Add additional options, providing the default value and the
        # method to call when the value is changed.  See
        # "defineoptions" for more details
        # optimisations:
        optionInfo = self._optionInfo
        optionInfo_has_key = optionInfo.__contains__
        keywords = self._constructorKeywords
        keywords_has_key = keywords.__contains__
        FUNCTION = DGG._OPT_FUNCTION
        for name, default, function in optionDefs:
            if '_' not in name:
                default = optionkeywords.get(name, default)
                # The option will already exist if it has been defined
                # in a derived class.  In this case, do not override the
                # default value of the option or the callback function
                # if it is not None.
                if not optionInfo_has_key(name):
                    if keywords_has_key(name):
                        # Overridden by keyword, use keyword value
                        value = keywords[name][0]
                        optionInfo[name] = [default, value, function]
                        # Delete it from self._constructorKeywords
                        del keywords[name]
                    else:
                        # Use optionDefs value
                        optionInfo[name] = [default, default, function]
                elif optionInfo[name][FUNCTION] is None:
                    # Only override function if not defined by derived class
                    optionInfo[name][FUNCTION] = function
            else:
                # This option is of the form "component_option".  If this is
                # not already defined in self._constructorKeywords add it.
                # This allows a derived class to override the default value
                # of an option of a component of a base class.
                if not keywords_has_key(name):
                    keywords[name] = [default, 0]
    def initialiseoptions(self, myClass):
        """
        Call all initialisation functions to initialize widget
        options to default of keyword value.

        Runs only when called by the most-derived class; raises
        KeyError for constructor keywords that were never consumed.
        """
        # This is to make sure this method class is only called by
        # the most specific class in the class hierarchy
        if self.__class__ is myClass:
            # Call the configuration callback function for every option.
            FUNCTION = DGG._OPT_FUNCTION
            self.fInit = 1
            for info in self._optionInfo.values():
                func = info[FUNCTION]
                if func is not None and func is not DGG.INITOPT:
                    func()
            self.fInit = 0
            # Now check if anything is left over
            unusedOptions = []
            keywords = self._constructorKeywords
            for name in keywords:
                used = keywords[name][1]
                if not used:
                    # This keyword argument has not been used.  If it
                    # does not refer to a dynamic group, mark it as
                    # unused.
                    index = name.find('_')
                    if index < 0 or name[:index] not in self._dynamicGroups:
                        unusedOptions.append(name)
            self._constructorKeywords = {}
            if len(unusedOptions) > 0:
                if len(unusedOptions) == 1:
                    text = 'Unknown option "'
                else:
                    text = 'Unknown options "'
                raise KeyError(text + ', '.join(unusedOptions) + \
                      '" for ' + myClass.__name__)
            # Can now call post init func
            self.postInitialiseFunc()
def postInitialiseFunc(self):
for func in self.postInitialiseFuncList:
func()
    def isinitoption(self, option):
        """
        Is this option one that can only be specified at construction?
        """
        return self._optionInfo[option][DGG._OPT_FUNCTION] is DGG.INITOPT
    def options(self):
        """
        Returns a sorted list of (name, default, isInitOption) tuples
        describing this widget's options.
        Does not include subcomponent options.
        """
        options = []
        if hasattr(self, '_optionInfo'):
            for option, info in self._optionInfo.items():
                isinit = info[DGG._OPT_FUNCTION] is DGG.INITOPT
                default = info[DGG._OPT_DEFAULT]
                options.append((option, default, isinit))
            options.sort()
        return options
    def configure(self, option=None, **kw):
        """
        configure(option = None)
        Query or configure the megawidget options.

        With no kw: returns a dict of (name, default, value) 3-tuples
        (option None) or the single 3-tuple for the named option.
        With kw: sets widget options and routes 'component_option'
        style keywords to the matching component or component group.
        """
        #
        # If not empty, *kw* is a dictionary giving new
        # values for some of the options of this gui item
        # For options defined for this widget, set
        # the value of the option to the new value and call the
        # configuration callback function, if any.
        #
        # If *option* is None, return all gui item configuration
        # options and settings.  Options are returned as standard 3
        # element tuples
        #
        # If *option* is a string, return the 3 element tuple for the
        # given configuration option.
        # First, deal with the option queries.
        if len(kw) == 0:
            # This configure call is querying the values of one or all options.
            # Return 3-tuples:
            #     (optionName, default, value)
            if option is None:
                rtn = {}
                for option, config in self._optionInfo.items():
                    rtn[option] = (option,
                                   config[DGG._OPT_DEFAULT],
                                   config[DGG._OPT_VALUE])
                return rtn
            else:
                config = self._optionInfo[option]
                return (option, config[DGG._OPT_DEFAULT], config[DGG._OPT_VALUE])
        # optimizations:
        optionInfo = self._optionInfo
        optionInfo_has_key = optionInfo.__contains__
        componentInfo = self.__componentInfo
        componentInfo_has_key = componentInfo.__contains__
        componentAliases = self.__componentAliases
        componentAliases_has_key = componentAliases.__contains__
        VALUE = DGG._OPT_VALUE
        FUNCTION = DGG._OPT_FUNCTION
        # This will contain a list of options in *kw* which
        # are known to this gui item.
        directOptions = []
        # This will contain information about the options in
        # *kw* of the form <component>_<option>, where
        # <component> is a component of this megawidget.  It is a
        # dictionary whose keys are the configure method of each
        # component and whose values are a dictionary of options and
        # values for the component.
        indirectOptions = {}
        indirectOptions_has_key = indirectOptions.__contains__
        for option, value in kw.items():
            if optionInfo_has_key(option):
                # This is one of the options of this gui item.
                # Check it is an initialisation option.
                if optionInfo[option][FUNCTION] is DGG.INITOPT:
                    print('Cannot configure initialisation option "' \
                          + option + '" for ' + self.__class__.__name__)
                    break
                #raise KeyError, \
                #        'Cannot configure initialisation option "' \
                #        + option + '" for ' + self.__class__.__name__
                optionInfo[option][VALUE] = value
                directOptions.append(option)
            else:
                index = option.find('_')
                if index >= 0:
                    # This option may be of the form <component>_<option>.
                    # e.g. if alias ('efEntry', 'entryField_entry')
                    # and option = efEntry_width
                    # component = efEntry, componentOption = width
                    component = option[:index]
                    componentOption = option[(index + 1):]
                    # Expand component alias
                    if componentAliases_has_key(component):
                        # component = entryField, subcomponent = entry
                        component, subComponent = componentAliases[component]
                        if subComponent is not None:
                            # componentOption becomes entry_width
                            componentOption = subComponent + '_' \
                                              + componentOption
                        # Expand option string to write on error
                        # option = entryField_entry_width
                        option = component + '_' + componentOption
                    # Does this component exist
                    if componentInfo_has_key(component):
                        # Get the configure func for the named component
                        # component = entryField
                        componentConfigFuncs = [componentInfo[component][1]]
                    else:
                        # Check if this is a group name and configure all
                        # components in the group.
                        componentConfigFuncs = []
                        # For each component
                        for info in componentInfo.values():
                            # Check if it is a member of this group
                            if info[4] == component:
                                # Yes, append its config func
                                componentConfigFuncs.append(info[1])
                        if len(componentConfigFuncs) == 0 and \
                           component not in self._dynamicGroups:
                            raise KeyError('Unknown option "' + option + \
                                  '" for ' + self.__class__.__name__)
                    # Add the configure method(s) (may be more than
                    # one if this is configuring a component group)
                    # and option/value to dictionary.
                    for componentConfigFunc in componentConfigFuncs:
                        if not indirectOptions_has_key(componentConfigFunc):
                            indirectOptions[componentConfigFunc] = {}
                        # Create a dictionary of keyword/values keyed
                        # on configuration function
                        indirectOptions[componentConfigFunc][componentOption] \
                                = value
                else:
                    raise KeyError('Unknown option "' + option + \
                          '" for ' + self.__class__.__name__)
        # Call the configure methods for any components.
        # Pass in the dictionary of keyword/values created above
        for func, options in indirectOptions.items():
            func(**options)
        # Call the configuration callback function for each option.
        for option in directOptions:
            info = optionInfo[option]
            func = info[DGG._OPT_FUNCTION]
            if func is not None:
                func()
    # Allow index style references
    def __setitem__(self, key, value):
        """Support widget['option'] = value via configure()."""
        self.configure(**{key: value})
    def cget(self, option):
        """
        Get current configuration setting for this option.

        *option* is either a direct option of this megawidget, or a
        component option of the form "component_option" (the component
        part may be an alias or a group name).
        """
        # Return the value of an option, for example myWidget['font'].
        if option in self._optionInfo:
            return self._optionInfo[option][DGG._OPT_VALUE]
        else:
            # Not a direct option; try to resolve "component_option".
            index = option.find('_')
            if index >= 0:
                component = option[:index]
                componentOption = option[(index + 1):]
                # Expand component alias
                if component in self.__componentAliases:
                    component, subComponent = self.__componentAliases[
                        component]
                    if subComponent is not None:
                        componentOption = subComponent + '_' + componentOption
                    # Expand option string to write on error
                    option = component + '_' + componentOption
                if component in self.__componentInfo:
                    # Call cget on the component.
                    componentCget = self.__componentInfo[component][3]
                    return componentCget(componentOption)
                else:
                    # If this is a group name, call cget for one of
                    # the components in the group.
                    for info in self.__componentInfo.values():
                        if info[4] == component:
                            componentCget = info[3]
                            return componentCget(componentOption)
            # Option not found
            raise KeyError('Unknown option "' + option + \
                           '" for ' + self.__class__.__name__)
    # Allow index style references
    __getitem__ = cget
    def createcomponent(self, componentName, componentAliases, componentGroup,
                        widgetClass, *widgetArgs, **kw):
        """
        Create a component (during construction or later) for this widget.

        componentAliases is a sequence of (alias, component) pairs;
        componentGroup lets several components share group-level options.
        Construction keywords captured in self._constructorKeywords that
        match this component (by group or by name prefix) are folded into
        *kw* before the widget is built.  Returns the new widget, or None
        if widgetClass is None.
        """
        # Check for invalid component name
        if '_' in componentName:
            raise ValueError('Component name "%s" must not contain "_"' % componentName)
        # Get construction keywords
        if hasattr(self, '_constructorKeywords'):
            keywords = self._constructorKeywords
        else:
            keywords = {}
        for alias, component in componentAliases:
            # Create aliases to the component and its sub-components.
            index = component.find('_')
            if index < 0:
                # Just a shorter name for one of this widget's components
                self.__componentAliases[alias] = (component, None)
            else:
                # An alias for a component of one of this widget's components
                mainComponent = component[:index]
                subComponent = component[(index + 1):]
                self.__componentAliases[alias] = (mainComponent, subComponent)
            # Remove aliases from the constructor keyword arguments by
            # replacing any keyword arguments that begin with *alias*
            # with corresponding keys beginning with *component*.
            alias = alias + '_'
            aliasLen = len(alias)
            # Iterate over a copy because entries are deleted in the loop.
            for option in keywords.copy():
                if len(option) > aliasLen and option[:aliasLen] == alias:
                    newkey = component + '_' + option[aliasLen:]
                    keywords[newkey] = keywords[option]
                    del keywords[option]
        # Find any keyword arguments for this component
        componentPrefix = componentName + '_'
        nameLen = len(componentPrefix)
        # First, walk through the option list looking for arguments
        # than refer to this component's group.
        for option in keywords:
            # Check if this keyword argument refers to the group
            # of this component. If so, add this to the options
            # to use when constructing the widget. Mark the
            # keyword argument as being used, but do not remove it
            # since it may be required when creating another
            # component.
            index = option.find('_')
            if index >= 0 and componentGroup == option[:index]:
                rest = option[(index + 1):]
                kw[rest] = keywords[option][0]
                keywords[option][1] = 1
        # Now that we've got the group arguments, walk through the
        # option list again and get out the arguments that refer to
        # this component specifically by name. These are more
        # specific than the group arguments, above; we walk through
        # the list afterwards so they will override.
        for option in keywords.copy():
            if len(option) > nameLen and option[:nameLen] == componentPrefix:
                # The keyword argument refers to this component, so add
                # this to the options to use when constructing the widget.
                kw[option[nameLen:]] = keywords[option][0]
                # And delete it from main construction keywords
                del keywords[option]
        # Return None if no widget class is specified
        if widgetClass is None:
            return None
        # Get arguments for widget constructor
        if len(widgetArgs) == 1 and type(widgetArgs[0]) == tuple:
            # Arguments to the constructor can be specified as either
            # multiple trailing arguments to createcomponent() or as a
            # single tuple argument.
            widgetArgs = widgetArgs[0]
        # Create the widget
        widget = widgetClass(*widgetArgs, **kw)
        componentClass = widget.__class__.__name__
        # Record (widget, configure, class name, cget, group) for lookups.
        self.__componentInfo[componentName] = (widget, widget.configure,
                                               componentClass, widget.cget, componentGroup)
        return widget
    def component(self, name):
        """Return the component widget named *name*.

        *name* may address a nested sub-component with underscores
        (e.g. 'entryField_entry') and may start with an alias.
        """
        # Return a component widget of the megawidget given the
        # component's name
        # This allows the user of a megawidget to access and configure
        # widget components directly.
        # Find the main component and any subcomponents
        index = name.find('_')
        if index < 0:
            component = name
            remainingComponents = None
        else:
            component = name[:index]
            remainingComponents = name[(index + 1):]
        # Expand component alias
        # Example entry which is an alias for entryField_entry
        if component in self.__componentAliases:
            # component = entryField, subComponent = entry
            component, subComponent = self.__componentAliases[component]
            if subComponent is not None:
                if remainingComponents is None:
                    # remainingComponents = entry
                    remainingComponents = subComponent
                else:
                    remainingComponents = subComponent + '_' \
                                          + remainingComponents
        # Get the component from __componentInfo dictionary
        widget = self.__componentInfo[component][0]
        if remainingComponents is None:
            # Not looking for subcomponent
            return widget
        else:
            # Recursive call on subcomponent
            return widget.component(remainingComponents)
def components(self):
# Return a list of all components.
names = list(self.__componentInfo.keys())
names.sort()
return names
def hascomponent(self, component):
return component in self.__componentInfo
    def destroycomponent(self, name):
        """Destroy the named component widget and forget its record."""
        # Remove a megawidget component.
        # This command is for use by megawidget designers to destroy a
        # megawidget component.
        self.__componentInfo[name][0].destroy()
        del self.__componentInfo[name]
    def destroy(self):
        """Tear down this megawidget: drop hooks and bookkeeping tables."""
        # Clean out any hooks
        self.ignoreAll()
        # Drop option/component tables so they cannot keep widgets alive.
        del self._optionInfo
        del self.__componentInfo
        del self.postInitialiseFuncList
def bind(self, event, command, extraArgs = []):
"""
Bind the command (which should expect one arg) to the specified
event (such as ENTER, EXIT, B1PRESS, B1CLICK, etc.)
See DirectGuiGlobals for possible events
"""
# Need to tack on gui item specific id
gEvent = event + self.guiId
if get_config_showbase().GetBool('debug-directgui-msgs', False):
from direct.showbase.PythonUtil import StackTrace
print(gEvent)
print(StackTrace())
self.accept(gEvent, command, extraArgs = extraArgs)
def unbind(self, event):
"""
Unbind the specified event
"""
# Need to tack on gui item specific id
gEvent = event + self.guiId
self.ignore(gEvent)
def toggleGuiGridSnap():
    """Flip the global snap-to-grid flag used when drag-editing widgets."""
    current = DirectGuiWidget.snapToGrid
    DirectGuiWidget.snapToGrid = 1 - current
def setGuiGridSpacing(spacing):
    """Set the global grid spacing used when snap-to-grid is active."""
    setattr(DirectGuiWidget, 'gridSpacing', spacing)
class DirectGuiWidget(DirectGuiBase, NodePath):
    """A DirectGuiBase megawidget that is also a scene-graph NodePath.

    Instances own a PGItem (or subclass), one state NodePath per widget
    state, and one PGFrameStyle per state describing the frame drawn
    behind the widget's geometry.
    """
    # Toggle if you wish widgets to snap to grid when dragging
    snapToGrid = 0
    gridSpacing = 0.05
    # Determine the default initial state for inactive (or
    # unclickable) components. If we are in edit mode, these are
    # actually clickable by default.
    guiEdit = get_config_showbase().GetBool('direct-gui-edit', 0)
    if guiEdit:
        inactiveInitState = DGG.NORMAL
    else:
        inactiveInitState = DGG.DISABLED
    # Registry of every live widget keyed by guiId (used by destroy()).
    guiDict = {}

    def __init__(self, parent = None, **kw):
        """Create the widget and attach it under *parent* (aspect2d by
        default), applying any DirectGui options given in *kw*."""
        # Direct gui widgets are node paths
        # Direct gui widgets have:
        # - stateNodePaths (to hold visible representation of widget)
        # State node paths can have:
        # - a frame of type (None, FLAT, RAISED, GROOVE, RIDGE)
        # - arbitrary geometry for each state
        # They inherit from DirectGuiWidget
        # - Can create components (with aliases and groups)
        # - Can bind to mouse events
        # They inherit from NodePath
        # - Can position/scale them
        optiondefs = (
            # Widget's constructor
            ('pgFunc', PGItem, None),
            ('numStates', 1, None),
            ('invertedFrames', (), None),
            ('sortOrder', 0, None),
            # Widget's initial state
            ('state', DGG.NORMAL, self.setState),
            # Widget's frame characteristics
            ('relief', DGG.FLAT, self.setRelief),
            ('borderWidth', (.1, .1), self.setBorderWidth),
            ('borderUvWidth', (.1, .1), self.setBorderUvWidth),
            ('frameSize', None, self.setFrameSize),
            ('frameColor', (.8, .8, .8, 1), self.setFrameColor),
            ('frameTexture', None, self.setFrameTexture),
            ('frameVisibleScale', (1, 1), self.setFrameVisibleScale),
            ('pad', (0, 0), self.resetFrameSize),
            # Override button id (beware! your name may not be unique!)
            ('guiId', None, DGG.INITOPT),
            # Initial pos/scale of the widget
            ('pos', None, DGG.INITOPT),
            ('hpr', None, DGG.INITOPT),
            ('scale', None, DGG.INITOPT),
            ('color', None, DGG.INITOPT),
            # Do events pass through this widget?
            ('suppressMouse', 1, DGG.INITOPT),
            ('suppressKeys', 0, DGG.INITOPT),
            ('enableEdit', 1, DGG.INITOPT),
        )
        # Merge keyword options with default options
        self.defineoptions(kw, optiondefs)
        # Initialize the base classes (after defining the options).
        DirectGuiBase.__init__(self)
        NodePath.__init__(self)
        # Create a button
        self.guiItem = self['pgFunc']('')
        # Override automatically generated guiId
        if self['guiId']:
            self.guiItem.setId(self['guiId'])
        self.guiId = self.guiItem.getId()
        if __dev__:
            guiObjectCollector.addLevel(1)
            guiObjectCollector.flushLevel()
            # track gui items by guiId for tracking down leaks
            if hasattr(base, 'guiItems'):
                if self.guiId in base.guiItems:
                    base.notify.warning('duplicate guiId: %s (%s stomping %s)' %
                                        (self.guiId, self,
                                         base.guiItems[self.guiId]))
                base.guiItems[self.guiId] = self
                if hasattr(base, 'printGuiCreates'):
                    printStack()
        # Attach button to parent and make that self
        if parent is None:
            parent = aspect2d
        self.assign(parent.attachNewNode(self.guiItem, self['sortOrder']))
        # Update pose to initial values
        if self['pos']:
            self.setPos(self['pos'])
        if self['hpr']:
            self.setHpr(self['hpr'])
        if self['scale']:
            self.setScale(self['scale'])
        if self['color']:
            self.setColor(self['color'])
        # Initialize names
        # Putting the class name in helps with debugging.
        self.setName("%s-%s" % (self.__class__.__name__, self.guiId))
        # Create one NodePath per widget state
        self.stateNodePath = []
        for i in range(self['numStates']):
            self.stateNodePath.append(NodePath(self.guiItem.getStateDef(i)))
        # Initialize frame style
        self.frameStyle = []
        for i in range(self['numStates']):
            self.frameStyle.append(PGFrameStyle())
        # For holding bounds info
        self.ll = Point3(0)
        self.ur = Point3(0)
        # Is drag and drop enabled?
        if self['enableEdit'] and self.guiEdit:
            self.enableEdit()
        # Set up event handling
        suppressFlags = 0
        if self['suppressMouse']:
            suppressFlags |= MouseWatcherRegion.SFMouseButton
            suppressFlags |= MouseWatcherRegion.SFMousePosition
        if self['suppressKeys']:
            suppressFlags |= MouseWatcherRegion.SFOtherButton
        self.guiItem.setSuppressFlags(suppressFlags)
        # Bind destroy hook
        self.guiDict[self.guiId] = self
        # self.bind(DGG.DESTROY, self.destroy)
        # Update frame when everything has been initialized
        self.postInitialiseFuncList.append(self.frameInitialiseFunc)
        # Call option initialization functions
        self.initialiseoptions(DirectGuiWidget)

    def frameInitialiseFunc(self):
        """Deferred initialisation: apply frame styles and size."""
        # Now allow changes to take effect
        self.updateFrameStyle()
        if not self['frameSize']:
            self.resetFrameSize()

    def enableEdit(self):
        """Enable interactive drag/scale editing via mouse button 2."""
        self.bind(DGG.B2PRESS, self.editStart)
        self.bind(DGG.B2RELEASE, self.editStop)
        self.bind(DGG.PRINT, self.printConfig)
        # Can we move this to showbase?
        # Certainly we don't need to do this for every button!
        #mb = base.mouseWatcherNode.getModifierButtons()
        #mb.addButton(KeyboardButton.control())
        #base.mouseWatcherNode.setModifierButtons(mb)

    def disableEdit(self):
        """Remove the edit-mode mouse bindings."""
        self.unbind(DGG.B2PRESS)
        self.unbind(DGG.B2RELEASE)
        self.unbind(DGG.PRINT)
        #mb = base.mouseWatcherNode.getModifierButtons()
        #mb.removeButton(KeyboardButton.control())
        #base.mouseWatcherNode.setModifierButtons(mb)

    def editStart(self, event):
        """Start an edit task: scale if control is held, drag otherwise."""
        taskMgr.remove('guiEditTask')
        vWidget2render2d = self.getPos(render2d)
        vMouse2render2d = Point3(event.getMouse()[0], 0, event.getMouse()[1])
        editVec = Vec3(vWidget2render2d - vMouse2render2d)
        if base.mouseWatcherNode.getModifierButtons().isDown(
                KeyboardButton.control()):
            t = taskMgr.add(self.guiScaleTask, 'guiEditTask')
            t.refPos = vWidget2render2d
            t.editVecLen = editVec.length()
            t.initScale = self.getScale()
        else:
            t = taskMgr.add(self.guiDragTask, 'guiEditTask')
            t.editVec = editVec

    def guiScaleTask(self, state):
        """Per-frame task: scale the widget relative to the drag origin."""
        mwn = base.mouseWatcherNode
        if mwn.hasMouse():
            vMouse2render2d = Point3(mwn.getMouse()[0], 0, mwn.getMouse()[1])
            newEditVecLen = Vec3(state.refPos - vMouse2render2d).length()
            self.setScale(state.initScale * (newEditVecLen / state.editVecLen))
        return Task.cont

    def guiDragTask(self, state):
        """Per-frame task: move the widget with the mouse, snapping to
        the grid when DirectGuiWidget.snapToGrid is enabled."""
        mwn = base.mouseWatcherNode
        if mwn.hasMouse():
            vMouse2render2d = Point3(mwn.getMouse()[0], 0, mwn.getMouse()[1])
            newPos = vMouse2render2d + state.editVec
            self.setPos(render2d, newPos)
            if DirectGuiWidget.snapToGrid:
                newPos = self.getPos()
                newPos.set(
                    ROUND_TO(newPos[0], DirectGuiWidget.gridSpacing),
                    ROUND_TO(newPos[1], DirectGuiWidget.gridSpacing),
                    ROUND_TO(newPos[2], DirectGuiWidget.gridSpacing))
                self.setPos(newPos)
        return Task.cont

    def editStop(self, event):
        """Stop any running widget edit task."""
        taskMgr.remove('guiEditTask')

    def setState(self):
        """Option handler: translate 'state' into PGItem active/inactive."""
        # 'state' may be an integer active flag, a NORMAL/'normal'
        # constant, or anything else (DISABLED, 'disabled', ...) which
        # deactivates the item.
        if isinstance(self['state'], int):
            self.guiItem.setActive(self['state'])
        elif (self['state'] == DGG.NORMAL) or (self['state'] == 'normal'):
            self.guiItem.setActive(1)
        else:
            self.guiItem.setActive(0)

    def resetFrameSize(self):
        """Recompute the frame from current bounds (post-initialisation)."""
        if not self.fInit:
            self.setFrameSize(fClearFrame = 1)

    def setFrameSize(self, fClearFrame = 0):
        """Resize the frame to self['frameSize'] if given, otherwise to
        the widget's tight bounds plus padding and border width.

        fClearFrame: temporarily hide the frame while measuring so the
        old frame geometry does not inflate the computed bounds.
        """
        # Use ready state to determine frame Type
        frameType = self.getFrameType()
        if self['frameSize']:
            # Use user specified bounds
            self.bounds = self['frameSize']
            bw = (0, 0)
        else:
            if fClearFrame and (frameType != PGFrameStyle.TNone):
                self.frameStyle[0].setType(PGFrameStyle.TNone)
                self.guiItem.setFrameStyle(0, self.frameStyle[0])
                # To force an update of the button
                self.guiItem.getStateDef(0)
            # Clear out frame before computing bounds
            self.getBounds()
            # Restore frame style if necessary
            if (frameType != PGFrameStyle.TNone):
                self.frameStyle[0].setType(frameType)
                self.guiItem.setFrameStyle(0, self.frameStyle[0])
            if ((frameType != PGFrameStyle.TNone) and
                    (frameType != PGFrameStyle.TFlat)):
                bw = self['borderWidth']
            else:
                bw = (0, 0)
        # Set frame to new dimensions
        self.guiItem.setFrame(
            self.bounds[0] - bw[0],
            self.bounds[1] + bw[0],
            self.bounds[2] - bw[1],
            self.bounds[3] + bw[1])

    def getBounds(self, state = 0):
        """Compute and cache the widget's padded 2-d bounds for the given
        state; returns [left, right, bottom, top]."""
        self.stateNodePath[state].calcTightBounds(self.ll, self.ur)
        # Scale bounds to give a pad around graphics
        vec_right = Vec3.right()
        vec_up = Vec3.up()
        # Project the 3-d corners onto the screen right/up axes.
        left = (vec_right[0] * self.ll[0]
                + vec_right[1] * self.ll[1]
                + vec_right[2] * self.ll[2])
        right = (vec_right[0] * self.ur[0]
                 + vec_right[1] * self.ur[1]
                 + vec_right[2] * self.ur[2])
        bottom = (vec_up[0] * self.ll[0]
                  + vec_up[1] * self.ll[1]
                  + vec_up[2] * self.ll[2])
        top = (vec_up[0] * self.ur[0]
               + vec_up[1] * self.ur[1]
               + vec_up[2] * self.ur[2])
        self.ll = Point3(left, 0.0, bottom)
        self.ur = Point3(right, 0.0, top)
        self.bounds = [self.ll[0] - self['pad'][0],
                       self.ur[0] + self['pad'][0],
                       self.ll[2] - self['pad'][1],
                       self.ur[2] + self['pad'][1]]
        return self.bounds

    def getWidth(self):
        """Return the width of the cached bounds."""
        return self.bounds[1] - self.bounds[0]

    def getHeight(self):
        """Return the height of the cached bounds."""
        return self.bounds[3] - self.bounds[2]

    def getCenter(self):
        """Return the (x, y) center of the cached bounds."""
        x = self.bounds[0] + (self.bounds[1] - self.bounds[0]) / 2.0
        y = self.bounds[2] + (self.bounds[3] - self.bounds[2]) / 2.0
        return (x, y)

    def getFrameType(self, state = 0):
        """Return the PGFrameStyle type for the given state."""
        return self.frameStyle[state].getType()

    def updateFrameStyle(self):
        """Push the cached PGFrameStyles down to the PGItem."""
        if not self.fInit:
            for i in range(self['numStates']):
                self.guiItem.setFrameStyle(i, self.frameStyle[i])

    def setRelief(self, fSetStyle = 1):
        """Option handler: apply 'relief' to every state's frame style,
        honouring 'invertedFrames' for RAISED/SUNKEN reliefs."""
        relief = self['relief']
        # Convert None, and string arguments
        if relief is None:
            relief = PGFrameStyle.TNone
        elif isinstance(relief, str):
            # Convert string to frame style int
            relief = DGG.FrameStyleDict[relief]
        # Set style
        if relief == DGG.RAISED:
            for i in range(self['numStates']):
                if i in self['invertedFrames']:
                    # Bug fix: invert the frame for *this* state (the old
                    # code indexed frameStyle[1], so only state 1 ever
                    # got the inverted relief).
                    self.frameStyle[i].setType(DGG.SUNKEN)
                else:
                    self.frameStyle[i].setType(DGG.RAISED)
        elif relief == DGG.SUNKEN:
            for i in range(self['numStates']):
                if i in self['invertedFrames']:
                    self.frameStyle[i].setType(DGG.RAISED)
                else:
                    self.frameStyle[i].setType(DGG.SUNKEN)
        else:
            for i in range(self['numStates']):
                self.frameStyle[i].setType(relief)
        # Apply styles
        self.updateFrameStyle()

    def setFrameColor(self):
        """Option handler: apply 'frameColor' (one color or a per-state
        sequence of colors) to every state's frame style."""
        # this might be a single color or a list of colors
        colors = self['frameColor']
        if isinstance(colors[0], (int, float)):
            # A single flat color: reuse it for every state.
            colors = (colors,)
        for i in range(self['numStates']):
            if i >= len(colors):
                color = colors[-1]
            else:
                color = colors[i]
            self.frameStyle[i].setColor(color[0], color[1], color[2], color[3])
        self.updateFrameStyle()

    def setFrameTexture(self):
        """Option handler: apply 'frameTexture' (a Texture, a filename,
        or a per-state sequence of either) to every state."""
        # this might be a single texture or a list of textures
        textures = self['frameTexture']
        if textures is None or \
                isinstance(textures, Texture) or \
                isinstance(textures, str):
            textures = (textures,) * self['numStates']
        for i in range(self['numStates']):
            if i >= len(textures):
                texture = textures[-1]
            else:
                texture = textures[i]
            if isinstance(texture, str):
                texture = loader.loadTexture(texture)
            if texture:
                self.frameStyle[i].setTexture(texture)
            else:
                self.frameStyle[i].clearTexture()
        self.updateFrameStyle()

    def setFrameVisibleScale(self):
        """Option handler: apply 'frameVisibleScale' to every state."""
        scale = self['frameVisibleScale']
        for i in range(self['numStates']):
            self.frameStyle[i].setVisibleScale(scale[0], scale[1])
        self.updateFrameStyle()

    def setBorderWidth(self):
        """Option handler: apply 'borderWidth' to every state."""
        width = self['borderWidth']
        for i in range(self['numStates']):
            self.frameStyle[i].setWidth(width[0], width[1])
        self.updateFrameStyle()

    def setBorderUvWidth(self):
        """Option handler: apply 'borderUvWidth' to every state."""
        uvWidth = self['borderUvWidth']
        for i in range(self['numStates']):
            self.frameStyle[i].setUvWidth(uvWidth[0], uvWidth[1])
        self.updateFrameStyle()

    def destroy(self):
        """Destroy this widget, its gui children, and its scene graph
        nodes.  Safe to call more than once (guarded by frameStyle)."""
        if hasattr(self, "frameStyle"):
            if __dev__:
                guiObjectCollector.subLevel(1)
                guiObjectCollector.flushLevel()
                if hasattr(base, 'guiItems'):
                    if self.guiId in base.guiItems:
                        del base.guiItems[self.guiId]
                    else:
                        base.notify.warning(
                            'DirectGuiWidget.destroy(): '
                            'gui item %s not in base.guiItems' %
                            self.guiId)
            # Destroy children
            for child in self.getChildren():
                childGui = self.guiDict.get(child.getName())
                if childGui:
                    childGui.destroy()
                else:
                    # RAU since we added the class to the name, try
                    # it with the original name
                    parts = child.getName().split('-')
                    simpleChildGui = self.guiDict.get(parts[-1])
                    if simpleChildGui:
                        simpleChildGui.destroy()
                # messenger.send(DESTROY + child.getName())
            del self.guiDict[self.guiId]
            del self.frameStyle
            # Get rid of node path
            self.removeNode()
            for nodePath in self.stateNodePath:
                nodePath.removeNode()
            del self.stateNodePath
            del self.guiItem
            # Call superclass destruction method (clears out hooks)
            DirectGuiBase.destroy(self)

    def printConfig(self, indent = 0):
        """Print this widget's id, pos and scale, then ask gui children
        (via the PRINT event) to do the same with extra indentation."""
        space = ' ' * indent
        print('%s%s - %s' % (space, self.guiId, self.__class__.__name__))
        print('%sPos: %s' % (space, tuple(self.getPos())))
        print('%sScale: %s' % (space, tuple(self.getScale())))
        # Print out children info
        for child in self.getChildren():
            messenger.send(DGG.PRINT + child.getName(), [indent + 2])

    def copyOptions(self, other):
        """
        Copy other's options into our self so we look and feel like other
        """
        for key, value in other._optionInfo.items():
            self[key] = value[1]

    def taskName(self, idString):
        """Return a task name unique to this widget instance."""
        return (idString + "-" + str(self.guiId))

    def uniqueName(self, idString):
        """Return a name string unique to this widget instance."""
        return (idString + "-" + str(self.guiId))

    def setProp(self, propString, value):
        """
        Allows you to set a property like frame['text'] = 'Joe' in
        a function instead of an assignment.
        This is useful for setting properties inside function intervals
        where you must input a function and extraArgs, not an assignment.
        """
        self[propString] = value
| |
#!/usr/bin/env python
"""
sentry.utils.runner
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from logan.runner import run_app, configure_app
import base64
import os
import sys
import pkg_resources
import warnings
USE_GEVENT = os.environ.get('USE_GEVENT') == '1'
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
# This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your hearts content.
from sentry.conf.server import *
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
# You can swap out the engine for MySQL easily by changing this value
# to ``django.db.backends.mysql`` or to PostgreSQL with
# ``sentry.db.postgres``
# If you change this, you'll also need to install the appropriate python
# package: psycopg2 (Postgres) or mysql-python
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'sentry.db'),
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# The administrative email for this installation.
# Note: This will be reported back to getsentry.com as the point of contact. See
# the beacon documentation for more information. This **must** be a string.
# SENTRY_ADMIN_EMAIL = 'your.name@example.com'
SENTRY_ADMIN_EMAIL = ''
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
#########
# Redis #
#########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
SENTRY_REDIS_OPTIONS = {
'hosts': {
0: {
'host': '127.0.0.1',
'port': 6379,
}
}
}
#########
# Cache #
#########
# If you wish to use memcached, install the dependencies and adjust the config
# as shown:
#
# pip install python-memcached
#
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['127.0.0.1:11211'],
# }
# }
#
# SENTRY_CACHE = 'sentry.cache.django.DjangoCache'
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
#########
# Queue #
#########
# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
CELERY_ALWAYS_EAGER = False
BROKER_URL = 'redis://localhost:6379'
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
################
# File storage #
################
# Any Django storage backend is compatible with Sentry. For more solutions see
# the django-storages package: https://django-storages.readthedocs.org/en/latest/
SENTRY_FILESTORE = 'django.core.files.storage.FileSystemStorage'
SENTRY_FILESTORE_OPTIONS = {
'location': '/tmp/sentry-files',
}
##############
# Web Server #
##############
# You MUST configure the absolute URI root for Sentry:
SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!
# If you're using a reverse proxy, you should enable the X-Forwarded-Proto
# header and uncomment the following settings
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SESSION_COOKIE_SECURE = True
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
# 'workers': 3, # the number of gunicorn workers
# 'secure_scheme_headers': {'X-FORWARDED-PROTO': 'https'},
}
###############
# Mail Server #
###############
# For more information check Django's documentation:
# https://docs.djangoproject.com/en/1.3/topics/email/?from=olddocs#e-mail-backends
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# The email address to send on behalf of
SERVER_EMAIL = 'root@localhost'
# If you're using mailgun for inbound mail, set your API key and configure a
# route to forward to /api/hooks/mailgun/inbound/
MAILGUN_API_KEY = ''
########
# etc. #
########
# If this file ever becomes compromised, it's important to regenerate your SECRET_KEY
# Changing this value will result in all current sessions being invalidated
SECRET_KEY = %(default_key)r
"""
def generate_settings():
    """
    This command is run when ``default_path`` doesn't exist, or ``init`` is
    run and returns a string representing the default data to put into their
    settings file.
    """
    secret_key = base64.b64encode(os.urandom(KEY_LENGTH))
    return CONFIG_TEMPLATE % {'default_key': secret_key}
def install_plugins(settings):
    """Discover and install Sentry extensions via setuptools entry points.

    Modules advertised under the 'sentry.apps' entry point are appended to
    settings.INSTALLED_APPS; objects under 'sentry.plugins' are handed to
    sentry's plugin registry.  A broken extension is reported to stderr and
    skipped so it cannot abort startup.
    """
    from sentry.plugins import register
    # entry_points={
    #    'sentry.plugins': [
    #        'phabricator = sentry_phabricator.plugins:PhabricatorPlugin'
    #    ],
    # },
    installed_apps = list(settings.INSTALLED_APPS)
    for ep in pkg_resources.iter_entry_points('sentry.apps'):
        try:
            # Load purely to verify the app imports cleanly; the result
            # itself is not needed here.
            ep.load()
        except Exception:
            # sys is already imported at module level; the old local
            # "import sys" here was redundant shadowing.
            import traceback
            sys.stderr.write("Failed to load app %r:\n%s\n" % (ep.name, traceback.format_exc()))
        else:
            installed_apps.append(ep.module_name)
    settings.INSTALLED_APPS = tuple(installed_apps)
    for ep in pkg_resources.iter_entry_points('sentry.plugins'):
        try:
            plugin = ep.load()
        except Exception:
            import traceback
            sys.stderr.write("Failed to load plugin %r:\n%s\n" % (ep.name, traceback.format_exc()))
        else:
            register(plugin)
def initialize_receivers():
    """Import sentry.receivers purely for its side effect of registering
    Django signal handlers."""
    # force signal registration
    import sentry.receivers  # NOQA
def initialize_gevent():
    """Monkey-patch the stdlib for gevent and, when psycopg2 is present,
    make its database driver cooperative as well."""
    from gevent import monkey
    monkey.patch_all()
    try:
        import psycopg2  # NOQA
        have_psycopg2 = True
    except ImportError:
        have_psycopg2 = False
    if have_psycopg2:
        from sentry.utils.gevent import make_psycopg_green
        make_psycopg_green()
def initialize_app(config):
    """Logan initializer: finish wiring Sentry once settings are loaded.

    *config* is the logan config dict; config['settings'] is the loaded
    Django settings module.
    """
    from django.utils import timezone
    from sentry.app import env
    if USE_GEVENT:
        from django.db import connections
        # Allow the default DB connection to be shared across greenlets.
        connections['default'].allow_thread_sharing = True
    # Record where we were configured from and when we started.
    env.data['config'] = config.get('config_path')
    env.data['start_date'] = timezone.now()
    settings = config['settings']
    fix_south(settings)
    install_plugins(settings)
    apply_legacy_settings(config)
    # Commonly setups don't correctly configure themselves for production envs
    # so lets try to provide a bit more guidance
    if settings.CELERY_ALWAYS_EAGER and not settings.DEBUG:
        warnings.warn('Sentry is configured to run asynchronous tasks in-process. '
                      'This is not recommended within production environments. '
                      'See https://docs.getsentry.com/on-premise/server/queue/ for more information.')
    if settings.SENTRY_SINGLE_ORGANIZATION:
        settings.SENTRY_FEATURES['organizations:create'] = False
    # django-sudo cookies mirror the session cookie security settings.
    settings.SUDO_COOKIE_SECURE = getattr(settings, 'SESSION_COOKIE_SECURE', False)
    settings.SUDO_COOKIE_DOMAIN = getattr(settings, 'SESSION_COOKIE_DOMAIN', None)
    initialize_receivers()
    validate_backends()
def validate_backends():
    """Run each configured service backend's validate() hook so that
    misconfiguration fails fast at startup."""
    from sentry import app
    for backend in (app.buffer, app.nodestore, app.quotas,
                    app.search, app.ratelimiter, app.tsdb):
        backend.validate()
def fix_south(settings):
    """Install the South database adapter when the custom sentry
    postgres engine is in use (South cannot infer it by itself)."""
    engine = settings.DATABASES['default']['ENGINE']
    if engine == 'sentry.db.postgres':
        settings.SOUTH_DATABASE_ADAPTERS = {
            'default': 'south.db.postgresql_psycopg2'
        }
def show_big_error(message):
    """Write *message* to stderr inside a loud red exclamation banner."""
    bar = '!' * min(len(message), 80)
    err = sys.stderr
    err.write('\n')
    err.write('\033[91m!! %s !!\033[0m\n' % bar)
    err.write('\033[91m!! %s !!\033[0m\n' % message)
    err.write('\033[91m!! %s !!\033[0m\n' % bar)
    err.write('\n')
def apply_legacy_settings(config):
    """Translate deprecated settings to their modern equivalents and
    loudly flag required-but-missing configuration."""
    settings = config['settings']
    # SENTRY_USE_QUEUE used to determine if Celery was eager or not
    if hasattr(settings, 'SENTRY_USE_QUEUE'):
        warnings.warn('SENTRY_USE_QUEUE is deprecated. Please use CELERY_ALWAYS_EAGER instead. '
                      'See https://docs.getsentry.com/on-premise/server/queue/ for more information.', DeprecationWarning)
        settings.CELERY_ALWAYS_EAGER = (not settings.SENTRY_USE_QUEUE)
    if not settings.SENTRY_ADMIN_EMAIL:
        show_big_error('SENTRY_ADMIN_EMAIL is not configured')
    # NOTE: basestring means this module targets Python 2 only.
    elif not isinstance(settings.SENTRY_ADMIN_EMAIL, basestring):
        show_big_error('SENTRY_ADMIN_EMAIL must be a string')
    if settings.SENTRY_URL_PREFIX in ('', 'http://sentry.example.com') and not settings.DEBUG:
        # Maybe also point to a piece of documentation for more information?
        # This directly coincides with users getting the awkward
        # `ALLOWED_HOSTS` exception.
        show_big_error('SENTRY_URL_PREFIX is not configured')
        # Set `ALLOWED_HOSTS` to the catch-all so it works
        settings.ALLOWED_HOSTS = ['*']
    if settings.TIME_ZONE != 'UTC':
        # non-UTC timezones are not supported
        show_big_error('TIME_ZONE should be set to UTC')
    # Set ALLOWED_HOSTS if it's not already available
    if not settings.ALLOWED_HOSTS:
        from urlparse import urlparse
        urlbits = urlparse(settings.SENTRY_URL_PREFIX)
        if urlbits.hostname:
            settings.ALLOWED_HOSTS = (urlbits.hostname,)
    if hasattr(settings, 'SENTRY_ALLOW_REGISTRATION'):
        warnings.warn('SENTRY_ALLOW_REGISTRATION is deprecated. Use SENTRY_FEATURES instead.', DeprecationWarning)
        settings.SENTRY_FEATURES['auth:register'] = settings.SENTRY_ALLOW_REGISTRATION
def skip_migration_if_applied(settings, app_name, table_name,
                              name='0001_initial'):
    """Monkeypatch a South migration so it becomes a no-op when its
    target table already exists (e.g. was created by another code path).

    Does nothing when *app_name* is not installed.
    """
    from south.migration import Migrations
    from sentry.utils.db import table_exists
    import types
    if app_name not in settings.INSTALLED_APPS:
        return
    migration = Migrations(app_name)[name]
    def skip_if_table_exists(original):
        # Wrap the migration's forwards() so it short-circuits when the
        # table is already present.
        def wrapped(self):
            # TODO: look into why we're having to return some ridiculous
            # lambda
            if table_exists(table_name):
                return lambda x=None: None
            return original()
        wrapped.__name__ = original.__name__
        return wrapped
    # Rebind forwards as a bound method wrapping the original.
    migration.forwards = types.MethodType(
        skip_if_table_exists(migration.forwards), migration)
def on_configure(config):
    """
    Executes after settings are fully installed and configured.
    """
    settings = config['settings']
    # Skip initial migrations for apps whose tables already exist.
    for app_name, table_name in (
            ('kombu.contrib.django', 'djkombu_queue'),
            ('social_auth', 'social_auth_association')):
        skip_migration_if_applied(settings, app_name, table_name)
def configure(config_path=None):
    """Configure the Sentry application via logan without serving it."""
    app_options = {
        'project': 'sentry',
        'config_path': config_path,
        'default_config_path': '~/.sentry/sentry.conf.py',
        'default_settings': 'sentry.conf.server',
        'settings_initializer': generate_settings,
        'settings_envvar': 'SENTRY_CONF',
        'initializer': initialize_app,
        'on_configure': on_configure,
    }
    configure_app(**app_options)
def main():
    """Console entry point: boot the Sentry application via logan."""
    if USE_GEVENT:
        sys.stderr.write("Configuring Sentry with gevent bindings\n")
        initialize_gevent()
    app_options = {
        'project': 'sentry',
        'default_config_path': '~/.sentry/sentry.conf.py',
        'default_settings': 'sentry.conf.server',
        'settings_initializer': generate_settings,
        'settings_envvar': 'SENTRY_CONF',
        'initializer': initialize_app,
    }
    run_app(**app_options)
# Script entry point: run the Sentry console application directly.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
from oslo_log import log as logging
import yaml
from tempest import config
from tempest import exceptions
from tempest.services.identity.v2.json import identity_client
import tempest_lib.auth
from tempest_lib.common.utils import data_utils
import tempest_lib.exceptions
LOG = None
CONF = config.CONF
def setup_logging():
    """Initialize oslo.log from CONF and bind the module-level LOG handle."""
    global LOG
    logging.setup(CONF, __name__)
    LOG = logging.getLogger(__name__)
def keystone_admin(opts):
    """Build an admin Keystone v2 identity client.

    Authenticates with the username/password/tenant from *opts* and
    returns an IdentityClientJSON bound to the adminURL endpoint.
    """
    creds = tempest_lib.auth.KeystoneV2Credentials(
        username=opts.os_username,
        password=opts.os_password,
        tenant_name=opts.os_tenant_name)

    # SSL / tracing options shared by the auth provider and the client.
    ssl_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests,
    }
    auth = tempest_lib.auth.KeystoneV2AuthProvider(
        creds, CONF.identity.uri, **ssl_params)

    # The identity client additionally takes build polling parameters.
    client_params = dict(ssl_params)
    client_params['build_interval'] = CONF.compute.build_interval
    client_params['build_timeout'] = CONF.compute.build_timeout

    return identity_client.IdentityClientJSON(
        auth,
        CONF.identity.catalog_type,
        CONF.identity.region,
        endpoint_type='adminURL',
        **client_params
    )
def create_resources(opts, resources):
    """Create the tenants and users described in *resources* and assign roles.

    Existing tenants are reused; a user name that already exists in its
    tenant is regenerated with random_user_name() until a free one is found.

    :param opts: parsed CLI options (credentials, tag, ...).
    :param resources: dict with 'tenants' and 'users' lists as produced by
        generate_resources(); user dicts are mutated in place ('role_ids'
        added, 'name' possibly regenerated).
    :raises exceptions.TempestException: if a requested role does not exist.
    """
    admin = keystone_admin(opts)
    roles = admin.list_roles()
    for u in resources['users']:
        u['role_ids'] = []
        for r in u.get('roles', ()):
            # Resolve role names to ids up front so a bad name fails fast.
            # (Was filter(...)[0], which breaks on Python 3 where filter()
            # returns an iterator; a comprehension works on both.)
            matching = [role for role in roles if role['name'] == r]
            if not matching:
                raise exceptions.TempestException(
                    "Role: %s - doesn't exist" % r
                )
            u['role_ids'] += [matching[0]['id']]
    existing = [x['name'] for x in admin.list_tenants()]
    for tenant in resources['tenants']:
        if tenant not in existing:
            admin.create_tenant(tenant)
        else:
            LOG.warn("Tenant '%s' already exists in this environment" % tenant)
    LOG.info('Tenants created')
    for u in resources['users']:
        try:
            tenant = admin.get_tenant_by_name(u['tenant'])
        except tempest_lib.exceptions.NotFound:
            LOG.error("Tenant: %s - not found" % u['tenant'])
            continue
        while True:
            try:
                admin.get_user_by_username(tenant['id'], u['name'])
            except tempest_lib.exceptions.NotFound:
                admin.create_user(
                    u['name'], u['pass'], tenant['id'],
                    "%s@%s" % (u['name'], tenant['id']),
                    enabled=True)
                break
            else:
                # Name clash: generate a fresh random name and retry.
                LOG.warn("User '%s' already exists in this environment. "
                         "New name generated" % u['name'])
                u['name'] = random_user_name(opts.tag, u['prefix'])
    LOG.info('Users created')
    for u in resources['users']:
        try:
            tenant = admin.get_tenant_by_name(u['tenant'])
        except tempest_lib.exceptions.NotFound:
            LOG.error("Tenant: %s - not found" % u['tenant'])
            continue
        try:
            user = admin.get_user_by_username(tenant['id'],
                                              u['name'])
        except tempest_lib.exceptions.NotFound:
            # Bug fix: was u['user'], a key that never exists in the user
            # dict, so this error path itself raised KeyError.
            LOG.error("User: %s - not found" % u['name'])
            continue
        for r in u['role_ids']:
            try:
                admin.assign_user_role(tenant['id'], user['id'], r)
            except tempest_lib.exceptions.Conflict:
                # don't care if it's already assigned
                pass
    LOG.info('Roles assigned')
    LOG.info('Resources deployed successfully!')
def random_user_name(tag, prefix):
    """Return a randomized user name, namespaced by *tag* when one is given."""
    base = '-'.join((tag, prefix)) if tag else prefix
    return data_utils.rand_name(base)
def generate_resources(opts):
    """Build the tenant/user specification for every concurrent test thread.

    For each concurrency slot, one primary, alt, swift_admin and
    stack_owner user is generated (plus an admin user when opts.admin
    is set), each in its own randomly named tenant.
    """
    base_roles = CONF.auth.tempest_roles
    operator_role = CONF.object_storage.operator_role
    spec = [
        {'number': 1,
         'prefix': 'primary',
         'roles': base_roles + [operator_role]},
        {'number': 1,
         'prefix': 'alt',
         'roles': base_roles + [operator_role]},
        {'number': 1,
         'prefix': 'swift_admin',
         'roles': base_roles + [operator_role,
                                CONF.object_storage.reseller_admin_role]},
        {'number': 1,
         'prefix': 'stack_owner',
         'roles': base_roles + [CONF.orchestration.stack_owner_role]},
    ]
    if opts.admin:
        spec.append({'number': 1,
                     'prefix': 'admin',
                     'roles': base_roles + [CONF.identity.admin_role]})

    resources = {'tenants': [], 'users': []}
    for _ in range(opts.concurrency):
        for group in spec:
            for _ in range(group['number']):
                user = random_user_name(opts.tag, group['prefix'])
                # One dedicated tenant per generated user.
                tenant = '-'.join((user, 'tenant'))
                resources['tenants'].append(tenant)
                resources['users'].append({
                    'tenant': tenant,
                    'name': user,
                    'pass': data_utils.rand_name(),
                    'prefix': group['prefix'],
                    'roles': group['roles'],
                })
    return resources
def dump_accounts(opts, resources):
    """Write the generated credentials to the accounts YAML file.

    Any existing file at opts.accounts is preserved as '<name>.bak'
    before the new file is written.
    """
    accounts = [{'username': u['name'],
                 'tenant_name': u['tenant'],
                 'password': u['pass'],
                 'roles': u['roles']}
                for u in resources['users']]

    # Keep a backup of any previous accounts file rather than clobbering it.
    if os.path.exists(opts.accounts):
        os.rename(opts.accounts, '.'.join((opts.accounts, 'bak')))
    with open(opts.accounts, 'w') as f:
        yaml.dump(accounts, f, default_flow_style=False)
    LOG.info('%s generated successfully!' % opts.accounts)
def get_options():
    """Parse command-line options for the account generator.

    :returns: argparse.Namespace carrying the credentials, tag,
        concurrency count, admin flag and output accounts file path.
        When --config-file is given, the tempest config path is set as
        a side effect.
    """
    usage_string = ('account_generator [-h] <ARG> ...\n\n'
                    'To see help on specific argument, do:\n'
                    'account_generator <ARG> -h')
    parser = argparse.ArgumentParser(
        description='Create accounts.yaml file for concurrent test runs. '
                    'One primary user, one alt user, '
                    'one swift admin, one stack owner '
                    'and one admin (optionally) will be created '
                    'for each concurrent thread.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        usage=usage_string
    )
    parser.add_argument('-c', '--config-file',
                        metavar='/etc/tempest.conf',
                        help='path to tempest config file')
    parser.add_argument('--os-username',
                        metavar='<auth-user-name>',
                        default=os.environ.get('OS_USERNAME'),
                        help='User should have permissions '
                             'to create new user accounts and '
                             'tenants. Defaults to env[OS_USERNAME].')
    parser.add_argument('--os-password',
                        metavar='<auth-password>',
                        default=os.environ.get('OS_PASSWORD'),
                        help='Defaults to env[OS_PASSWORD].')
    parser.add_argument('--os-tenant-name',
                        metavar='<auth-tenant-name>',
                        default=os.environ.get('OS_TENANT_NAME'),
                        help='Defaults to env[OS_TENANT_NAME].')
    parser.add_argument('--tag',
                        default='',
                        required=False,
                        dest='tag',
                        help='Resources tag')
    # Bug fix: this argument declared both default=1 and required=True;
    # with required=True the default was dead and the help output
    # (ArgumentDefaultsHelpFormatter) advertised a default that never
    # applied. The argument is now genuinely optional with default 1.
    parser.add_argument('-r', '--concurrency',
                        default=1,
                        type=int,
                        dest='concurrency',
                        help='Concurrency count')
    parser.add_argument('--with-admin',
                        action='store_true',
                        dest='admin',
                        help='Create admin in every tenant')
    parser.add_argument('accounts',
                        metavar='accounts_file.yaml',
                        help='Output accounts yaml file')
    opts = parser.parse_args()
    if opts.config_file:
        config.CONF.set_config_path(opts.config_file)
    return opts
def main(opts=None):
    """Script entry point: generate resources, create them, dump to YAML."""
    opts = opts or get_options()
    setup_logging()
    resources = generate_resources(opts)
    create_resources(opts, resources)
    dump_accounts(opts, resources)
# Allow this module to be executed directly as a script.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.