code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
#!/usr/bin/python # # \file 2_build.py # \brief Build sound # \date 2009-06-03 10:47GMT # \author Jan Boon (Kaetemi) # Python port of game data build pipeline. # Build sound # # NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/> # Copyright (C) 2010 Winch Gate Property Limited # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import time, sys, os, shutil, subprocess, distutils.dir_util sys.path.append("../../configuration") if os.path.isfile("log.log"): os.remove("log.log") log = open("log.log", "w") from scripts import * from buildsite import * from process import * from tools import * from directories import * printLog(log, "") printLog(log, "-------") printLog(log, "--- Build sound") printLog(log, "-------") printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time()))) printLog(log, "") # Find tools BuildSound = findTool(log, ToolDirectories, BuildSoundTool, ToolSuffix) printLog(log, "") # For each sound directory printLog(log, ">>> Build sound <<<") if BuildSound == "": toolLogFail(log, BuildSoundTool, ToolSuffix) else: mkPath(log, LeveldesignDirectory) mkPath(log, LeveldesignDfnDirectory) mkPath(log, DatabaseDirectory + "/" + SoundSamplebanksSourceDirectory) mkPath(log, ExportBuildDirectory + "/" + SoundSheetsBuildDirectory) mkPath(log, ExportBuildDirectory + "/" + SoundSamplebanksBuildDirectory) subprocess.call([ BuildSound, 
LeveldesignDirectory, LeveldesignDfnDirectory, DatabaseDirectory + "/" + SoundSamplebanksSourceDirectory, ExportBuildDirectory + "/" + SoundSheetsBuildDirectory ]) moveFilesExtNoTree(log, DatabaseDirectory + "/" + SoundSamplebanksSourceDirectory, ExportBuildDirectory + "/" + SoundSamplebanksBuildDirectory, ".sample_bank") printLog(log, "") log.close() # end of file
unknown
codeparrot/codeparrot-clean
# # Copyright 2013 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility script for maintainer use to upload current version of the answer key spreadsheet to S3. """ import hashlib import boto from . import answer_key BUCKET_NAME = 'zipline-test-data' def main(): with open(answer_key.ANSWER_KEY_PATH, 'r') as f: md5 = hashlib.md5() while True: buf = f.read(1024) if not buf: break md5.update(buf) local_hash = md5.hexdigest() s3_conn = boto.connect_s3() bucket = s3_conn.get_bucket(BUCKET_NAME) key = boto.s3.key.Key(bucket) key.key = "risk/{local_hash}/risk-answer-key.xlsx".format( local_hash=local_hash) key.set_contents_from_filename(answer_key.ANSWER_KEY_PATH) key.set_acl('public-read') download_link = "http://s3.amazonaws.com/{bucket_name}/{key}".format( bucket_name=BUCKET_NAME, key=key.key) print("Uploaded to key: {key}".format(key=key.key)) print("Download link: {download_link}".format(download_link=download_link)) # Now update checksum file with the recently added answer key. # checksum file update will be then need to be commited via git. with open(answer_key.ANSWER_KEY_CHECKSUMS_PATH, 'a') as checksum_file: checksum_file.write(local_hash) checksum_file.write("\n") if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
#pragma once #include <ATen/Parallel.h> #include <ATen/NumericUtils.h> #include <ATen/cpu/vec/vec.h> #include <ATen/cpu/vec/functional.h> #include <ATen/native/ReductionType.h> #include <c10/util/irange.h> #include <ATen/OpMathType.h> #include <ATen/native/cpu/utils.h> namespace at::native { inline namespace CPU_CAPABILITY { using namespace vec; #define AT_DISPATCH_REDUCTION_TYPES(op, ...) \ [&] { \ switch (op) { \ case ReductionType::SUM: { \ static constexpr auto reduce = ReductionType::SUM; \ return __VA_ARGS__(); \ } \ case ReductionType::MEAN: { \ static constexpr auto reduce = ReductionType::MEAN; \ return __VA_ARGS__(); \ } \ case ReductionType::MIN: { \ static constexpr auto reduce = ReductionType::MIN; \ return __VA_ARGS__(); \ } \ case ReductionType::MAX: { \ static constexpr auto reduce = ReductionType::MAX; \ return __VA_ARGS__(); \ } \ case ReductionType::PROD: { \ static constexpr auto reduce = ReductionType::PROD; \ return __VA_ARGS__(); \ } \ } \ }() template <typename scalar_t, ReductionType reduce> inline vec_scalar_t<scalar_t> init_value() { using acc_t = vec_scalar_t<scalar_t>; acc_t val; if (reduce == ReductionType::SUM || reduce == ReductionType::MEAN) { val = static_cast<acc_t>(0); } else if (reduce == ReductionType::PROD) { val = static_cast<acc_t>(1); } else if (reduce == ReductionType::MAX) { val = -std::numeric_limits<acc_t>::infinity(); } else { TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN); val = std::numeric_limits<acc_t>::infinity(); } return val; } template <typename scalar_t, ReductionType reduce> inline vec_scalar_t<scalar_t> init_value(const std::optional<Scalar>& initial) { using acc_t = vec_scalar_t<scalar_t>; if (initial.has_value()) { return initial.value().to<acc_t>(); } else { return init_value<scalar_t, reduce>(); } } template <typename scalar_t> inline void init(scalar_t* out, int64_t size, const vec_scalar_t<scalar_t>& val) { using Vec = Vectorized<vec_scalar_t<scalar_t>>; map<scalar_t>( [val](Vec x) { return 
Vec(val); }, out, out, size); } template <typename scalar_t, ReductionType reduce> inline void init(scalar_t* out, int64_t size, const std::optional<Scalar>& initial) { using acc_t = vec_scalar_t<scalar_t>; acc_t val = init_value<scalar_t, reduce>(initial); init(out, size, val); } // overload with `include_self`, used by scatter_reduce template <typename scalar_t, ReductionType reduce> inline void init(scalar_t* out, int64_t size, bool include_self = false) { using acc_t = vec_scalar_t<scalar_t>; if (!include_self) { acc_t val = init_value<scalar_t, reduce>(); init(out, size, val); } } template <typename scalar_t, ReductionType reduce> inline void _init(scalar_t* self_ptr, at::opmath_type<scalar_t>* buffer_ptr, int64_t size, bool include_self) { if (!include_self) { init<at::opmath_type<scalar_t>, reduce>(buffer_ptr, size, include_self); } else { vec::convert(self_ptr, buffer_ptr, size); } } template <typename scalar_t> inline std::enable_if_t<!std::is_same_v<scalar_t, Vec2>, scalar_t> _max(const scalar_t& x, const scalar_t& y) { return at::_isnan(y) ? y : std::max(x, y); } template <typename scalar_t> inline Vectorized<scalar_t> _max(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) { // vec::maximum propagates NaN return vec::maximum(x, y); } template <typename vec_t> inline std::enable_if_t<std::is_same_v<vec_t, Vec2>, Vec2> _max(const vec_t& x, const vec_t& y) { // vec::maximum propagates NaN return maximum(x, y); } template <typename scalar_t> inline std::enable_if_t<!std::is_same_v<scalar_t, Vec2>, scalar_t> _min(const scalar_t& x, const scalar_t& y) { return at::_isnan(y) ? 
y : std::min(x, y); } template <typename scalar_t> inline Vectorized<scalar_t> _min(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) { // vec::minimum propagates NaN return vec::minimum(x, y); } template <typename vec_t> inline std::enable_if_t<std::is_same_v<vec_t, Vec2>, Vec2> _min(const vec_t& x, const vec_t& y) { // vec::minimum propagates NaN return minimum(x, y); } template <typename scalar_t, typename accumut, typename Op, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map_acc( const Op& vec_fun, accumut* output_data, const accumut* input_data, const scalar_t* input_data2, int64_t size) { using Vec = vec::Vectorized<scalar_t>; using aVec = vec::Vectorized<accumut>; int64_t d = 0; constexpr int64_t kVecSize = Vec::size(); constexpr int64_t kaVecSize = aVec::size(); for (d = 0; d < size - (size % kVecSize); d += kVecSize) { Vec data2_vec = Vec::loadu(input_data2 + d); auto [data2_avec0, data2_avec1] = convert_to_float<scalar_t>(data2_vec); aVec input_vec0 = aVec::loadu(input_data + d); aVec input_vec1 = aVec::loadu(input_data + d + kaVecSize); vec_fun(input_vec0, data2_avec0).store(output_data + d); vec_fun(input_vec1, data2_avec1).store(output_data + d + kaVecSize); } if (size - d > 0) { int64_t tail_size = size - d; Vec data2_vec = Vec::loadu(input_data2 + d, tail_size); auto [data2_avec0, data2_avec1] = convert_to_float<scalar_t>(data2_vec); if (tail_size > kaVecSize) { aVec input_vec0 = aVec::loadu(input_data + d); aVec input_vec1 = aVec::loadu(input_data + d + kaVecSize, tail_size - kaVecSize); vec_fun(input_vec0, data2_avec0).store(output_data + d); vec_fun(input_vec1, data2_avec1).store(output_data + d + kaVecSize, tail_size - kaVecSize); } else { aVec input_vec0 = aVec::loadu(input_data + d, tail_size); vec_fun(input_vec0, data2_avec0).store(output_data + d, tail_size); } } } // for Max and Min, propagate NaN: template <typename T, ReductionType reduce> inline T update(const T& x, const T& y) { if 
(reduce == ReductionType::SUM || reduce == ReductionType::MEAN) { return x + y; } else if (reduce == ReductionType::PROD) { return x * y; } else if (reduce == ReductionType::MAX) { return _max(x, y); } else { TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN); return _min(x, y); } } template <typename scalar_t, ReductionType reduce> inline void update(scalar_t* out, const scalar_t* data, int64_t K) { using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>; map2<scalar_t>( [](Vec x, Vec y) { return update<Vec, reduce>(x, y); }, out, out, data, K); } template <typename scalar_t, ReductionType reduce, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline void update(at::opmath_type<scalar_t>* out, const scalar_t* data, int64_t K) { using opmath_t = at::opmath_type<scalar_t>; using Vec = vec::Vectorized<opmath_t>; map_acc<scalar_t, opmath_t>( [](Vec x, Vec y) { return update<Vec, reduce>(x, y); }, out, out, data, K); } template <typename scalar_t, ReductionType reduce> inline void write(scalar_t* out, int64_t count, int64_t K) { using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>; if (reduce == ReductionType::MEAN) { if (count > 0) { vec::map<scalar_t>( [count](Vec x) { return x / Vec(count); }, out, out, K); } } } } // namespace CPU_CAPABILITY } // namespace at::native
c
github
https://github.com/pytorch/pytorch
aten/src/ATen/native/cpu/ReduceUtils.h
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.--> *This model was released on 2021-06-11 and added to Hugging Face Transformers on 2023-09-01.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # VITS [VITS (Variational Inference with adversarial learning for end-to-end Text-to-Speech)](https://huggingface.co/papers/2106.06103) is a end-to-end speech synthesis model, simplifying the traditional two-stage text-to-speech (TTS) systems. It's unique because it directly synthesizes speech from text using variational inference, adversarial learning, and normalizing flows to produce natural and expressive speech with diverse rhythms and intonations. You can find all the original VITS checkpoints under the [AI at Meta](https://huggingface.co/facebook?search_models=mms-tts) organization. > [!TIP] > Click on the VITS models in the right sidebar for more examples of how to apply VITS. The example below demonstrates how to generate text based on an image with [`Pipeline`] or the [`AutoModel`] class. 
<hfoptions id="usage"> <hfoption id="Pipeline"> ```python import torch from transformers import pipeline, set_seed from scipy.io.wavfile import write set_seed(555) pipe = pipeline( task="text-to-speech", model="facebook/mms-tts-eng", dtype=torch.float16, device=0 ) speech = pipe("Hello, my dog is cute") # Extract audio data and sampling rate audio_data = speech["audio"] sampling_rate = speech["sampling_rate"] # Save as WAV file write("hello.wav", sampling_rate, audio_data.squeeze()) ``` </hfoption> <hfoption id="AutoModel"> ```python import torch import scipy from IPython.display import Audio from transformers import AutoTokenizer, VitsModel, set_seed tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng") model = VitsModel.from_pretrained("facebook/mms-tts-eng", device_map="auto", dtype=torch.float16) inputs = tokenizer("Hello, my dog is cute", return_tensors="pt").to(model.device) set_seed(555) with torch.no_grad(): outputs = model(**inputs) waveform = outputs.waveform[0] scipy.io.wavfile.write("hello.wav", rate=model.config.sampling_rate, data=waveform) # display in Colab notebook Audio(waveform, rate=model.config.sampling_rate) ``` </hfoption> </hfoptions> ## Notes - Set a seed for reproducibility because VITS synthesizes speech non-deterministically. - For languages with non-Roman alphabets (Korean, Arabic, etc.), install the [uroman](https://github.com/isi-nlp/uroman) package to preprocess the text inputs to the Roman alphabet. You can check if the tokenizer requires uroman as shown below. ```py # pip install -U uroman from transformers import VitsTokenizer tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") print(tokenizer.is_uroman) ``` If your language requires uroman, the tokenizer automatically applies it to the text inputs. Python >= 3.10 doesn't require any additional preprocessing steps. For Python < 3.10, follow the steps below. 
```bash git clone https://github.com/isi-nlp/uroman.git cd uroman export UROMAN=$(pwd) ``` Create a function to preprocess the inputs. You can either use the bash variable `UROMAN` or pass the directory path directly to the function. ```py import torch from transformers import VitsTokenizer, VitsModel, set_seed import os import subprocess tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-kor") model = VitsModel.from_pretrained("facebook/mms-tts-kor") def uromanize(input_string, uroman_path): """Convert non-Roman strings to Roman using the `uroman` perl package.""" script_path = os.path.join(uroman_path, "bin", "uroman.pl") command = ["perl", script_path] process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Execute the perl command stdout, stderr = process.communicate(input=input_string.encode()) if process.returncode != 0: raise ValueError(f"Error {process.returncode}: {stderr.decode()}") # Return the output as a string and skip the new-line character at the end return stdout.decode()[:-1] text = "이봐 무슨 일이야" uromanized_text = uromanize(text, uroman_path=os.environ["UROMAN"]) inputs = tokenizer(text=uromanized_text, return_tensors="pt") set_seed(555) # make deterministic with torch.no_grad(): outputs = model(inputs["input_ids"]) waveform = outputs.waveform[0] ``` ## VitsConfig [[autodoc]] VitsConfig ## VitsTokenizer [[autodoc]] VitsTokenizer - __call__ - save_vocabulary ## VitsModel [[autodoc]] VitsModel - forward
unknown
github
https://github.com/huggingface/transformers
docs/source/en/model_doc/vits.md
/* * Copyright (c) 2007 Mockito contributors * This program is made available under the terms of the MIT License. */ package org.mockitousage.bugs.injection; import static org.junit.Assert.assertNotNull; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; // issue 289 @RunWith(MockitoJUnitRunner.class) public class ChildWithSameParentFieldInjectionTest { @InjectMocks private System system; @Mock private SomeService someService; @Test public void parent_field_is_not_null() { assertNotNull(((AbstractSystem) system).someService); } @Test public void child_field_is_not_null() { assertNotNull(system.someService); } public static class System extends AbstractSystem { private SomeService someService; public void doSomethingElse() { someService.doSomething(); } } public static class AbstractSystem { private SomeService someService; public void doSomething() { someService.doSomething(); } } public static class SomeService { public void doSomething() {} } }
java
github
https://github.com/mockito/mockito
mockito-core/src/test/java/org/mockitousage/bugs/injection/ChildWithSameParentFieldInjectionTest.java
/* * contrib/spi/insert_username.c * * insert user name in response to a trigger * usage: insert_username (column_name) */ #include "postgres.h" #include "access/htup_details.h" #include "catalog/pg_type.h" #include "commands/trigger.h" #include "executor/spi.h" #include "miscadmin.h" #include "utils/builtins.h" #include "utils/rel.h" PG_MODULE_MAGIC_EXT( .name = "insert_username", .version = PG_VERSION ); PG_FUNCTION_INFO_V1(insert_username); Datum insert_username(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; Trigger *trigger; /* to get trigger name */ int nargs; /* # of arguments */ Datum newval; /* new value of column */ bool newnull; /* null flag */ char **args; /* arguments */ char *relname; /* triggered relation name */ Relation rel; /* triggered relation */ HeapTuple rettuple = NULL; TupleDesc tupdesc; /* tuple description */ int attnum; /* sanity checks from autoinc.c */ if (!CALLED_AS_TRIGGER(fcinfo)) /* internal error */ elog(ERROR, "insert_username: not fired by trigger manager"); if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) /* internal error */ elog(ERROR, "insert_username: must be fired for row"); if (!TRIGGER_FIRED_BEFORE(trigdata->tg_event)) /* internal error */ elog(ERROR, "insert_username: must be fired before event"); if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) rettuple = trigdata->tg_trigtuple; else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) rettuple = trigdata->tg_newtuple; else /* internal error */ elog(ERROR, "insert_username: cannot process DELETE events"); rel = trigdata->tg_relation; relname = SPI_getrelname(rel); trigger = trigdata->tg_trigger; nargs = trigger->tgnargs; if (nargs != 1) /* internal error */ elog(ERROR, "insert_username (%s): one argument was expected", relname); args = trigger->tgargs; tupdesc = rel->rd_att; attnum = SPI_fnumber(tupdesc, args[0]); if (attnum <= 0) ereport(ERROR, (errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION), errmsg("\"%s\" has no attribute \"%s\"", relname, 
args[0]))); if (SPI_gettypeid(tupdesc, attnum) != TEXTOID) ereport(ERROR, (errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION), errmsg("attribute \"%s\" of \"%s\" must be type TEXT", args[0], relname))); /* create fields containing name */ newval = CStringGetTextDatum(GetUserNameFromId(GetUserId(), false)); newnull = false; /* construct new tuple */ rettuple = heap_modify_tuple_by_cols(rettuple, tupdesc, 1, &attnum, &newval, &newnull); pfree(relname); return PointerGetDatum(rettuple); }
c
github
https://github.com/postgres/postgres
contrib/spi/insert_username.c
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ class account_change_currency(osv.osv_memory): _name = 'account.change.currency' _description = 'Change Currency' _columns = { 'currency_id': fields.many2one('res.currency', 'Change to', required=True, help="Select a currency to apply on the invoice"), } def view_init(self, cr , uid , fields_list, context=None): obj_inv = self.pool.get('account.invoice') if context is None: context = {} if context.get('active_id',False): if obj_inv.browse(cr, uid, context['active_id']).state != 'draft': raise osv.except_osv(_('Error!'), _('You can only change currency for Draft Invoice.')) pass def change_currency(self, cr, uid, ids, context=None): obj_inv = self.pool.get('account.invoice') obj_inv_line = self.pool.get('account.invoice.line') obj_currency = self.pool.get('res.currency') if context is None: context = {} data = self.browse(cr, uid, ids, context=context)[0] new_currency = data.currency_id.id invoice = obj_inv.browse(cr, uid, context['active_id'], context=context) if 
invoice.currency_id.id == new_currency: return {} rate = obj_currency.browse(cr, uid, new_currency, context=context).rate for line in invoice.invoice_line: new_price = 0 if invoice.company_id.currency_id.id == invoice.currency_id.id: new_price = line.price_unit * rate if new_price <= 0: raise osv.except_osv(_('Error!'), _('New currency is not configured properly.')) if invoice.company_id.currency_id.id != invoice.currency_id.id and invoice.company_id.currency_id.id == new_currency: old_rate = invoice.currency_id.rate if old_rate <= 0: raise osv.except_osv(_('Error!'), _('Current currency is not configured properly.')) new_price = line.price_unit / old_rate if invoice.company_id.currency_id.id != invoice.currency_id.id and invoice.company_id.currency_id.id != new_currency: old_rate = invoice.currency_id.rate if old_rate <= 0: raise osv.except_osv(_('Error!'), _('Current currency is not configured properly.')) new_price = (line.price_unit / old_rate ) * rate obj_inv_line.write(cr, uid, [line.id], {'price_unit': new_price}) obj_inv.write(cr, uid, [invoice.id], {'currency_id': new_currency}, context=context) return {'type': 'ir.actions.act_window_close'} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
//===- extra/modularize/Modularize.cpp - Check modularized headers --------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // Introduction // // This file implements a tool that checks whether a set of headers provides // the consistent definitions required to use modules. It can also check an // existing module map for full coverage of the headers in a directory tree. // // For example, in examining headers, it detects whether the same entity // (say, a NULL macro or size_t typedef) is defined in multiple headers // or whether a header produces different definitions under // different circumstances. These conditions cause modules built from the // headers to behave poorly, and should be fixed before introducing a module // map. // // Modularize takes as input either one or more module maps (by default, // "module.modulemap") or one or more text files containing lists of headers // to check. // // In the case of a module map, the module map must be well-formed in // terms of syntax. Modularize will extract the header file names // from the map. Only normal headers are checked, assuming headers // marked "private", "textual", or "exclude" are not to be checked // as a top-level include, assuming they either are included by // other headers which are checked, or they are not suitable for // modules. // // In the case of a file list, the list is a newline-separated list of headers // to check with respect to each other. // Lines beginning with '#' and empty lines are ignored. // Header file names followed by a colon and other space-separated // file names will include those extra files as dependencies. // The file names can be relative or full paths, but must be on the // same line. 
// // Modularize also accepts regular clang front-end arguments. // // Usage: modularize [(modularize options)] // [(include-files_list)|(module map)]+ [(front-end-options) ...] // // Options: // -prefix=(optional header path prefix) // Note that unless a "-prefix (header path)" option is specified, // non-absolute file paths in the header list file will be relative // to the header list file directory. Use -prefix to specify a // different directory. // -module-map-path=(module map) // Skip the checks, and instead act as a module.modulemap generation // assistant, generating a module map file based on the header list. // An optional "-root-module=(rootName)" argument can specify a root // module to be created in the generated module.modulemap file. Note // that you will likely need to edit this file to suit the needs of // your headers. // -problem-files-list=(problem files list file name) // For use only with module map assistant. Input list of files that // have problems with respect to modules. These will still be // included in the generated module map, but will be marked as // "excluded" headers. // -root-module=(root module name) // Specifies a root module to be created in the generated // module.modulemap file. // -block-check-header-list-only // Only warn if #include directives are inside extern or namespace // blocks if the included header is in the header list. // -no-coverage-check // Don't do the coverage check. // -coverage-check-only // Only do the coverage check. // -display-file-lists // Display lists of good files (no compile errors), problem files, // and a combined list with problem files preceded by a '#'. // This can be used to quickly determine which files have problems. // The latter combined list might be useful in starting to modularize // a set of headers. You can start with a full list of headers, // use -display-file-lists option, and then use the combined list as // your intermediate list, uncommenting-out headers as you fix them. 
// // Note that by default, the modularize assumes .h files contain C++ source. // If your .h files in the file list contain another language, you should // append an appropriate -x option to your command line, i.e.: -x c // // Modularization Issue Checks // // In the process of checking headers for modularization issues, modularize // will do normal parsing, reporting normal errors and warnings, // but will also report special error messages like the following: // // error: '(symbol)' defined at multiple locations: // (file):(row):(column) // (file):(row):(column) // // error: header '(file)' has different contents depending on how it was // included // // The latter might be followed by messages like the following: // // note: '(symbol)' in (file) at (row):(column) not always provided // // Checks will also be performed for macro expansions, defined(macro) // expressions, and preprocessor conditional directives that evaluate // inconsistently, and can produce error messages like the following: // // (...)/SubHeader.h:11:5: // #if SYMBOL == 1 // ^ // error: Macro instance 'SYMBOL' has different values in this header, // depending on how it was included. // 'SYMBOL' expanded to: '1' with respect to these inclusion paths: // (...)/Header1.h // (...)/SubHeader.h // (...)/SubHeader.h:3:9: // #define SYMBOL 1 // ^ // Macro defined here. // 'SYMBOL' expanded to: '2' with respect to these inclusion paths: // (...)/Header2.h // (...)/SubHeader.h // (...)/SubHeader.h:7:9: // #define SYMBOL 2 // ^ // Macro defined here. // // Checks will also be performed for '#include' directives that are // nested inside 'extern "C/C++" {}' or 'namespace (name) {}' blocks, // and can produce error message like the following: // // IncludeInExtern.h:2:3 // #include "Empty.h" // ^ // error: Include directive within extern "C" {}. // IncludeInExtern.h:1:1 // extern "C" { // ^ // The "extern "C" {}" block is here. // // See PreprocessorTracker.cpp for additional details. 
// // Module Map Coverage Check // // The coverage check uses the Clang ModuleMap class to read and parse the // module map file. Starting at the module map file directory, or just the // include paths, if specified, it will collect the names of all the files it // considers headers (no extension, .h, or .inc--if you need more, modify the // isHeader function). It then compares the headers against those referenced // in the module map, either explicitly named, or implicitly named via an // umbrella directory or umbrella file, as parsed by the ModuleMap object. // If headers are found which are not referenced or covered by an umbrella // directory or file, warning messages will be produced, and this program // will return an error code of 1. Other errors result in an error code of 2. // If no problems are found, an error code of 0 is returned. // // Note that in the case of umbrella headers, this tool invokes the compiler // to preprocess the file, and uses a callback to collect the header files // included by the umbrella header or any of its nested includes. If any // front end options are needed for these compiler invocations, these // can be included on the command line after the module map file argument. // // Warning message have the form: // // warning: module.modulemap does not account for file: Level3A.h // // Note that for the case of the module map referencing a file that does // not exist, the module map parser in Clang will (at the time of this // writing) display an error message. // // Module Map Assistant - Module Map Generation // // Modularize also has an option ("-module-map-path=module.modulemap") that will // skip the checks, and instead act as a module.modulemap generation assistant, // generating a module map file based on the header list. An optional // "-root-module=(rootName)" argument can specify a root module to be // created in the generated module.modulemap file. 
Note that you will likely // need to edit this file to suit the needs of your headers. // // An example command line for generating a module.modulemap file: // // modularize -module-map-path=module.modulemap -root-module=myroot \ // headerlist.txt // // Note that if the headers in the header list have partial paths, sub-modules // will be created for the subdirectories involved, assuming that the // subdirectories contain headers to be grouped into a module, but still with // individual modules for the headers in the subdirectory. // // See the ModuleAssistant.cpp file comments for additional details about the // implementation of the assistant mode. // // Future directions: // // Basically, we want to add new checks for whatever we can check with respect // to checking headers for module'ability. // // Some ideas: // // 1. Omit duplicate "not always provided" messages // // 2. Add options to disable any of the checks, in case // there is some problem with them, or the messages get too verbose. // // 3. Try to figure out the preprocessor conditional directives that // contribute to problems and tie them to the inconsistent definitions. // // 4. There are some legitimate uses of preprocessor macros that // modularize will flag as errors, such as repeatedly #include'ing // a file and using interleaving defined/undefined macros // to change declarations in the included file. Is there a way // to address this? Maybe have modularize accept a list of macros // to ignore. Otherwise you can just exclude the file, after checking // for legitimate errors. // // 5. What else? // // General clean-up and refactoring: // // 1. 
The Location class seems to be something that we might
// want to design to be applicable to a wider range of tools, and stick it
// somewhere into Tooling/ in mainline
//
//===----------------------------------------------------------------------===//

#include "Modularize.h"
#include "ModularizeUtilities.h"
#include "PreprocessorTracker.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendAction.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Options/Options.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include <algorithm>
#include <iterator>
#include <map>
#include <string>
#include <vector>

using namespace clang;
using namespace clang::driver;
using namespace clang::options;
using namespace clang::tooling;
using namespace llvm;
using namespace llvm::opt;
using namespace Modularize;

// Command-line options, parsed by llvm::cl.  See the file header comment
// for a user-level description of each option.

// Option to specify a file name for a list of header files to check.
// These are the positional arguments; several list files may be given.
static cl::list<std::string>
    ListFileNames(cl::Positional, cl::value_desc("list"),
                  cl::desc("<list of one or more header list files>"),
                  cl::CommaSeparated);

// Collect all other arguments, which will be passed to the front end.
static cl::list<std::string>
    CC1Arguments(cl::ConsumeAfter,
                 cl::desc("<arguments to be passed to front end>..."));

// Option to specify a prefix to be prepended to the header names.
static cl::opt<std::string> HeaderPrefix(
    "prefix", cl::init(""),
    cl::desc(
        "Prepend header file paths with this prefix." " If not specified,"
        " the files are considered to be relative to the header list file."));

// Option for assistant mode, telling modularize to output a module map
// based on the headers list, and where to put it.
static cl::opt<std::string> ModuleMapPath(
    "module-map-path", cl::init(""),
    cl::desc("Turn on module map output and specify output path or file name."
             " If no path is specified and if prefix option is specified,"
             " use prefix for file path."));

// Option to specify list of problem files for assistant.
// This will cause assistant to exclude these files.
static cl::opt<std::string> ProblemFilesList(
    "problem-files-list", cl::init(""),
    cl::desc(
        "List of files with compilation or modularization problems for"
        " assistant mode. This will be excluded."));

// Option for assistant mode, telling modularize the name of the root module.
static cl::opt<std::string>
    RootModule("root-module", cl::init(""),
               cl::desc("Specify the name of the root module."));

// Option for limiting the #include-inside-extern-or-namespace-block
// check to only those headers explicitly listed in the header list.
// This is a work-around for private includes that purposefully get
// included inside blocks.
static cl::opt<bool> BlockCheckHeaderListOnly(
    "block-check-header-list-only", cl::init(false),
    cl::desc("Only warn if #include directives are inside extern or namespace"
             " blocks if the included header is in the header list."));

// Option for include paths for coverage check.  May be given multiple times.
static cl::list<std::string>
    IncludePaths("I", cl::desc("Include path for coverage check."),
                 cl::value_desc("path"));

// Option for disabling the coverage check.
static cl::opt<bool> NoCoverageCheck("no-coverage-check",
                                     cl::desc("Don't do the coverage check."));

// Option for just doing the coverage check.
static cl::opt<bool>
    CoverageCheckOnly("coverage-check-only", cl::init(false),
                      cl::desc("Only do the coverage check."));

// Option for displaying lists of good, bad, and mixed files.
static cl::opt<bool> DisplayFileLists(
    "display-file-lists", cl::init(false),
    cl::desc("Display lists of good files (no compile errors), problem files,"
             " and a combined list with problem files preceded by a '#'."));

// Save the program name for error messages.
const char *Argv0;
// Save the command line for comments.
std::string CommandLine;

// Helper function for finding the input file in an arguments list.
// Parses CLArgs with the clang driver option table and returns the
// canonical path of the last positional input.
static std::string findInputFile(const CommandLineArguments &CLArgs) {
  llvm::opt::Visibility VisibilityMask(options::CC1Option);
  unsigned MissingArgIndex, MissingArgCount;
  SmallVector<const char *, 256> Argv;
  for (const std::string &CLArg : CLArgs)
    Argv.push_back(CLArg.c_str());
  InputArgList Args = getDriverOptTable().ParseArgs(
      Argv, MissingArgIndex, MissingArgCount, VisibilityMask);
  std::vector<std::string> Inputs = Args.getAllArgValues(OPT_INPUT);
  return ModularizeUtilities::getCanonicalPath(Inputs.back());
}

// This arguments adjuster inserts "-include (file)" arguments for header
// dependencies.  It also inserts a "-w" option and a "-x c++",
// if no other "-x" option is present.
static ArgumentsAdjuster
getModularizeArgumentsAdjuster(DependencyMap &Dependencies) {
  return [&Dependencies](const CommandLineArguments &Args,
                         StringRef /*unused*/) {
    std::string InputFile = findInputFile(Args);
    DependentsVector &FileDependents = Dependencies[InputFile];
    CommandLineArguments NewArgs(Args);
    // Append one "-include <dep>" pair per recorded dependency.
    for (const std::string &Dep : FileDependents) {
      NewArgs.push_back("-include");
      NewArgs.push_back(Dep);
    }
    // Ignore warnings.  (Insert after "clang_tool" at beginning.)
    NewArgs.insert(NewArgs.begin() + 1, "-w");
    // Since we are compiling .h files, assume C++ unless given a -x option.
    if (!llvm::is_contained(NewArgs, "-x")) {
      NewArgs.insert(NewArgs.begin() + 2, "-x");
      NewArgs.insert(NewArgs.begin() + 3, "c++");
    }
    return NewArgs;
  };
}

// FIXME: The Location class seems to be something that we might
// want to design to be applicable to a wider range of tools, and stick it
// somewhere into Tooling/ in mainline

// A (file, line, column) triple identifying where an entity was seen.
// Evaluates to false when it does not refer to a real file.
struct Location {
  OptionalFileEntryRef File;
  unsigned Line = 0, Column = 0;

  Location() = default;

  // Construct from a SourceLocation, resolved to its expansion location.
  // Leaves the Location "false" (no File) for invalid locations.
  Location(SourceManager &SM, SourceLocation Loc) {
    Loc = SM.getExpansionLoc(Loc);
    if (Loc.isInvalid())
      return;

    std::pair<FileID, unsigned> Decomposed = SM.getDecomposedLoc(Loc);
    File = SM.getFileEntryRefForID(Decomposed.first);
    if (!File)
      return;

    Line = SM.getLineNumber(Decomposed.first, Decomposed.second);
    Column = SM.getColumnNumber(Decomposed.first, Decomposed.second);
  }

  explicit operator bool() const { return File != nullptr; }

  friend bool operator==(const Location &X, const Location &Y) {
    return X.File == Y.File && X.Line == Y.Line && X.Column == Y.Column;
  }

  friend bool operator!=(const Location &X, const Location &Y) {
    return !(X == Y);
  }

  // Ordering is lexicographic on (File, Line, Column).
  friend bool operator<(const Location &X, const Location &Y) {
    return std::tie(X.File, X.Line, X.Column) <
           std::tie(Y.File, Y.Line, Y.Column);
  }
  friend bool operator>(const Location &X, const Location &Y) { return Y < X; }
  friend bool operator<=(const Location &X, const Location &Y) {
    return !(Y < X);
  }
  friend bool operator>=(const Location &X, const Location &Y) {
    return !(X < Y);
  }
};

// One sighting of a named entity: its kind and where it was defined.
struct Entry {
  enum EntryKind {
    EK_Tag,
    EK_Value,
    EK_Macro,

    EK_NumberOfKinds
  } Kind;

  Location Loc;

  StringRef getKindName() { return getKindName(Kind); }
  static StringRef getKindName(EntryKind kind);
};

// Return a string representing the given kind.
StringRef Entry::getKindName(Entry::EntryKind kind) {
  switch (kind) {
  case EK_Tag:
    return "tag";
  case EK_Value:
    return "value";
  case EK_Macro:
    return "macro";
  case EK_NumberOfKinds:
    break;
  }
  llvm_unreachable("invalid Entry kind");
}

// A named entity recorded within a particular header.
struct HeaderEntry {
  std::string Name;
  Location Loc;

  friend bool operator==(const HeaderEntry &X, const HeaderEntry &Y) {
    return X.Loc == Y.Loc && X.Name == Y.Name;
  }
  friend bool operator!=(const HeaderEntry &X, const HeaderEntry &Y) {
    return !(X == Y);
  }
  friend bool operator<(const HeaderEntry &X, const HeaderEntry &Y) {
    return std::tie(X.Loc, X.Name) < std::tie(Y.Loc, Y.Name);
  }
  friend bool operator>(const HeaderEntry &X, const HeaderEntry &Y) {
    return Y < X;
  }
  friend bool operator<=(const HeaderEntry &X, const HeaderEntry &Y) {
    return !(Y < X);
  }
  friend bool operator>=(const HeaderEntry &X, const HeaderEntry &Y) {
    return !(X < Y);
  }
};

typedef std::vector<HeaderEntry> HeaderContents;

// Maps entity names to every (kind, location) where they were defined,
// and accumulates per-header contents across compilations so that
// headers whose contents vary by inclusion path can be reported.
class EntityMap : public std::map<std::string, SmallVector<Entry, 2>> {
public:
  // Headers whose recorded contents differed between two compilations;
  // the value holds the symmetric difference of the two content sets.
  DenseMap<FileEntryRef, HeaderContents> HeaderContentMismatches;

  void add(const std::string &Name, enum Entry::EntryKind Kind, Location Loc) {
    // Record this entity in its header.
    HeaderEntry HE = { Name, Loc };
    CurHeaderContents[*Loc.File].push_back(HE);

    // Check whether we've seen this entry before.
    SmallVector<Entry, 2> &Entries = (*this)[Name];
    for (unsigned I = 0, N = Entries.size(); I != N; ++I) {
      if (Entries[I].Kind == Kind && Entries[I].Loc == Loc)
        return;
    }

    // We have not seen this entry before; record it.
    Entry E = { Kind, Loc };
    Entries.push_back(E);
  }

  // Fold the current compilation's per-header contents into the running
  // totals, recording any mismatch against previously seen contents.
  void mergeCurHeaderContents() {
    for (auto H = CurHeaderContents.begin(), HEnd = CurHeaderContents.end();
         H != HEnd; ++H) {
      // Sort contents.
      llvm::sort(H->second);

      // Record this header and its contents if we haven't seen it before.
      auto [KnownH, Inserted] = AllHeaderContents.insert(*H);
      if (Inserted)
        continue;

      // If the header contents are the same, we're done.
      if (H->second == KnownH->second)
        continue;

      // Determine what changed.
      std::set_symmetric_difference(
          H->second.begin(), H->second.end(), KnownH->second.begin(),
          KnownH->second.end(),
          std::back_inserter(HeaderContentMismatches[H->first]));
    }

    CurHeaderContents.clear();
  }

private:
  DenseMap<FileEntryRef, HeaderContents> CurHeaderContents;
  DenseMap<FileEntryRef, HeaderContents> AllHeaderContents;
};

// AST visitor that collects file-scope entity definitions into an
// EntityMap and checks blocks for nested #include directives.
// The Traverse* overrides prune traversal to top-level declarations only.
class CollectEntitiesVisitor
    : public RecursiveASTVisitor<CollectEntitiesVisitor> {
public:
  CollectEntitiesVisitor(SourceManager &SM, EntityMap &Entities,
                         Preprocessor &PP, PreprocessorTracker &PPTracker,
                         int &HadErrors)
      : SM(SM), Entities(Entities), PP(PP), PPTracker(PPTracker),
        HadErrors(HadErrors) {}

  bool TraverseStmt(Stmt *S) { return true; }
  bool TraverseType(QualType T) { return true; }
  bool TraverseTypeLoc(TypeLoc TL) { return true; }
  bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) { return true; }
  bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
    return true;
  }
  bool TraverseDeclarationNameInfo(DeclarationNameInfo NameInfo) {
    return true;
  }
  bool TraverseTemplateName(TemplateName Template) { return true; }
  bool TraverseTemplateArgument(const TemplateArgument &Arg) { return true; }
  bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc) {
    return true;
  }
  bool TraverseTemplateArguments(ArrayRef<TemplateArgument>) { return true; }
  bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
    return true;
  }
  bool TraverseLambdaCapture(LambdaExpr *LE, const LambdaCapture *C,
                             Expr *Init) {
    return true;
  }

  // Check 'extern "*" {}' block for #include directives.
  bool VisitLinkageSpecDecl(LinkageSpecDecl *D) {
    // Bail if not a block.
    if (!D->hasBraces())
      return true;
    SourceRange BlockRange = D->getSourceRange();
    const char *LinkageLabel;
    switch (D->getLanguage()) {
    case LinkageSpecLanguageIDs::C:
      LinkageLabel = "extern \"C\" {}";
      break;
    case LinkageSpecLanguageIDs::CXX:
      LinkageLabel = "extern \"C++\" {}";
      break;
    }
    if (!PPTracker.checkForIncludesInBlock(PP, BlockRange, LinkageLabel,
                                           errs()))
      HadErrors = 1;
    return true;
  }

  // Check 'namespace (name) {}' block for #include directives.
  bool VisitNamespaceDecl(const NamespaceDecl *D) {
    SourceRange BlockRange = D->getSourceRange();
    std::string Label("namespace ");
    Label += D->getName();
    Label += " {}";
    if (!PPTracker.checkForIncludesInBlock(PP, BlockRange, Label.c_str(),
                                           errs()))
      HadErrors = 1;
    return true;
  }

  // Collect definition entities.
  bool VisitNamedDecl(NamedDecl *ND) {
    // We only care about file-context variables.
    if (!ND->getDeclContext()->isFileContext())
      return true;

    // Skip declarations that tend to be properly multiply-declared.
    if (isa<NamespaceDecl>(ND) || isa<UsingDirectiveDecl>(ND) ||
        isa<NamespaceAliasDecl>(ND) ||
        isa<ClassTemplateSpecializationDecl>(ND) || isa<UsingDecl>(ND) ||
        isa<ClassTemplateDecl>(ND) || isa<TemplateTypeParmDecl>(ND) ||
        isa<TypeAliasTemplateDecl>(ND) || isa<UsingShadowDecl>(ND) ||
        isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
        (isa<TagDecl>(ND) &&
         !cast<TagDecl>(ND)->isThisDeclarationADefinition()))
      return true;

    // Skip anonymous declarations.
    if (!ND->getDeclName())
      return true;

    // Get the qualified name.
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    ND->printQualifiedName(OS);
    if (Name.empty())
      return true;

    Location Loc(SM, ND->getLocation());
    if (!Loc)
      return true;

    Entities.add(Name, isa<TagDecl>(ND) ? Entry::EK_Tag : Entry::EK_Value,
                 Loc);
    return true;
  }

private:
  SourceManager &SM;
  EntityMap &Entities;
  Preprocessor &PP;
  PreprocessorTracker &PPTracker;
  int &HadErrors;
};

// ASTConsumer that drives CollectEntitiesVisitor and also records macro
// definitions.  Its lifetime brackets the per-file preprocessor tracking.
class CollectEntitiesConsumer : public ASTConsumer {
public:
  CollectEntitiesConsumer(EntityMap &Entities,
                          PreprocessorTracker &preprocessorTracker,
                          Preprocessor &PP, StringRef InFile, int &HadErrors)
      : Entities(Entities), PPTracker(preprocessorTracker), PP(PP),
        HadErrors(HadErrors) {
    PPTracker.handlePreprocessorEntry(PP, InFile);
  }

  ~CollectEntitiesConsumer() override { PPTracker.handlePreprocessorExit(); }

  void HandleTranslationUnit(ASTContext &Ctx) override {
    SourceManager &SM = Ctx.getSourceManager();

    // Collect declared entities.
    CollectEntitiesVisitor(SM, Entities, PP, PPTracker, HadErrors)
        .TraverseDecl(Ctx.getTranslationUnitDecl());

    // Collect macro definitions.
    for (Preprocessor::macro_iterator M = PP.macro_begin(),
                                      MEnd = PP.macro_end();
         M != MEnd; ++M) {
      Location Loc(SM, M->second.getLatest()->getLocation());
      if (!Loc)
        continue;
      Entities.add(M->first->getName().str(), Entry::EK_Macro, Loc);
    }

    // Merge header contents.
    Entities.mergeCurHeaderContents();
  }

private:
  EntityMap &Entities;
  PreprocessorTracker &PPTracker;
  Preprocessor &PP;
  int &HadErrors;
};

// Frontend action wiring the entity-collecting consumer into a
// syntax-only compilation.
class CollectEntitiesAction : public SyntaxOnlyAction {
public:
  CollectEntitiesAction(EntityMap &Entities,
                        PreprocessorTracker &preprocessorTracker,
                        int &HadErrors)
      : Entities(Entities), PPTracker(preprocessorTracker),
        HadErrors(HadErrors) {}

protected:
  std::unique_ptr<clang::ASTConsumer>
  CreateASTConsumer(CompilerInstance &CI, StringRef InFile) override {
    return std::make_unique<CollectEntitiesConsumer>(
        Entities, PPTracker, CI.getPreprocessor(), InFile, HadErrors);
  }

private:
  EntityMap &Entities;
  PreprocessorTracker &PPTracker;
  int &HadErrors;
};

class ModularizeFrontendActionFactory : public FrontendActionFactory {
public:
  ModularizeFrontendActionFactory(EntityMap &Entities,
                                  PreprocessorTracker &preprocessorTracker,
                                  int &HadErrors)
      : Entities(Entities), PPTracker(preprocessorTracker),
        HadErrors(HadErrors) {}

  std::unique_ptr<FrontendAction> create() override {
    return std::make_unique<CollectEntitiesAction>(Entities, PPTracker,
                                                   HadErrors);
  }

private:
  EntityMap &Entities;
  PreprocessorTracker &PPTracker;
  int &HadErrors;
};

// A do-nothing visitor used for the stand-alone compile check pass;
// all traversal and visitation is stubbed out because only the
// compilation's success/failure matters.
class CompileCheckVisitor : public RecursiveASTVisitor<CompileCheckVisitor> {
public:
  CompileCheckVisitor() {}

  bool TraverseStmt(Stmt *S) { return true; }
  bool TraverseType(QualType T) { return true; }
  bool TraverseTypeLoc(TypeLoc TL) { return true; }
  bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) { return true; }
  bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
    return true;
  }
  bool TraverseDeclarationNameInfo(DeclarationNameInfo NameInfo) {
    return true;
  }
  bool TraverseTemplateName(TemplateName Template) { return true; }
  bool TraverseTemplateArgument(const TemplateArgument &Arg) { return true; }
  bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc) {
    return true;
  }
  bool TraverseTemplateArguments(ArrayRef<TemplateArgument>) { return true; }
  bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
    return true;
  }
  bool TraverseLambdaCapture(LambdaExpr *LE, const LambdaCapture *C,
                             Expr *Init) {
    return true;
  }

  // Check 'extern "*" {}' block for #include directives.
  bool VisitLinkageSpecDecl(LinkageSpecDecl *D) { return true; }

  // Check 'namespace (name) {}' block for #include directives.
  bool VisitNamespaceDecl(const NamespaceDecl *D) { return true; }

  // Collect definition entities.
  bool VisitNamedDecl(NamedDecl *ND) { return true; }
};

class CompileCheckConsumer : public ASTConsumer {
public:
  CompileCheckConsumer() {}

  void HandleTranslationUnit(ASTContext &Ctx) override {
    CompileCheckVisitor().TraverseDecl(Ctx.getTranslationUnitDecl());
  }
};

class CompileCheckAction : public SyntaxOnlyAction {
public:
  CompileCheckAction() {}

protected:
  std::unique_ptr<clang::ASTConsumer>
  CreateASTConsumer(CompilerInstance &CI, StringRef InFile) override {
    return std::make_unique<CompileCheckConsumer>();
  }
};

class CompileCheckFrontendActionFactory : public FrontendActionFactory {
public:
  CompileCheckFrontendActionFactory() {}

  std::unique_ptr<FrontendAction> create() override {
    return std::make_unique<CompileCheckAction>();
  }
};

// Tool driver.  Returns 0 on success, 1 for modularization problems
// (coverage-check failures return per doCoverageCheck's contract).
int main(int Argc, const char **Argv) {

  // Save program name for error messages.
  Argv0 = Argv[0];

  // Save program arguments for use in module.modulemap comment.
  CommandLine = std::string(sys::path::stem(sys::path::filename(Argv0)));
  for (int ArgIndex = 1; ArgIndex < Argc; ArgIndex++) {
    CommandLine.append(" ");
    CommandLine.append(Argv[ArgIndex]);
  }

  // This causes options to be parsed.
  cl::ParseCommandLineOptions(Argc, Argv, "modularize.\n");

  // No go if we have no header list file.
  if (ListFileNames.size() == 0) {
    cl::PrintHelpMessage();
    return 1;
  }

  std::unique_ptr<ModularizeUtilities> ModUtil;
  int HadErrors = 0;

  ModUtil.reset(ModularizeUtilities::createModularizeUtilities(
      ListFileNames, HeaderPrefix, ProblemFilesList));

  // Get header file names and dependencies.
  if (ModUtil->loadAllHeaderListsAndDependencies())
    HadErrors = 1;

  // If we are in assistant mode, output the module map and quit.
  if (ModuleMapPath.length() != 0) {
    if (!createModuleMap(ModuleMapPath, ModUtil->HeaderFileNames,
                         ModUtil->ProblemFileNames, ModUtil->Dependencies,
                         HeaderPrefix, RootModule))
      return 1; // Failed.
    return 0;   // Success - Skip checks in assistant mode.
  }

  // If we're doing module maps.
  if (!NoCoverageCheck && ModUtil->HasModuleMap) {
    // Do coverage check.
    if (ModUtil->doCoverageCheck(IncludePaths, CommandLine))
      HadErrors = 1;
  }

  // Bail early if only doing the coverage check.
  if (CoverageCheckOnly)
    return HadErrors;

  // Create the compilation database.
  SmallString<256> PathBuf;
  sys::fs::current_path(PathBuf);
  std::unique_ptr<CompilationDatabase> Compilations;
  Compilations.reset(new FixedCompilationDatabase(Twine(PathBuf),
                                                  CC1Arguments));

  // Create preprocessor tracker, to watch for macro and conditional
  // problems.
  std::unique_ptr<PreprocessorTracker> PPTracker(
      PreprocessorTracker::create(ModUtil->HeaderFileNames,
                                  BlockCheckHeaderListOnly));

  // Collect entities here.
  EntityMap Entities;

  // Because we can't easily determine which files failed
  // during the tool run, if we're collecting the file lists
  // for display, we do a first compile pass on individual
  // files to find which ones don't compile stand-alone.
  if (DisplayFileLists) {
    // First, make a pass to just get compile errors.
    for (auto &CompileCheckFile : ModUtil->HeaderFileNames) {
      llvm::SmallVector<std::string, 32> CompileCheckFileArray;
      CompileCheckFileArray.push_back(CompileCheckFile);
      ClangTool CompileCheckTool(*Compilations, CompileCheckFileArray);
      CompileCheckTool.appendArgumentsAdjuster(
          getModularizeArgumentsAdjuster(ModUtil->Dependencies));
      int CompileCheckFileErrors = 0;
      // FIXME: use newFrontendActionFactory.
      CompileCheckFrontendActionFactory CompileCheckFactory;
      CompileCheckFileErrors |= CompileCheckTool.run(&CompileCheckFactory);
      if (CompileCheckFileErrors != 0) {
        ModUtil->addUniqueProblemFile(CompileCheckFile); // Save problem file.
        HadErrors |= 1;
      } else
        ModUtil->addNoCompileErrorsFile(CompileCheckFile); // Save good file.
    }
  }

  // Then we make another pass on the good files to do the rest of the work.
  ClangTool Tool(*Compilations,
                 (DisplayFileLists ? ModUtil->GoodFileNames
                                   : ModUtil->HeaderFileNames));
  Tool.appendArgumentsAdjuster(
      getModularizeArgumentsAdjuster(ModUtil->Dependencies));
  ModularizeFrontendActionFactory Factory(Entities, *PPTracker, HadErrors);
  HadErrors |= Tool.run(&Factory);

  // Create a place to save duplicate entity locations, separate bins per
  // kind.
  typedef SmallVector<Location, 8> LocationArray;
  typedef SmallVector<LocationArray, Entry::EK_NumberOfKinds> EntryBinArray;
  EntryBinArray EntryBins;
  int KindIndex;
  for (KindIndex = 0; KindIndex < Entry::EK_NumberOfKinds; ++KindIndex) {
    LocationArray Array;
    EntryBins.push_back(Array);
  }

  // Check for the same entity being defined in multiple places.
  for (EntityMap::iterator E = Entities.begin(), EEnd = Entities.end();
       E != EEnd; ++E) {
    // If only one occurrence, exit early.
    if (E->second.size() == 1)
      continue;
    // Clear entity locations.
    for (EntryBinArray::iterator CI = EntryBins.begin(), CE = EntryBins.end();
         CI != CE; ++CI) {
      CI->clear();
    }
    // Walk the entities of a single name, collecting the locations,
    // separated into separate bins.
    for (unsigned I = 0, N = E->second.size(); I != N; ++I) {
      EntryBins[E->second[I].Kind].push_back(E->second[I].Loc);
    }
    // Report any duplicate entity definition errors.
    int KindIndex = 0;
    for (EntryBinArray::iterator DI = EntryBins.begin(), DE = EntryBins.end();
         DI != DE; ++DI, ++KindIndex) {
      int ECount = DI->size();
      // If only 1 occurrence of this entity, skip it, we only report
      // duplicates.
      if (ECount <= 1)
        continue;
      LocationArray::iterator FI = DI->begin();
      StringRef kindName = Entry::getKindName((Entry::EntryKind)KindIndex);
      errs() << "error: " << kindName << " '" << E->first
             << "' defined at multiple locations:\n";
      for (LocationArray::iterator FE = DI->end(); FI != FE; ++FI) {
        errs() << " " << FI->File->getName() << ":" << FI->Line << ":"
               << FI->Column << "\n";
        ModUtil->addUniqueProblemFile(std::string(FI->File->getName()));
      }
      HadErrors = 1;
    }
  }

  // Complain about macro instance in header files that differ based on how
  // they are included.
  if (PPTracker->reportInconsistentMacros(errs()))
    HadErrors = 1;

  // Complain about preprocessor conditional directives in header files that
  // differ based on how they are included.
  if (PPTracker->reportInconsistentConditionals(errs()))
    HadErrors = 1;

  // Complain about any headers that have contents that differ based on how
  // they are included.
  // FIXME: Could we provide information about which preprocessor conditionals
  // are involved?
  for (auto H = Entities.HeaderContentMismatches.begin(),
            HEnd = Entities.HeaderContentMismatches.end();
       H != HEnd; ++H) {
    if (H->second.empty()) {
      errs() << "internal error: phantom header content mismatch\n";
      continue;
    }
    HadErrors = 1;
    ModUtil->addUniqueProblemFile(std::string(H->first.getName()));
    errs() << "error: header '" << H->first.getName()
           << "' has different contents depending on how it was included.\n";
    for (unsigned I = 0, N = H->second.size(); I != N; ++I) {
      errs() << "note: '" << H->second[I].Name << "' in "
             << H->second[I].Loc.File->getName() << " at "
             << H->second[I].Loc.Line << ":" << H->second[I].Loc.Column
             << " not always provided\n";
    }
  }

  if (DisplayFileLists) {
    ModUtil->displayProblemFiles();
    ModUtil->displayGoodFiles();
    ModUtil->displayCombinedFiles();
  }

  return HadErrors;
}
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/modularize/Modularize.cpp
#!/usr/bin/env python from __future__ import print_function import argparse import multiprocessing import random import sys import numpy as np import chainer import chainer.cuda from chainer import training from chainer.training import extensions import chainermn import models.alex as alex import models.googlenet as googlenet import models.googlenetbn as googlenetbn import models.nin as nin import models.resnet50 as resnet50 # Check Python version if it supports multiprocessing.set_start_method, # which was introduced in Python 3.4 major, minor, _, _, _ = sys.version_info if major <= 2 or (major == 3 and minor < 4): sys.stderr.write('Error: ImageNet example uses ' 'chainer.iterators.MultiprocessIterator, ' 'which works only with Python >= 3.4. \n' 'For more details, see ' 'http://chainermn.readthedocs.io/en/master/' 'tutorial/tips_faqs.html#using-multiprocessiterator\n') exit(-1) class PreprocessedDataset(chainer.dataset.DatasetMixin): def __init__(self, path, root, mean, crop_size, random=True): self.base = chainer.datasets.LabeledImageDataset(path, root) self.mean = mean.astype(np.float32) self.crop_size = crop_size self.random = random def __len__(self): return len(self.base) def get_example(self, i): # It reads the i-th image/label pair and return a preprocessed image. 
# It applies following preprocesses: # - Cropping (random or center rectangular) # - Random flip # - Scaling to [0, 1] value crop_size = self.crop_size image, label = self.base[i] _, h, w = image.shape if self.random: # Randomly crop a region and flip the image top = random.randint(0, h - crop_size - 1) left = random.randint(0, w - crop_size - 1) if random.randint(0, 1): image = image[:, :, ::-1] else: # Crop the center top = (h - crop_size) // 2 left = (w - crop_size) // 2 bottom = top + crop_size right = left + crop_size image = image[:, top:bottom, left:right] image -= self.mean[:, top:bottom, left:right] image *= (1.0 / 255.0) # Scale to [0, 1] return image, label # chainermn.create_multi_node_evaluator can be also used with user customized # evaluator classes that inherit chainer.training.extensions.Evaluator. class TestModeEvaluator(extensions.Evaluator): def evaluate(self): model = self.get_target('main') model.train = False ret = super(TestModeEvaluator, self).evaluate() model.train = True return ret def main(): # Check if GPU is available # (ImageNet example does not support CPU execution) if not chainer.cuda.available: raise RuntimeError('ImageNet requires GPU support.') archs = { 'alex': alex.Alex, 'googlenet': googlenet.GoogLeNet, 'googlenetbn': googlenetbn.GoogLeNetBN, 'nin': nin.NIN, 'resnet50': resnet50.ResNet50, } parser = argparse.ArgumentParser( description='Learning convnet from ILSVRC2012 dataset') parser.add_argument('train', help='Path to training image-label list file') parser.add_argument('val', help='Path to validation image-label list file') parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin', help='Convnet architecture') parser.add_argument('--batchsize', '-B', type=int, default=32, help='Learning minibatch size') parser.add_argument('--epoch', '-E', type=int, default=10, help='Number of epochs to train') parser.add_argument('--initmodel', help='Initialize the model from given file') parser.add_argument('--loaderjob', 
'-j', type=int, help='Number of parallel data loading processes') parser.add_argument('--mean', '-m', default='mean.npy', help='Mean file (computed by compute_mean.py)') parser.add_argument('--resume', '-r', default='', help='Initialize the trainer from given file') parser.add_argument('--out', '-o', default='result', help='Output directory') parser.add_argument('--root', '-R', default='.', help='Root directory path of image files') parser.add_argument('--val_batchsize', '-b', type=int, default=250, help='Validation minibatch size') parser.add_argument('--test', action='store_true') parser.add_argument('--communicator', default='hierarchical') parser.set_defaults(test=False) args = parser.parse_args() # Start method of multiprocessing module need to be changed if we # are using InfiniBand and MultiprocessIterator. This is because # processes often crash when calling fork if they are using # Infiniband. (c.f., # https://www.open-mpi.org/faq/?category=tuning#fork-warning ) # Also, just setting the start method does not seem to be # sufficient to actually launch the forkserver processes, so also # start a dummy process. # See also our document: # https://chainermn.readthedocs.io/en/stable/tutorial/tips_faqs.html#using-multiprocessiterator # This must be done *before* ``chainermn.create_communicator``!!! multiprocessing.set_start_method('forkserver') p = multiprocessing.Process() p.start() p.join() # Prepare ChainerMN communicator. 
comm = chainermn.create_communicator(args.communicator) device = comm.intra_rank if comm.rank == 0: print('==========================================') print('Num process (COMM_WORLD): {}'.format(comm.size)) print('Using {} communicator'.format(args.communicator)) print('Using {} arch'.format(args.arch)) print('Num Minibatch-size: {}'.format(args.batchsize)) print('Num epoch: {}'.format(args.epoch)) print('==========================================') model = archs[args.arch]() if args.initmodel: print('Load model from', args.initmodel) chainer.serializers.load_npz(args.initmodel, model) chainer.cuda.get_device_from_id(device).use() # Make the GPU current model.to_gpu() # Split and distribute the dataset. Only worker 0 loads the whole dataset. # Datasets of worker 0 are evenly split and distributed to all workers. mean = np.load(args.mean) if comm.rank == 0: train = PreprocessedDataset(args.train, args.root, mean, model.insize) val = PreprocessedDataset( args.val, args.root, mean, model.insize, False) else: train = None val = None train = chainermn.scatter_dataset(train, comm, shuffle=True) val = chainermn.scatter_dataset(val, comm) # A workaround for processes crash should be done before making # communicator above, when using fork (e.g. MultiProcessIterator) # along with Infiniband. train_iter = chainer.iterators.MultiprocessIterator( train, args.batchsize, n_processes=args.loaderjob) val_iter = chainer.iterators.MultiprocessIterator( val, args.val_batchsize, repeat=False, n_processes=args.loaderjob) # Create a multi node optimizer from a standard Chainer optimizer. 
optimizer = chainermn.create_multi_node_optimizer( chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9), comm) optimizer.setup(model) # Set up a trainer updater = training.StandardUpdater(train_iter, optimizer, device=device) trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out) checkpoint_interval = (10, 'iteration') if args.test else (1, 'epoch') val_interval = (10, 'iteration') if args.test else (1, 'epoch') log_interval = (10, 'iteration') if args.test else (1, 'epoch') checkpointer = chainermn.create_multi_node_checkpointer( name='imagenet-example', comm=comm) checkpointer.maybe_load(trainer, optimizer) trainer.extend(checkpointer, trigger=checkpoint_interval) # Create a multi node evaluator from an evaluator. evaluator = TestModeEvaluator(val_iter, model, device=device) evaluator = chainermn.create_multi_node_evaluator(evaluator, comm) trainer.extend(evaluator, trigger=val_interval) # Some display and output extensions are necessary only for one worker. # (Otherwise, there would just be repeated outputs.) if comm.rank == 0: trainer.extend(extensions.DumpGraph('main/loss')) trainer.extend(extensions.LogReport(trigger=log_interval)) trainer.extend(extensions.observe_lr(), trigger=log_interval) trainer.extend(extensions.PrintReport([ 'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'lr' ]), trigger=log_interval) trainer.extend(extensions.ProgressBar(update_interval=10)) if args.resume: chainer.serializers.load_npz(args.resume, trainer) trainer.run() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import Guess from guessit.transfo import SingleNodeGuesser from guessit.language import search_language import logging log = logging.getLogger(__name__) def guess_language(string, node, skip=None): if skip: relative_skip = [] for entry in skip: node_idx = entry['node_idx'] span = entry['span'] if node_idx == node.node_idx[:len(node_idx)]: relative_span = (span[0] - node.offset + 1, span[1] - node.offset + 1) relative_skip.append(relative_span) skip = relative_skip language, span, confidence = search_language(string, skip=skip) if language: return (Guess({'language': language}, confidence=confidence, raw= string[span[0]:span[1]]), span) return None, None guess_language.use_node = True def process(mtree, *args, **kwargs): SingleNodeGuesser(guess_language, None, log, *args, **kwargs).process(mtree) # Note: 'language' is promoted to 'subtitleLanguage' in the post_process transfo
unknown
codeparrot/codeparrot-clean
"""Rudimentary parser for C struct definitions.""" import re PyObject_HEAD = "PyObject_HEAD" PyObject_VAR_HEAD = "PyObject_VAR_HEAD" rx_name = re.compile("} (\w+);") class Struct: def __init__(self, name, head, members): self.name = name self.head = head self.members = members def get_type(self, name): for _name, type in self.members: if name == _name: return type raise ValueError, "no member named %s" % name def parse(s): """Parse a C struct definition. The parser is very restricted in what it will accept. """ lines = filter(None, s.split("\n")) # get non-empty lines assert lines[0].strip() == "typedef struct {" pyhead = lines[1].strip() assert (pyhead.startswith("PyObject") and pyhead.endswith("HEAD")) members = [] for line in lines[2:]: line = line.strip() if line.startswith("}"): break assert line.endswith(";") line = line[:-1] words = line.split() name = words[-1] type = " ".join(words[:-1]) if name[0] == "*": name = name[1:] type += " *" members.append((name, type)) name = None mo = rx_name.search(line) assert mo is not None name = mo.group(1) return Struct(name, pyhead, members)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- from __future__ import print_function import ast import copy import json import logging from collections import OrderedDict from time import time from lxml import html from lxml import etree from werkzeug import urls from odoo import api, models, tools from odoo.tools.safe_eval import assert_valid_codeobj, _BUILTINS, _SAFE_OPCODES from odoo.tools.misc import get_lang from odoo.http import request from odoo.modules.module import get_resource_path from odoo.addons.base.models.qweb import QWeb, Contextifier from odoo.addons.base.models.assetsbundle import AssetsBundle _logger = logging.getLogger(__name__) class IrQWeb(models.AbstractModel, QWeb): """ Base QWeb rendering engine * to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and create new models called :samp:`ir.qweb.field.{widget}` Beware that if you need extensions or alterations which could be incompatible with other subsystems, you should create a local object inheriting from ``ir.qweb`` and customize that. """ _name = 'ir.qweb' _description = 'Qweb' @api.model def _render(self, id_or_xml_id, values=None, **options): """ render(id_or_xml_id, values, **options) Render the template specified by the given name. 
:param id_or_xml_id: name or etree (see get_template) :param dict values: template values to be used for rendering :param options: used to compile the template (the dict available for the rendering is frozen) * ``load`` (function) overrides the load method * ``profile`` (float) profile the rendering (use astor lib) (filter profile line with time ms >= profile) """ context = dict(self.env.context, dev_mode='qweb' in tools.config['dev_mode']) context.update(options) result = super(IrQWeb, self)._render(id_or_xml_id, values=values, **context) if b'data-pagebreak=' not in result: return result fragments = html.fragments_fromstring(result.decode('utf-8')) for fragment in fragments: for row in fragment.iterfind('.//tr[@data-pagebreak]'): table = next(row.iterancestors('table')) newtable = html.Element('table', attrib=dict(table.attrib)) thead = table.find('thead') if thead: newtable.append(copy.deepcopy(thead)) # TODO: copy caption & tfoot as well? # TODO: move rows in a tbody if row.getparent() is one? pos = row.get('data-pagebreak') assert pos in ('before', 'after') for sibling in row.getparent().iterchildren('tr'): if sibling is row: if pos == 'after': newtable.append(sibling) break newtable.append(sibling) table.addprevious(newtable) table.addprevious(html.Element('div', attrib={ 'style': 'page-break-after: always' })) return b''.join(html.tostring(f) for f in fragments) def default_values(self): """ attributes add to the values for each computed template """ default = super(IrQWeb, self).default_values() default.update(request=request, cache_assets=round(time()/180), true=True, false=False) # true and false added for backward compatibility to remove after v10 return default # assume cache will be invalidated by third party on write to ir.ui.view def _get_template_cache_keys(self): """ Return the list of context keys to use for caching ``_get_template``. 
""" return ['lang', 'inherit_branding', 'editable', 'translatable', 'edit_translations', 'website_id'] # apply ormcache_context decorator unless in dev mode... @tools.conditional( 'xml' not in tools.config['dev_mode'], tools.ormcache('id_or_xml_id', 'tuple(options.get(k) for k in self._get_template_cache_keys())'), ) def compile(self, id_or_xml_id, options): try: id_or_xml_id = int(id_or_xml_id) except: pass return super(IrQWeb, self).compile(id_or_xml_id, options=options) def _load(self, name, options): lang = options.get('lang', get_lang(self.env).code) env = self.env if lang != env.context.get('lang'): env = env(context=dict(env.context, lang=lang)) view_id = self.env['ir.ui.view'].get_view_id(name) template = env['ir.ui.view'].sudo()._read_template(view_id) # QWeb's `_read_template` will check if one of the first children of # what we send to it has a "t-name" attribute having `name` as value # to consider it has found it. As it'll never be the case when working # with view ids or children view or children primary views, force it here. 
def is_child_view(view_name): view_id = self.env['ir.ui.view'].get_view_id(view_name) view = self.env['ir.ui.view'].sudo().browse(view_id) return view.inherit_id is not None if isinstance(name, int) or is_child_view(name): view = etree.fromstring(template) for node in view: if node.get('t-name'): node.set('t-name', str(name)) return view else: return template # order def _directives_eval_order(self): directives = super(IrQWeb, self)._directives_eval_order() directives.insert(directives.index('call'), 'lang') directives.insert(directives.index('field'), 'call-assets') return directives # compile directives def _compile_directive_lang(self, el, options): lang = el.attrib.pop('t-lang', get_lang(self.env).code) if el.get('t-call-options'): el.set('t-call-options', el.get('t-call-options')[0:-1] + u', "lang": %s}' % lang) else: el.set('t-call-options', u'{"lang": %s}' % lang) return self._compile_node(el, options) def _compile_directive_call_assets(self, el, options): """ This special 't-call' tag can be used in order to aggregate/minify javascript and css assets""" if len(el): raise SyntaxError("t-call-assets cannot contain children nodes") # nodes = self._get_asset_nodes(xmlid, options, css=css, js=js, debug=values.get('debug'), async=async, values=values) # # for index, (tagName, t_attrs, content) in enumerate(nodes): # if index: # append('\n ') # append('<') # append(tagName) # # self._post_processing_att(tagName, t_attrs, options) # for name, value in t_attrs.items(): # if value or isinstance(value, string_types)): # append(u' ') # append(name) # append(u'="') # append(escape(pycompat.to_text((value))) # append(u'"') # # if not content and tagName in self._void_elements: # append('/>') # else: # append('>') # if content: # append(content) # append('</') # append(tagName) # append('>') # space = el.getprevious() is not None and el.getprevious().tail or el.getparent().text sep = u'\n' + space.rsplit('\n').pop() return [ ast.Assign( targets=[ast.Name(id='nodes', 
ctx=ast.Store())], value=ast.Call( func=ast.Attribute( value=ast.Name(id='self', ctx=ast.Load()), attr='_get_asset_nodes', ctx=ast.Load() ), args=[ ast.Str(el.get('t-call-assets')), ast.Name(id='options', ctx=ast.Load()), ], keywords=[ ast.keyword('css', self._get_attr_bool(el.get('t-css', True))), ast.keyword('js', self._get_attr_bool(el.get('t-js', True))), ast.keyword('debug', ast.Call( func=ast.Attribute( value=ast.Name(id='values', ctx=ast.Load()), attr='get', ctx=ast.Load() ), args=[ast.Str('debug')], keywords=[], starargs=None, kwargs=None )), ast.keyword('async_load', self._get_attr_bool(el.get('async_load', False))), ast.keyword('defer_load', self._get_attr_bool(el.get('defer_load', False))), ast.keyword('lazy_load', self._get_attr_bool(el.get('lazy_load', False))), ast.keyword('values', ast.Name(id='values', ctx=ast.Load())), ], starargs=None, kwargs=None ) ), ast.For( target=ast.Tuple(elts=[ ast.Name(id='index', ctx=ast.Store()), ast.Tuple(elts=[ ast.Name(id='tagName', ctx=ast.Store()), ast.Name(id='t_attrs', ctx=ast.Store()), ast.Name(id='content', ctx=ast.Store()) ], ctx=ast.Store()) ], ctx=ast.Store()), iter=ast.Call( func=ast.Name(id='enumerate', ctx=ast.Load()), args=[ast.Name(id='nodes', ctx=ast.Load())], keywords=[], starargs=None, kwargs=None ), body=[ ast.If( test=ast.Name(id='index', ctx=ast.Load()), body=[self._append(ast.Str(sep))], orelse=[] ), self._append(ast.Str(u'<')), self._append(ast.Name(id='tagName', ctx=ast.Load())), ] + self._append_attributes() + [ ast.If( test=ast.BoolOp( op=ast.And(), values=[ ast.UnaryOp(ast.Not(), ast.Name(id='content', ctx=ast.Load()), lineno=0, col_offset=0), ast.Compare( left=ast.Name(id='tagName', ctx=ast.Load()), ops=[ast.In()], comparators=[ast.Attribute( value=ast.Name(id='self', ctx=ast.Load()), attr='_void_elements', ctx=ast.Load() )] ), ] ), body=[self._append(ast.Str(u'/>'))], orelse=[ self._append(ast.Str(u'>')), ast.If( test=ast.Name(id='content', ctx=ast.Load()), 
body=[self._append(ast.Name(id='content', ctx=ast.Load()))], orelse=[] ), self._append(ast.Str(u'</')), self._append(ast.Name(id='tagName', ctx=ast.Load())), self._append(ast.Str(u'>')), ] ) ], orelse=[] ) ] # method called by computing code def get_asset_bundle(self, xmlid, files, env=None): return AssetsBundle(xmlid, files, env=env) @tools.conditional( # in non-xml-debug mode we want assets to be cached forever, and the admin can force a cache clear # by restarting the server after updating the source code (or using the "Clear server cache" in debug tools) 'xml' not in tools.config['dev_mode'], tools.ormcache_context('xmlid', 'options.get("lang", "en_US")', 'css', 'js', 'debug', 'async_load', 'defer_load', 'lazy_load', keys=("website_id",)), ) def _get_asset_nodes(self, xmlid, options, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, values=None): files, remains = self._get_asset_content(xmlid, options) asset = self.get_asset_bundle(xmlid, files, env=self.env) remains = [node for node in remains if (css and node[0] == 'link') or (js and node[0] != 'link')] return remains + asset.to_node(css=css, js=js, debug=debug, async_load=async_load, defer_load=defer_load, lazy_load=lazy_load) def _get_asset_link_urls(self, xmlid, options): asset_nodes = self._get_asset_nodes(xmlid, options, js=False) return [node[1]['href'] for node in asset_nodes if node[0] == 'link'] @tools.ormcache_context('xmlid', 'options.get("lang", "en_US")', keys=("website_id",)) def _get_asset_content(self, xmlid, options): options = dict(options, inherit_branding=False, inherit_branding_auto=False, edit_translations=False, translatable=False, rendering_bundle=True) options['website_id'] = self.env.context.get('website_id') IrQweb = self.env['ir.qweb'].with_context(options) def can_aggregate(url): return not urls.url_parse(url).scheme and not urls.url_parse(url).netloc and not url.startswith('/web/content') # TODO: This helper can be used by any template that 
wants to embedd the backend. # It is currently necessary because the ir.ui.view bundle inheritance does not # match the module dependency graph. def get_modules_order(): if request: from odoo.addons.web.controllers.main import module_boot return json.dumps(module_boot()) return '[]' template = IrQweb._render(xmlid, {"get_modules_order": get_modules_order}) files = [] remains = [] for el in html.fragments_fromstring(template): if isinstance(el, html.HtmlElement): href = el.get('href', '') src = el.get('src', '') atype = el.get('type') media = el.get('media') if can_aggregate(href) and (el.tag == 'style' or (el.tag == 'link' and el.get('rel') == 'stylesheet')): if href.endswith('.sass'): atype = 'text/sass' elif href.endswith('.scss'): atype = 'text/scss' elif href.endswith('.less'): atype = 'text/less' if atype not in ('text/less', 'text/scss', 'text/sass'): atype = 'text/css' path = [segment for segment in href.split('/') if segment] filename = get_resource_path(*path) if path else None files.append({'atype': atype, 'url': href, 'filename': filename, 'content': el.text, 'media': media}) elif can_aggregate(src) and el.tag == 'script': atype = 'text/javascript' path = [segment for segment in src.split('/') if segment] filename = get_resource_path(*path) if path else None files.append({'atype': atype, 'url': src, 'filename': filename, 'content': el.text, 'media': media}) else: remains.append((el.tag, OrderedDict(el.attrib), el.text)) else: # the other cases are ignored pass return (files, remains) def _get_field(self, record, field_name, expression, tagName, field_options, options, values): field = record._fields[field_name] # adds template compile options for rendering fields field_options['template_options'] = options # adds generic field options field_options['tagName'] = tagName field_options['expression'] = expression field_options['type'] = field_options.get('widget', field.type) inherit_branding = options.get('inherit_branding', 
options.get('inherit_branding_auto') and record.check_access_rights('write', False)) field_options['inherit_branding'] = inherit_branding translate = options.get('edit_translations') and options.get('translatable') and field.translate field_options['translate'] = translate # field converter model = 'ir.qweb.field.' + field_options['type'] converter = self.env[model] if model in self.env else self.env['ir.qweb.field'] # get content content = converter.record_to_html(record, field_name, field_options) attributes = converter.attributes(record, field_name, field_options, values) return (attributes, content, inherit_branding or translate) def _get_widget(self, value, expression, tagName, field_options, options, values): # adds template compile options for rendering fields field_options['template_options'] = options field_options['type'] = field_options['widget'] field_options['tagName'] = tagName field_options['expression'] = expression # field converter model = 'ir.qweb.field.' + field_options['type'] converter = self.env[model] if model in self.env else self.env['ir.qweb.field'] # get content content = converter.value_to_html(value, field_options) attributes = OrderedDict() attributes['data-oe-type'] = field_options['type'] attributes['data-oe-expression'] = field_options['expression'] return (attributes, content, None) # compile expression add safe_eval def _compile_expr(self, expr): """ Compiles a purported Python expression to ast, verifies that it's safe (according to safe_eval's semantics) and alter its variable references to access values data instead """ # string must be stripped otherwise whitespace before the start for # formatting purpose are going to break parse/compile st = ast.parse(expr.strip(), mode='eval') assert_valid_codeobj( _SAFE_OPCODES, compile(st, '<>', 'eval'), # could be expr, but eval *should* be fine expr ) # ast.Expression().body -> expr return Contextifier(_BUILTINS).visit(st).body def _get_attr_bool(self, attr, default=False): if attr: if 
attr is True: return ast.Constant(True) attr = attr.lower() if attr in ('false', '0'): return ast.Constant(False) elif attr in ('true', '1'): return ast.Constant(True) return ast.Constant(attr if attr is False else bool(default))
unknown
codeparrot/codeparrot-clean
''' # This script is an example of calculation of long range interactions # (Coulomb interaction) using the Ewald summation and the P3M methods. # # Initially, the simple cubic structure is generated in order to represent the # NaCl crystal. Then the energy and forces are calculated and compared using both # the Ewald summation and the P3M. At the end the Madelung constant of NaCl crystal # is calculated. # # At the moment there is only metallic surrounding media is possible. # Parameters: # Ewald summation: alpha = 1.112583061 (Ewald parameter) rspacecutoff = 4.9 (the cutoff in real space) kspacecutoff = 30 (the cutoff in reciprocal space) # P3M: M = (16, 16, 16) (mesh) P = 7 (charge assignment order) ''' # The script itself import mpi4py.MPI as MPI import espresso from espresso import Real3D # initial parameters N = 16 # number of particles on lattice site num_particles = N**3 # total number of particles rho = 0.03 # number density of particles, number of particles devided by volume # creating a cubic NaCl crystal #print 'Creating a simple cubic structure...' 
x, y, z, Lx, Ly, Lz = espresso.tools.init_cfg.lattice.createCubic(num_particles, rho, False) # creating the system box box = (Lx, Ly, Lz) print 'System box size: ', box print 'Number of particles = ', num_particles # Ewald summation parameters #alphaEwald = 1.112583061 # alpha - Ewald parameter alphaEwald = 0.660557 rspacecutoff = 4.9 # rspacecutoff - the cutoff in real space kspacecutoff = 30 # kspacecutoff - the cutoff in reciprocal space print 'Ewald parameters:' print 'alfa=%f, rcutoff=%f, kcutoff=%d' % (alphaEwald, rspacecutoff, kspacecutoff) # P3M parameters M = espresso.Int3D(64, 64, 64) P = 7 #alphaP3M = 1.112583061 # alpha - Ewald parameter alphaP3M = 0.660557 print 'P3M parameters:' print 'Mesh=', M,', charge assignment order=%d, alphaP3M=%lf' % ( P, alphaP3M) # a skin for Verlet list skin = 0.2 # Coulomb prefactor parameters bjerrumlength = 1.0 temperature = 1.0 coulomb_prefactor = bjerrumlength * temperature nodeGrid = espresso.tools.decomp.nodeGrid(MPI.COMM_WORLD.size) cellGrid = espresso.tools.decomp.cellGrid(box, nodeGrid, rspacecutoff, skin) print '' print 'density = %.4f' % (rho) print 'NodeGrid = %s' % (nodeGrid,) print 'CellGrid = %s' % (cellGrid,) print '' ''' Below two systems for Ewald summation and PPPM methods will be created. 
''' ####################################################################################### # system for Ewald ####################################################################################### systemEwald = espresso.System() systemEwald.rng = espresso.esutil.RNG() systemEwald.bc = espresso.bc.OrthorhombicBC(systemEwald.rng, box) systemEwald.skin = skin systemEwald.storage = espresso.storage.DomainDecomposition(systemEwald, nodeGrid, cellGrid) ####################################################################################### # system for PPPM ####################################################################################### systemPPPM = espresso.System() systemPPPM.rng = espresso.esutil.RNG() systemPPPM.bc = espresso.bc.OrthorhombicBC(systemPPPM.rng, box) systemPPPM.skin = skin systemPPPM.storage = espresso.storage.DomainDecomposition(systemPPPM, nodeGrid, cellGrid) ####################################################################################### # adding particles props = ['id', 'pos', 'type', 'q'] new_particles = [] countX = countY = countZ = 0 for i in range(0, num_particles): # charge should be accordingly to NaCl crystall charge = pow(-1, countX + countY + countZ) part = [ i, Real3D(x[i], y[i], z[i]), 0, charge ] new_particles.append(part) countX += 1 if countX >= N: countX = 0 countY += 1 if countY >= N: countY = 0 countZ += 1 # adding particles to Ewald system systemEwald.storage.addParticles(new_particles, *props) systemEwald.storage.decompose() # adding particles to PPPM system systemPPPM.storage.addParticles(new_particles, *props) systemPPPM.storage.decompose() ## potentials and interactions ## # setting a Verlet list vlEwald = espresso.VerletList(systemEwald, rspacecutoff) vlPPPM = espresso.VerletList(systemPPPM, rspacecutoff) # real space interaction for Ewald system # R space part of electrostatic interaction coulombR_potEwald = espresso.interaction.CoulombRSpace(coulomb_prefactor, alphaEwald, rspacecutoff) # creating an 
interaction based on the Verlet list coulombR_intEwald = espresso.interaction.VerletListCoulombRSpace(vlEwald) # setting the potential for the interaction between particles of type 0 and 0 coulombR_intEwald.setPotential(type1=0, type2=0, potential = coulombR_potEwald) # adding the interaction to the system systemEwald.addInteraction(coulombR_intEwald) # real space interaction for PPPM system # R space part of electrostatic interaction coulombR_potP3M = espresso.interaction.CoulombRSpace(coulomb_prefactor, alphaP3M, rspacecutoff) # creating an interaction based on the Verlet list coulombR_intPPPM = espresso.interaction.VerletListCoulombRSpace(vlPPPM) # setting the potential for the interaction between particles of type 0 and 0 coulombR_intPPPM.setPotential(type1=0, type2=0, potential = coulombR_potP3M) # adding the interaction to the system systemPPPM.addInteraction(coulombR_intPPPM) # K space part of electrostatic interaction ewaldK_pot = espresso.interaction.CoulombKSpaceEwald(systemEwald, coulomb_prefactor, alphaEwald, kspacecutoff) # creating an interaction based on the Cell list for all particle interaction and potential in K space ewaldK_int = espresso.interaction.CellListCoulombKSpaceEwald(systemEwald.storage, ewaldK_pot) # adding the interaction to the system systemEwald.addInteraction(ewaldK_int) # PPPM system p3m_pot = espresso.interaction.CoulombKSpaceP3M( systemPPPM, coulomb_prefactor, alphaP3M, M, P, rspacecutoff) # creating the interaction based on the Cell list for all particle interaction and potential in K space p3m_int = espresso.interaction.CellListCoulombKSpaceP3M(systemPPPM.storage, p3m_pot) # adding the interaction to the system systemPPPM.addInteraction(p3m_int) ### Integrators for Ewald and PPPM # creating the integrator which based on the Verlet algorithm integratorEwald = espresso.integrator.VelocityVerlet(systemEwald) # seting the time step (it is not important here) integratorEwald.dt = 0.0001 # nothing will be changed in system, just 
forces will be calculated ones integratorEwald.run(0) # creating the integrator which based on the Verlet algorithm integratorPPPM = espresso.integrator.VelocityVerlet(systemPPPM) # seting the time step (it is not important here) integratorPPPM.dt = 0.0001 # nothing will be changed in system, just forces will be calculated ones integratorPPPM.run(0) # printing the particle id and force difference (x,y,z) for first 6 particles print ('\n Difference between forces calculated by Ewald summation and PPPM (first 6 particles)') print ('%3s %20s %20s %20s\n' % ('id', 'dfx', 'dfy', 'dfz')) #sock = espresso.tools.vmd.connect(systemPPPM) #espresso.tools.vmd.imd_positions(systemPPPM, sock) print_N = min(num_particles, 20) for j in range(0, print_N): print ( '%3d %3.17f %3.17f %3.17f' % (j, \ abs(systemEwald.storage.getParticle(j).f.x - systemPPPM.storage.getParticle(j).f.x), \ abs(systemEwald.storage.getParticle(j).f.y - systemPPPM.storage.getParticle(j).f.y), \ abs(systemEwald.storage.getParticle(j).f.z - systemPPPM.storage.getParticle(j).f.z)) ) print 'force:', systemPPPM.storage.getParticle(j).f, ' ', systemEwald.storage.getParticle(j).f # calculating the R space part of electrostatic energy energyEwaldR = coulombR_intEwald.computeEnergy() # calculating the K space part of electrostatic energy energyEwaldK = ewaldK_int.computeEnergy() # total energy (Ewald summation) enTotEwald = energyEwaldR + energyEwaldK # calculating the R space part of electrostatic energy energyPPPMR = coulombR_intPPPM.computeEnergy() # calculating the K space part of electrostatic energy energyPPPMK = p3m_int.computeEnergy() # total energy (PPPM) enTotPPPM = energyPPPMR + energyPPPMK # printing the total energy and the difference print 'Energy (Ewald summation): %5.16f Energy (PPPM): %5.16f\n' % (enTotEwald, enTotPPPM) print 'The difference in energy (Ewald - PPPM): %5.16f\n' % (enTotEwald-enTotPPPM) a = 2 * pow( Lx*Ly*Lz / num_particles , 1./3. 
) madelung_NaCl = -1.747564594633182190636212035544397403481 print ("Madelung constant is: %14.10f\n" % (enTotEwald/num_particles * a)) print (" error: %e\n\n" % ( abs( abs( enTotPPPM/num_particles * a) - abs(madelung_NaCl))))
unknown
codeparrot/codeparrot-clean
from django.db import models


class Component(models.Model):
    """Base record for any PC component.

    NOTE: this is a concrete model (not `abstract`), so subclasses such as
    `Processor` use Django multi-table inheritance — presumably intentional;
    confirm before changing.
    """
    name = models.CharField(max_length=50, unique=True)
    photoUrl = models.URLField(null=True, blank=True)
    brand = models.ForeignKey(
        'Brand',
        on_delete=models.CASCADE,
    )


class Brand(models.Model):
    """Manufacturer of a component."""
    name = models.CharField(max_length=50)


class Processor(Component):
    """A CPU."""
    frequency = models.FloatField()  # GHz
    cores = models.IntegerField()
    socket = models.ForeignKey(
        'Socket',
        on_delete=models.CASCADE,
    )


class Motherboard(Component):
    """A motherboard and its compatibility constraints."""
    ramSlots = models.IntegerField()  # number of RAM slots
    maxRam = models.IntegerField()  # GB ("Go" in French)
    ramtype = models.ForeignKey(
        'RamType',
        on_delete=models.CASCADE,
    )
    # a motherboard is compatible with several RAM frequencies
    ramfrequency = models.ManyToManyField("RamFrequency")
    socket = models.ForeignKey(
        'Socket',
        on_delete=models.CASCADE,
    )
    # a motherboard can have several PCI slot types
    pcitypes = models.ManyToManyField("PciType")
    formfactor = models.ForeignKey(
        'MotherBoardFormFactor',
        on_delete=models.CASCADE,
    )


class Socket(models.Model):
    """A CPU socket (shared by processors and motherboards)."""
    name = models.CharField(max_length=10)

    def __str__(self):
        return "{{id: {}, name: {}}}".format(self.id, self.name)


class Ram(Component):
    """A RAM kit."""
    capacity = models.IntegerField()  # GB per stick; capacity * quantity = total memory
    quantity = models.IntegerField()  # number of sticks
    ramtype = models.ForeignKey(
        'RamType',
        on_delete=models.CASCADE,
    )
    frequency = models.ForeignKey(
        'RamFrequency',
        on_delete=models.CASCADE,
    )


class RamFrequency(models.Model):
    """A RAM operating frequency."""
    frequency = models.IntegerField()  # MHz


class RamType(models.Model):
    """A RAM generation."""
    typeName = models.CharField(max_length=10)  # DDR2, DDR3, DDR4


class GraphicCard(Component):
    """A graphics card."""
    memory = models.IntegerField()  # MB ("Mo" in French)
    pcitype = models.ForeignKey(
        'PciType',
        on_delete=models.CASCADE,
    )


class PciType(models.Model):
    """A PCI slot type."""
    name = models.CharField(max_length=50)  # PCI-E 3.0, PCI-E 2.0


class Case(Component):
    """A computer case."""
    weight = models.FloatField()  # in kg
    width = models.IntegerField()  # in mm
    height = models.IntegerField()  # in mm
    depth = models.IntegerField()  # in mm
    # a case can be compatible with several motherboard form factors
    motherBoardFormFactors = models.ManyToManyField("MotherBoardFormFactor")
    powerSupplyFormFactor = models.ForeignKey(
        'PowerSupplyFormFactor',
        on_delete=models.CASCADE,
    )


class MotherBoardFormFactor(models.Model):
    """A motherboard form factor (e.g. ATX)."""
    name = models.CharField(max_length=10)


class PowerSupply(Component):
    """A power supply unit."""
    watts = models.IntegerField()  # in watts
    modular = models.BooleanField()
    factorForm = models.ForeignKey(
        'PowerSupplyFormFactor',
        on_delete=models.CASCADE,
    )


class PowerSupplyFormFactor(models.Model):
    """A PSU form factor."""
    name = models.CharField(max_length=10)


class HardDrive(Component):
    """A storage drive."""
    capacity = models.IntegerField()  # GB
    hardDriveType = models.ForeignKey(
        'HardDriveType',
        on_delete=models.CASCADE,
    )


class HardDriveType(models.Model):
    """A drive technology."""
    name = models.CharField(max_length=10)  # SSD or HDD
unknown
codeparrot/codeparrot-clean
# Next + Payload Serverless Demo

This is a demo showing how to use `@payloadcms/next-payload` to deploy Payload serverlessly, in the same repo alongside a Next.js app.

## Deploy your own

[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/git/external?repository-url=https://github.com/vercel/next.js/tree/canary/examples/cms-payload&project-name=cms-payload&repository-name=cms-payload)

## How to use

Execute [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app) with [npm](https://docs.npmjs.com/cli/init), [Yarn](https://yarnpkg.com/lang/en/docs/cli/create/), or [pnpm](https://pnpm.io) to bootstrap the example:

```bash
npx create-next-app --example cms-payload cms-payload-app
```

```bash
yarn create next-app --example cms-payload cms-payload-app
```

```bash
pnpm create next-app --example cms-payload cms-payload-app
```

Deploy it to the cloud with [Vercel](https://vercel.com/new?utm_source=github&utm_medium=readme&utm_campaign=next-example) ([Documentation](https://nextjs.org/docs/deployment)).

The only thing you need to do to deploy to Vercel is to ensure that you have a MongoDB Atlas database connection string and an S3 bucket (if desired). Fill out the same environment variables that are shown in the `.env.example` with your own values, and then you're good to go!

### Developing locally

To develop with this package locally, make sure you have the following required software:

1. MongoDB
2. Node + NPM / Yarn
3. An S3 bucket to store media (optional)

### Getting started

Follow the steps below to spin up a local dev environment:

1. Clone the repo
2. Run `yarn` or `npm install`
3. Run `cp .env.example .env` and fill out all ENV variables as shown
4. Run `yarn dev` to start up the dev server

From there, you can visit your admin panel by navigating to `http://localhost:3000/admin`. Go ahead and start working!
### Related examples - [AgilityCMS](/examples/cms-agilitycms) - [Builder.io](/examples/cms-builder-io) - [ButterCMS](/examples/cms-buttercms) - [Contentful](/examples/cms-contentful) - [Cosmic](/examples/cms-cosmic) - [DatoCMS](/examples/cms-datocms) - [DotCMS](/examples/cms-dotcms) - [Drupal](/examples/cms-drupal) - [Enterspeed](/examples/cms-enterspeed) - [Ghost](/examples/cms-ghost) - [GraphCMS](/examples/cms-graphcms) - [Kontent.ai](/examples/cms-kontent-ai) - [MakeSwift](/examples/cms-makeswift) - [Payload](/examples/cms-payload) - [Plasmic](/examples/cms-plasmic) - [Prepr](/examples/cms-prepr) - [Prismic](/examples/cms-prismic) - [Sanity](/examples/cms-sanity) - [Sitecore XM Cloud](/examples/cms-sitecore-xmcloud) - [Sitefinity](/examples/cms-sitefinity) - [Storyblok](/examples/cms-storyblok) - [TakeShape](/examples/cms-takeshape) - [Tina](/examples/cms-tina) - [Umbraco](/examples/cms-umbraco) - [Umbraco heartcore](/examples/cms-umbraco-heartcore) - [Webiny](/examples/cms-webiny) - [WordPress](/examples/cms-wordpress) - [Blog Starter](/examples/blog-starter)
unknown
github
https://github.com/vercel/next.js
examples/cms-payload/README.md
# -*- coding: utf-8 -*-
"""
Models for Student Identity Verification

This is where we put any models relating to establishing the real-life
identity of a student over a period of time. Right now, the only models are
the abstract `PhotoVerification`, and its one concrete implementation
`SoftwareSecurePhotoVerification`. The hope is to keep as much of the photo
verification process as generic as possible.
"""
import functools
import json
import logging
from datetime import datetime, timedelta
from email.utils import formatdate

import pytz
import requests
import uuid
from lazy import lazy
from opaque_keys.edx.keys import UsageKey
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.dispatch import receiver
from django.db import models, transaction, IntegrityError
from django.utils.translation import ugettext as _, ugettext_lazy
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from simple_history.models import HistoricalRecords
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from model_utils.models import StatusModel, TimeStampedModel
from model_utils import Choices
from lms.djangoapps.verify_student.ssencrypt import (
    random_aes_key, encrypt_and_encode,
    generate_signed_message, rsa_encrypt
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule_django.models import CourseKeyField
from microsite_configuration.templatetags.microsite import platform_name

log = logging.getLogger(__name__)


def generateUUID():  # pylint: disable=invalid-name
    """ Utility function; generates UUIDs """
    return str(uuid.uuid4())


class VerificationException(Exception):
    # Raised by @status_before_must_be when a workflow method is called from
    # an invalid status, and for other verification workflow errors.
    pass


def status_before_must_be(*valid_start_statuses):
    """
    Helper decorator with arguments to make sure that an object with a
    `status` attribute is in one of a list of acceptable status states
    before a method is called. You could use it in a class definition
    like:

        @status_before_must_be("submitted", "approved", "denied")
        def refund_user(self, user_id):
            # Do logic here...

    If the object has a status that is not listed when the `refund_user`
    method is invoked, it will throw a `VerificationException`. This is
    just to avoid distracting boilerplate when looking at a Model that
    needs to go through a workflow process.
    """
    def decorator_func(func):
        """
        Decorator function that gets returned
        """
        @functools.wraps(func)
        def with_status_check(obj, *args, **kwargs):
            # Guard: reject the call outright if the object's current status
            # is not an allowed starting point for this transition.
            if obj.status not in valid_start_statuses:
                exception_msg = (
                    u"Error calling {} {}: status is '{}', must be one of: {}"
                ).format(func, obj, obj.status, valid_start_statuses)
                raise VerificationException(exception_msg)
            return func(obj, *args, **kwargs)

        return with_status_check

    return decorator_func


class PhotoVerification(StatusModel):
    """
    Each PhotoVerification represents a Student's attempt to establish
    their identity by uploading a photo of themselves and a picture ID. An
    attempt actually has a number of fields that need to be filled out at
    different steps of the approval process. While it's useful as a Django
    Model for the querying facilities, **you should only edit a
    `PhotoVerification` object through the methods provided**. Initialize
    them with a user:

        attempt = PhotoVerification(user=user)

    We track this attempt through various states:

        `created`
            Initial creation and state we're in after uploading the images.
        `ready`
            The user has uploaded their images and checked that they can read
            the images. There's a separate state here because it may be the
            case that we don't actually submit this attempt for review until
            payment is made.
        `submitted`
            Submitted for review. The review may be done by a staff member or
            an external service. The user cannot make changes once in this
            state.
        `must_retry`
            We submitted this, but there was an error on submission (i.e. we
            did not get a 200 when we POSTed to Software Secure)
        `approved`
            An admin or an external service has confirmed that the user's
            photo and photo ID match up, and that the photo ID's name matches
            the user's.
        `denied`
            The request has been denied. See `error_msg` for details on why.
            An admin might later override this and change to `approved`, but
            the student cannot re-open this attempt -- they have to create
            another attempt and submit it instead.

    Because this Model inherits from StatusModel, we can also do things like::

        attempt.status == PhotoVerification.STATUS.created
        attempt.status == "created"
        pending_requests = PhotoVerification.submitted.all()
    """
    ######################## Fields Set During Creation ########################
    # See class docstring for description of status states
    STATUS = Choices('created', 'ready', 'submitted', 'must_retry', 'approved', 'denied')
    user = models.ForeignKey(User, db_index=True)

    # They can change their name later on, so we want to copy the value here so
    # we always preserve what it was at the time they requested. We only copy
    # this value during the mark_ready() step. Prior to that, you should be
    # displaying the user's name from their user.profile.name.
    name = models.CharField(blank=True, max_length=255)

    # Where we place the uploaded image files (e.g. S3 URLs)
    face_image_url = models.URLField(blank=True, max_length=255)
    photo_id_image_url = models.URLField(blank=True, max_length=255)

    # Randomly generated UUID so that external services can post back the
    # results of checking a user's photo submission without us exposing actual
    # user IDs or something too easily guessable.
    receipt_id = models.CharField(
        db_index=True,
        default=generateUUID,
        max_length=255,
    )

    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True, db_index=True)

    # Indicates whether or not a user wants to see the verification status
    # displayed on their dash. Right now, only relevant for allowing students
    # to "dismiss" a failed midcourse reverification message
    # TODO: This field is deprecated.
    display = models.BooleanField(db_index=True, default=True)

    ######################## Fields Set When Submitting ########################
    submitted_at = models.DateTimeField(null=True, db_index=True)

    #################### Fields Set During Approval/Denial #####################
    # If the review was done by an internal staff member, mark who it was.
    reviewing_user = models.ForeignKey(
        User,
        db_index=True,
        default=None,
        null=True,
        related_name="photo_verifications_reviewed"
    )

    # Mark the name of the service used to evaluate this attempt (e.g
    # Software Secure).
    reviewing_service = models.CharField(blank=True, max_length=255)

    # If status is "denied", this should contain text explaining why.
    error_msg = models.TextField(blank=True)

    # Non-required field. External services can add any arbitrary codes as time
    # goes on. We don't try to define an exhaustive list -- this is just
    # capturing it so that we can later query for the common problems.
    error_code = models.CharField(blank=True, max_length=50)

    class Meta(object):
        app_label = "verify_student"
        abstract = True
        ordering = ['-created_at']

    ##### Methods listed in the order you'd typically call them

    @classmethod
    def _earliest_allowed_date(cls):
        """
        Returns the earliest allowed date given the settings
        """
        days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
        return datetime.now(pytz.UTC) - timedelta(days=days_good_for)

    @classmethod
    def user_is_verified(cls, user, earliest_allowed_date=None):
        """
        Return whether or not a user has satisfactorily proved their identity.
        Depending on the policy, this can expire after some period of time, so
        a user might have to renew periodically.

        This will check for the user's *initial* verification.
        """
        return cls.objects.filter(
            user=user,
            status="approved",
            created_at__gte=(earliest_allowed_date
                             or cls._earliest_allowed_date())
        ).exists()

    @classmethod
    def verification_valid_or_pending(cls, user, earliest_allowed_date=None, queryset=None):
        """
        Check whether the user has a complete verification attempt that is
        or *might* be good. This means that it's approved, been submitted,
        or would have been submitted but had a non-user error when it was
        being submitted. It's basically any situation in which the user has
        signed off on the contents of the attempt, and we have not yet
        received a denial.

        This will check for the user's *initial* verification.

        Arguments:
            user:
            earliest_allowed_date: earliest allowed date given in the
                settings
            queryset: If a queryset is provided, that will be used instead
                of hitting the database.

        Returns:
            queryset: queryset of 'PhotoVerification' sorted by 'created_at' in
            descending order.
        """
        valid_statuses = ['submitted', 'approved', 'must_retry']

        if queryset is None:
            queryset = cls.objects.filter(user=user)

        return queryset.filter(
            status__in=valid_statuses,
            created_at__gte=(
                earliest_allowed_date
                or cls._earliest_allowed_date()
            )
        ).order_by('-created_at')

    @classmethod
    def user_has_valid_or_pending(cls, user, earliest_allowed_date=None, queryset=None):
        """
        Check whether the user has an active or pending verification attempt

        Returns:
            bool: True or False according to existence of valid verifications
        """
        return cls.verification_valid_or_pending(user, earliest_allowed_date, queryset).exists()

    @classmethod
    def active_for_user(cls, user):
        """
        Return the most recent PhotoVerification that is marked ready (i.e. the
        user has said they're set, but we haven't submitted anything yet).

        This checks for the original verification.
        """
        # This should only be one at the most, but just in case we create more
        # by mistake, we'll grab the most recently created one.
        active_attempts = cls.objects.filter(user=user, status='ready').order_by('-created_at')
        if active_attempts:
            return active_attempts[0]
        else:
            return None

    @classmethod
    def user_status(cls, user):
        """
        Returns the status of the user based on their past verification attempts

        If no such verification exists, returns 'none'
        If verification has expired, returns 'expired'
        If the verification has been approved, returns 'approved'
        If the verification process is still ongoing, returns 'pending'
        If the verification has been denied and the user must resubmit photos,
        returns 'must_reverify'

        This checks initial verifications
        """
        status = 'none'
        error_msg = ''
        if cls.user_is_verified(user):
            status = 'approved'
        elif cls.user_has_valid_or_pending(user):
            # user_has_valid_or_pending does include 'approved', but if we are
            # here, we know that the attempt is still pending
            status = 'pending'
        else:
            # we need to check the most recent attempt to see if we need to ask them to do
            # a retry
            try:
                attempts = cls.objects.filter(user=user).order_by('-updated_at')
                attempt = attempts[0]
            except IndexError:
                # we return 'none'
                return ('none', error_msg)

            if attempt.created_at < cls._earliest_allowed_date():
                return (
                    'expired',
                    _("Your {platform_name} verification has expired.").format(platform_name=platform_name())
                )

            # If someone is denied their original verification attempt, they can try to reverify.
            if attempt.status == 'denied':
                status = 'must_reverify'

            if attempt.error_msg:
                error_msg = attempt.parsed_error_msg()

        return (status, error_msg)

    @classmethod
    def verification_for_datetime(cls, deadline, candidates):
        """Find a verification in a set that applied during a particular datetime.

        A verification is considered "active" during a datetime if:
        1) The verification was created before the datetime, and
        2) The verification is set to expire after the datetime.

        Note that verification status is *not* considered here,
        just the start/expire dates.

        If multiple verifications were active at the deadline,
        returns the most recently created one.

        Arguments:
            deadline (datetime): The datetime at which the verification applied.
                If `None`, then return the most recently created candidate.
            candidates (list of `PhotoVerification`s): Potential verifications to search through.

        Returns:
            PhotoVerification: A photo verification that was active at the deadline.
                If no verification was active, return None.
        """
        if len(candidates) == 0:
            return None

        # If there's no deadline, then return the most recently created verification
        if deadline is None:
            return candidates[0]

        # Otherwise, look for a verification that was in effect at the deadline,
        # preferring recent verifications.
        # If no such verification is found, implicitly return `None`
        for verification in candidates:
            if verification.active_at_datetime(deadline):
                return verification

    @property
    def expiration_datetime(self):
        """Datetime that the verification will expire. """
        days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
        return self.created_at + timedelta(days=days_good_for)

    def active_at_datetime(self, deadline):
        """Check whether the verification was active at a particular datetime.

        Arguments:
            deadline (datetime): The date at which the verification was active
                (created before and expired after).

        Returns:
            bool
        """
        return (
            self.created_at < deadline and
            self.expiration_datetime > deadline
        )

    def parsed_error_msg(self):
        """
        Sometimes, the error message we've received needs to be parsed into
        something more human readable

        The default behavior is to return the current error message as is.
        """
        return self.error_msg

    @status_before_must_be("created")
    def upload_face_image(self, img):
        # Abstract hook: concrete subclasses store the face photo.
        raise NotImplementedError

    @status_before_must_be("created")
    def upload_photo_id_image(self, img):
        # Abstract hook: concrete subclasses store the photo ID image.
        raise NotImplementedError

    @status_before_must_be("created")
    def mark_ready(self):
        """
        Mark that the user data in this attempt is correct. In order to
        succeed, the user must have uploaded the necessary images
        (`face_image_url`, `photo_id_image_url`). This method will also copy
        their name from their user profile. Prior to marking it ready, we read
        this value directly from their profile, since they're free to change it.
        This often happens because people put in less formal versions of their
        name on signup, but realize they want something different to go on a
        formal document.

        Valid attempt statuses when calling this method:
            `created`

        Status after method completes: `ready`

        Other fields that will be set by this method:
            `name`

        State Transitions:

        `created` → `ready`
            This is what happens when the user confirms to us that the
            pictures they uploaded are good. Note that we don't actually do
            a submission anywhere yet.
        """
        # At any point prior to this, they can change their names via their
        # student dashboard. But at this point, we lock the value into the
        # attempt.
        self.name = self.user.profile.name
        self.status = "ready"
        self.save()

    @status_before_must_be("must_retry", "submitted", "approved", "denied")
    def approve(self, user_id=None, service=""):
        """
        Approve this attempt. `user_id`

        Valid attempt statuses when calling this method:
            `submitted`, `approved`, `denied`

        Status after method completes: `approved`

        Other fields that will be set by this method:
            `reviewed_by_user_id`, `reviewed_by_service`, `error_msg`

        State Transitions:

        `submitted` → `approved`
            This is the usual flow, whether initiated by a staff user or an
            external validation service.
        `approved` → `approved`
            No-op. First one to approve it wins.
        `denied` → `approved`
            This might happen if a staff member wants to override a decision
            made by an external service or another staff member (say, in
            response to a support request). In this case, the previous values
            of `reviewed_by_user_id` and `reviewed_by_service` will be changed
            to whoever is doing the approving, and `error_msg` will be reset.
            The only record that this record was ever denied would be in our
            logs. This should be a relatively rare occurrence.
        """
        # If someone approves an outdated version of this, the first one wins
        if self.status == "approved":
            return

        log.info(u"Verification for user '{user_id}' approved by '{reviewer}'.".format(
            user_id=self.user, reviewer=user_id
        ))
        self.error_msg = ""  # reset, in case this attempt was denied before
        self.error_code = ""  # reset, in case this attempt was denied before
        self.reviewing_user = user_id
        self.reviewing_service = service
        self.status = "approved"
        self.save()

    @status_before_must_be("must_retry", "submitted", "approved", "denied")
    def deny(self, error_msg, error_code="", reviewing_user=None, reviewing_service=""):
        """
        Deny this attempt.

        Valid attempt statuses when calling this method:
            `submitted`, `approved`, `denied`

        Status after method completes: `denied`

        Other fields that will be set by this method:
            `reviewed_by_user_id`, `reviewed_by_service`, `error_msg`,
            `error_code`

        State Transitions:

        `submitted` → `denied`
            This is the usual flow, whether initiated by a staff user or an
            external validation service.
        `approved` → `denied`
            This might happen if a staff member wants to override a decision
            made by an external service or another staff member, or just
            correct a mistake made during the approval process. In this case,
            the previous values of `reviewed_by_user_id` and
            `reviewed_by_service` will be changed to whoever is doing the
            denying. The only record that this record was ever approved would
            be in our logs. This should be a relatively rare occurrence.
        `denied` → `denied`
            Update the error message and reviewing_user/reviewing_service.
            Just lets you amend the error message in case there were additional
            details to be made.
        """
        log.info(u"Verification for user '{user_id}' denied by '{reviewer}'.".format(
            user_id=self.user, reviewer=reviewing_user
        ))
        self.error_msg = error_msg
        self.error_code = error_code
        self.reviewing_user = reviewing_user
        self.reviewing_service = reviewing_service
        self.status = "denied"
        self.save()

    @status_before_must_be("must_retry", "submitted", "approved", "denied")
    def system_error(self, error_msg, error_code="", reviewing_user=None, reviewing_service=""):
        """
        Mark that this attempt could not be completed because of a system error.
        Status should be moved to `must_retry`. For example, if Software Secure
        reported to us that they couldn't process our submission because they
        couldn't decrypt the image we sent.
        """
        if self.status in ["approved", "denied"]:
            return  # If we were already approved or denied, just leave it.

        self.error_msg = error_msg
        self.error_code = error_code
        self.reviewing_user = reviewing_user
        self.reviewing_service = reviewing_service
        self.status = "must_retry"
        self.save()


class SoftwareSecurePhotoVerification(PhotoVerification):
    """
    Model to verify identity using a service provided by Software Secure. Much
    of the logic is inherited from `PhotoVerification`, but this class
    encrypts the photos.

    Software Secure (http://www.softwaresecure.com/) is a remote proctoring
    service that also does identity verification. A student uses their webcam
    to upload two images: one of their face, one of a photo ID. Due to the
    sensitive nature of the data, the following security precautions are
    taken:

        1. The snapshot of their face is encrypted using AES-256 in CBC mode.
           All face photos are encrypted with the same key, and this key is
           known to both Software Secure and edx-platform.

        2.
           The snapshot of a user's photo ID is also encrypted using AES-256,
           but the key is randomly generated using pycrypto's Random. Every
           verification attempt has a new key. The AES key is then encrypted
           using a public key provided by Software Secure. We store only the
           RSA-encrypted AES key. Since edx-platform does not have Software
           Secure's private RSA key, it means that we can no longer even read
           photo ID.

        3. The encrypted photos are base64 encoded and stored in an S3 bucket
           that edx-platform does not have read access to.

    Note: this model handles *initial* verifications (which you must perform
    at the time you register for a verified cert).
    """
    # This is a base64.urlsafe_encode(rsa_encrypt(photo_id_aes_key), ss_pub_key)
    # So first we generate a random AES-256 key to encrypt our photo ID with.
    # Then we RSA encrypt it with Software Secure's public key. Then we base64
    # encode that. The result is saved here. Actual expected length is 344.
    photo_id_key = models.TextField(max_length=1024)

    IMAGE_LINK_DURATION = 5 * 60 * 60 * 24  # 5 days in seconds

    copy_id_photo_from = models.ForeignKey("self", null=True, blank=True)

    @classmethod
    def get_initial_verification(cls, user, earliest_allowed_date=None):
        """Get initial verification for a user with the 'photo_id_key'.

        Arguments:
            user(User): user object
            earliest_allowed_date(datetime): override expiration date for
                initial verification

        Return:
            SoftwareSecurePhotoVerification (object) or None
        """
        init_verification = cls.objects.filter(
            user=user,
            status__in=["submitted", "approved"],
            created_at__gte=(
                earliest_allowed_date
                or cls._earliest_allowed_date()
            )
        ).exclude(photo_id_key='')

        return init_verification.latest('created_at') if init_verification.exists() else None

    @status_before_must_be("created")
    def upload_face_image(self, img_data):
        """
        Upload an image of the user's face to S3. `img_data` should be a raw
        bytestream of a PNG image. This method will take the data, encrypt it
        using our FACE_IMAGE_AES_KEY, encode it with base64 and save it to S3.

        Yes, encoding it to base64 adds compute and disk usage without much
        real benefit, but that's what the other end of this API is expecting
        to get.
        """
        # Skip this whole thing if we're running acceptance tests or if we're
        # developing and aren't interested in working on student identity
        # verification functionality. If you do want to work on it, you have to
        # explicitly enable these in your private settings.
        if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
            return

        aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
        aes_key = aes_key_str.decode("hex")

        s3_key = self._generate_s3_key("face")
        s3_key.set_contents_from_string(encrypt_and_encode(img_data, aes_key))

    @status_before_must_be("created")
    def upload_photo_id_image(self, img_data):
        """
        Upload the user's photo ID image to S3. `img_data` should be a raw
        bytestream of a PNG image. This method will take the data, encrypt it
        using a randomly generated AES key, encode it with base64 and save it
        to S3. The random key is also encrypted using Software Secure's public
        RSA key and stored in our `photo_id_key` field.

        Yes, encoding it to base64 adds compute and disk usage without much
        real benefit, but that's what the other end of this API is expecting
        to get.
        """
        # Skip this whole thing if we're running acceptance tests or if we're
        # developing and aren't interested in working on student identity
        # verification functionality. If you do want to work on it, you have to
        # explicitly enable these in your private settings.
        if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
            # fake photo id key is set only for initial verification
            self.photo_id_key = 'fake-photo-id-key'
            self.save()
            return

        aes_key = random_aes_key()
        rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
        rsa_encrypted_aes_key = rsa_encrypt(aes_key, rsa_key_str)

        # Upload this to S3
        s3_key = self._generate_s3_key("photo_id")
        s3_key.set_contents_from_string(encrypt_and_encode(img_data, aes_key))

        # Update our record fields
        self.photo_id_key = rsa_encrypted_aes_key.encode('base64')
        self.save()

    @status_before_must_be("must_retry", "ready", "submitted")
    def submit(self, copy_id_photo_from=None):
        """
        Submit our verification attempt to Software Secure for validation. This
        will set our status to "submitted" if the post is successful, and
        "must_retry" if the post fails.

        Keyword Arguments:
            copy_id_photo_from (SoftwareSecurePhotoVerification): If provided,
                re-send the ID photo data from this attempt. This is used for
                reverification, in which new face photos are sent with
                previously-submitted ID photos.
        """
        try:
            response = self.send_request(copy_id_photo_from=copy_id_photo_from)
            if response.ok:
                self.submitted_at = datetime.now(pytz.UTC)
                self.status = "submitted"
                self.save()
            else:
                self.status = "must_retry"
                self.error_msg = response.text
                self.save()
        except Exception as error:
            # Broad catch is deliberate here: any failure to reach Software
            # Secure should park the attempt in "must_retry", not crash.
            log.exception(error)
            self.status = "must_retry"
            self.save()

    def parsed_error_msg(self):
        """
        Parse the error messages we receive from SoftwareSecure

        Error messages are written in the form:

            `[{"photoIdReasons": ["Not provided"]}]`

        Returns a list of error messages
        """
        # Translates the category names and messages into something more human readable
        message_dict = {
            ("photoIdReasons", "Not provided"): _("No photo ID was provided."),
            ("photoIdReasons", "Text not clear"): _("We couldn't read your name from your photo ID image."),
            ("generalReasons", "Name mismatch"): _("The name associated with your account and the name on your ID do not match."),
            ("userPhotoReasons", "Image not clear"): _("The image of your face was not clear."),
            ("userPhotoReasons", "Face out of view"): _("Your face was not visible in your self-photo."),
        }

        try:
            msg_json = json.loads(self.error_msg)
            msg_dict = msg_json[0]

            msg = []
            for category in msg_dict:
                # find the messages associated with this category
                category_msgs = msg_dict[category]
                for category_msg in category_msgs:
                    msg.append(message_dict[(category, category_msg)])
            return u", ".join(msg)
        except (ValueError, KeyError):
            # if we can't parse the message as JSON or the category doesn't
            # match one of our known categories, show a generic error
            log.error('PhotoVerification: Error parsing this error message: %s', self.error_msg)
            return _("There was an error verifying your ID photos.")

    def image_url(self, name, override_receipt_id=None):
        """
        We dynamically generate this, since we want the expiration clock to
        start when the message is created, not when the record is created.

        Arguments:
            name (str): Name of the image (e.g. "photo_id" or "face")

        Keyword Arguments:
            override_receipt_id (str): If provided, use this receipt ID instead
                of the ID for this attempt. This is useful for reverification
                where we need to construct a URL to a previously-submitted
                photo ID image.

        Returns:
            string: The expiring URL for the image.
        """
        s3_key = self._generate_s3_key(name, override_receipt_id=override_receipt_id)
        return s3_key.generate_url(self.IMAGE_LINK_DURATION)

    def _generate_s3_key(self, prefix, override_receipt_id=None):
        """
        Generates a key for an s3 bucket location

        Example: face/4dd1add9-6719-42f7-bea0-115c008c4fca
        """
        conn = S3Connection(
            settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["AWS_ACCESS_KEY"],
            settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["AWS_SECRET_KEY"]
        )
        bucket = conn.get_bucket(settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["S3_BUCKET"])

        # Override the receipt ID if one is provided.
        # This allows us to construct S3 keys to images submitted in previous attempts
        # (used for reverification, where we send a new face photo with the same photo ID
        # from a previous attempt).
        receipt_id = self.receipt_id if override_receipt_id is None else override_receipt_id

        key = Key(bucket)
        key.key = "{}/{}".format(prefix, receipt_id)
        return key

    def _encrypted_user_photo_key_str(self):
        """
        Software Secure needs to have both UserPhoto and PhotoID decrypted in
        the same manner. So even though this is going to be the same for every
        request, we're also using RSA encryption to encrypt the AES key for
        faces.
        """
        face_aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
        face_aes_key = face_aes_key_str.decode("hex")
        rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
        rsa_encrypted_face_aes_key = rsa_encrypt(face_aes_key, rsa_key_str)

        return rsa_encrypted_face_aes_key.encode("base64")

    def create_request(self, copy_id_photo_from=None):
        """
        Construct the HTTP request to the photo verification service.

        Keyword Arguments:
            copy_id_photo_from (SoftwareSecurePhotoVerification): If provided,
                re-send the ID photo data from this attempt. This is used for
                reverification, in which new face photos are sent with
                previously-submitted ID photos.

        Returns:
            tuple of (header, body), where both `header` and `body`
            are dictionaries.
        """
        access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]
        secret_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]

        scheme = "https" if settings.HTTPS == "on" else "http"
        callback_url = "{}://{}{}".format(
            scheme, settings.SITE_NAME, reverse('verify_student_results_callback')
        )

        # If we're copying the photo ID image from a previous verification attempt,
        # then we need to send the old image data with the correct image key.
        photo_id_url = (
            self.image_url("photo_id")
            if copy_id_photo_from is None
            else self.image_url("photo_id", override_receipt_id=copy_id_photo_from.receipt_id)
        )

        photo_id_key = (
            self.photo_id_key
            if copy_id_photo_from is None
            else copy_id_photo_from.photo_id_key
        )

        body = {
            "EdX-ID": str(self.receipt_id),
            "ExpectedName": self.name,
            "PhotoID": photo_id_url,
            "PhotoIDKey": photo_id_key,
            "SendResponseTo": callback_url,
            "UserPhoto": self.image_url("face"),
            "UserPhotoKey": self._encrypted_user_photo_key_str(),
        }

        headers = {
            "Content-Type": "application/json",
            "Date": formatdate(timeval=None, localtime=False, usegmt=True)
        }

        _message, _sig, authorization = generate_signed_message(
            "POST", headers, body, access_key, secret_key
        )
        headers['Authorization'] = authorization

        return headers, body

    def request_message_txt(self):
        """
        This is the body of the request we send across. This is never actually
        used in the code, but exists for debugging purposes -- you can call
        `print attempt.request_message_txt()` on the console and get a
        readable rendering of the request that would be sent across, without
        actually sending anything.
        """
        headers, body = self.create_request()

        header_txt = "\n".join(
            "{}: {}".format(h, v) for h, v in sorted(headers.items())
        )
        body_txt = json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8')

        return header_txt + "\n\n" + body_txt

    def send_request(self, copy_id_photo_from=None):
        """
        Assembles a submission to Software Secure and sends it via HTTPS.

        Keyword Arguments:
            copy_id_photo_from (SoftwareSecurePhotoVerification): If provided,
                re-send the ID photo data from this attempt. This is used for
                reverification, in which new face photos are sent with
                previously-submitted ID photos.

        Returns:
            request.Response
        """
        # If AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING is True, we want to
        # skip posting anything to Software Secure. We actually don't even
        # create the message because that would require encryption and message
        # signing that rely on settings.VERIFY_STUDENT values that aren't set
        # in dev. So we just pretend like we successfully posted
        if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
            fake_response = requests.Response()
            fake_response.status_code = 200
            return fake_response

        headers, body = self.create_request(copy_id_photo_from=copy_id_photo_from)
        # NOTE(review): TLS certificate verification is disabled here
        # (verify=False) — confirm this is intentional for the Software Secure
        # endpoint.
        response = requests.post(
            settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_URL"],
            headers=headers,
            data=json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8'),
            verify=False
        )

        log.info("Sent request to Software Secure for receipt ID %s.", self.receipt_id)
        if copy_id_photo_from is not None:
            log.info(
                (
                    "Software Secure attempt with receipt ID %s used the same photo ID "
                    "data as the receipt with ID %s"
                ),
                self.receipt_id, copy_id_photo_from.receipt_id
            )

        log.debug("Headers:\n{}\n\n".format(headers))
        log.debug("Body:\n{}\n\n".format(body))
        log.debug("Return code: {}".format(response.status_code))
        log.debug("Return message:\n\n{}\n\n".format(response.text))

        return response

    @classmethod
    def verification_status_for_user(cls, user, course_id,
user_enrollment_mode): """ Returns the verification status for use in grade report. """ if user_enrollment_mode not in CourseMode.VERIFIED_MODES: return 'N/A' user_is_verified = cls.user_is_verified(user) if not user_is_verified: return 'Not ID Verified' else: return 'ID Verified' class VerificationDeadline(TimeStampedModel): """ Represent a verification deadline for a particular course. The verification deadline is the datetime after which users are no longer allowed to submit photos for initial verification in a course. Note that this is NOT the same as the "upgrade" deadline, after which a user is no longer allowed to upgrade to a verified enrollment. If no verification deadline record exists for a course, then that course does not have a deadline. This means that users can submit photos at any time. """ class Meta(object): app_label = "verify_student" course_key = CourseKeyField( max_length=255, db_index=True, unique=True, help_text=ugettext_lazy(u"The course for which this deadline applies"), ) deadline = models.DateTimeField( help_text=ugettext_lazy( u"The datetime after which users are no longer allowed " u"to submit photos for verification." ) ) # The system prefers to set this automatically based on default settings. But # if the field is set manually we want a way to indicate that so we don't # overwrite the manual setting of the field. deadline_is_explicit = models.BooleanField(default=False) # Maintain a history of changes to deadlines for auditing purposes history = HistoricalRecords() ALL_DEADLINES_CACHE_KEY = "verify_student.all_verification_deadlines" @classmethod def set_deadline(cls, course_key, deadline, is_explicit=False): """ Configure the verification deadline for a course. If `deadline` is `None`, then the course will have no verification deadline. In this case, users will be able to verify for the course at any time. Arguments: course_key (CourseKey): Identifier for the course. deadline (datetime or None): The verification deadline. 
""" if deadline is None: VerificationDeadline.objects.filter(course_key=course_key).delete() else: record, created = VerificationDeadline.objects.get_or_create( course_key=course_key, defaults={"deadline": deadline, "deadline_is_explicit": is_explicit} ) if not created: record.deadline = deadline record.deadline_is_explicit = is_explicit record.save() @classmethod def deadlines_for_courses(cls, course_keys): """ Retrieve verification deadlines for particular courses. Arguments: course_keys (list): List of `CourseKey`s. Returns: dict: Map of course keys to datetimes (verification deadlines) """ all_deadlines = cache.get(cls.ALL_DEADLINES_CACHE_KEY) if all_deadlines is None: all_deadlines = { deadline.course_key: deadline.deadline for deadline in VerificationDeadline.objects.all() } cache.set(cls.ALL_DEADLINES_CACHE_KEY, all_deadlines) return { course_key: all_deadlines[course_key] for course_key in course_keys if course_key in all_deadlines } @classmethod def deadline_for_course(cls, course_key): """ Retrieve the verification deadline for a particular course. Arguments: course_key (CourseKey): The identifier for the course. Returns: datetime or None """ try: deadline = cls.objects.get(course_key=course_key) return deadline.deadline except cls.DoesNotExist: return None @receiver(models.signals.post_save, sender=VerificationDeadline) @receiver(models.signals.post_delete, sender=VerificationDeadline) def invalidate_deadline_caches(sender, **kwargs): # pylint: disable=unused-argument """Invalidate the cached verification deadline information. """ cache.delete(VerificationDeadline.ALL_DEADLINES_CACHE_KEY) class VerificationCheckpoint(models.Model): """Represents a point at which a user is asked to re-verify his/her identity. Each checkpoint is uniquely identified by a (course_id, checkpoint_location) tuple. 
""" course_id = CourseKeyField(max_length=255, db_index=True) checkpoint_location = models.CharField(max_length=255) photo_verification = models.ManyToManyField(SoftwareSecurePhotoVerification) class Meta(object): app_label = "verify_student" unique_together = ('course_id', 'checkpoint_location') def __unicode__(self): """ Unicode representation of the checkpoint. """ return u"{checkpoint} in {course}".format( checkpoint=self.checkpoint_name, course=self.course_id ) @lazy def checkpoint_name(self): """Lazy method for getting checkpoint name of reverification block. Return location of the checkpoint if no related assessment found in database. """ checkpoint_key = UsageKey.from_string(self.checkpoint_location) try: checkpoint_name = modulestore().get_item(checkpoint_key).related_assessment except ItemNotFoundError: log.warning( u"Verification checkpoint block with location '%s' and course id '%s' " u"not found in database.", self.checkpoint_location, unicode(self.course_id) ) checkpoint_name = self.checkpoint_location return checkpoint_name def add_verification_attempt(self, verification_attempt): """Add the verification attempt in M2M relation of photo_verification. Arguments: verification_attempt(object): SoftwareSecurePhotoVerification object Returns: None """ self.photo_verification.add(verification_attempt) # pylint: disable=no-member def get_user_latest_status(self, user_id): """Get the status of the latest checkpoint attempt of the given user. Args: user_id(str): Id of user Returns: VerificationStatus object if found any else None """ try: return self.checkpoint_status.filter(user_id=user_id).latest() except ObjectDoesNotExist: return None @classmethod def get_or_create_verification_checkpoint(cls, course_id, checkpoint_location): """ Get or create the verification checkpoint for given 'course_id' and checkpoint name. 
Arguments: course_id (CourseKey): CourseKey checkpoint_location (str): Verification checkpoint location Raises: IntegrityError if create fails due to concurrent create. Returns: VerificationCheckpoint object if exists otherwise None """ with transaction.atomic(): checkpoint, __ = cls.objects.get_or_create(course_id=course_id, checkpoint_location=checkpoint_location) return checkpoint class VerificationStatus(models.Model): """This model is an append-only table that represents user status changes during the verification process. A verification status represents a user’s progress through the verification process for a particular checkpoint. """ SUBMITTED_STATUS = "submitted" APPROVED_STATUS = "approved" DENIED_STATUS = "denied" ERROR_STATUS = "error" VERIFICATION_STATUS_CHOICES = ( (SUBMITTED_STATUS, SUBMITTED_STATUS), (APPROVED_STATUS, APPROVED_STATUS), (DENIED_STATUS, DENIED_STATUS), (ERROR_STATUS, ERROR_STATUS) ) checkpoint = models.ForeignKey(VerificationCheckpoint, related_name="checkpoint_status") user = models.ForeignKey(User) status = models.CharField(choices=VERIFICATION_STATUS_CHOICES, db_index=True, max_length=32) timestamp = models.DateTimeField(auto_now_add=True) response = models.TextField(null=True, blank=True) error = models.TextField(null=True, blank=True) class Meta(object): app_label = "verify_student" get_latest_by = "timestamp" verbose_name = "Verification Status" verbose_name_plural = "Verification Statuses" @classmethod def add_verification_status(cls, checkpoint, user, status): """Create new verification status object. Arguments: checkpoint(VerificationCheckpoint): VerificationCheckpoint object user(User): user object status(str): Status from VERIFICATION_STATUS_CHOICES Returns: None """ cls.objects.create(checkpoint=checkpoint, user=user, status=status) @classmethod def add_status_from_checkpoints(cls, checkpoints, user, status): """Create new verification status objects for a user against the given checkpoints. 
Arguments: checkpoints(list): list of VerificationCheckpoint objects user(User): user object status(str): Status from VERIFICATION_STATUS_CHOICES Returns: None """ for checkpoint in checkpoints: cls.objects.create(checkpoint=checkpoint, user=user, status=status) @classmethod def get_user_status_at_checkpoint(cls, user, course_key, location): """ Get the user's latest status at the checkpoint. Arguments: user (User): The user whose status we are retrieving. course_key (CourseKey): The identifier for the course. location (UsageKey): The location of the checkpoint in the course. Returns: unicode or None """ try: return cls.objects.filter( user=user, checkpoint__course_id=course_key, checkpoint__checkpoint_location=unicode(location), ).latest().status except cls.DoesNotExist: return None @classmethod def get_user_attempts(cls, user_id, course_key, checkpoint_location): """ Get re-verification attempts against a user for a given 'checkpoint' and 'course_id'. Arguments: user_id (str): User Id string course_key (str): A CourseKey of a course checkpoint_location (str): Verification checkpoint location Returns: Count of re-verification attempts """ return cls.objects.filter( user_id=user_id, checkpoint__course_id=course_key, checkpoint__checkpoint_location=checkpoint_location, status=cls.SUBMITTED_STATUS ).count() @classmethod def get_location_id(cls, photo_verification): """Get the location ID of reverification XBlock. Args: photo_verification(object): SoftwareSecurePhotoVerification object Return: Location Id of XBlock if any else empty string """ try: verification_status = cls.objects.filter(checkpoint__photo_verification=photo_verification).latest() return verification_status.checkpoint.checkpoint_location except cls.DoesNotExist: return "" @classmethod def get_all_checkpoints(cls, user_id, course_key): """Return dict of all the checkpoints with their status. Args: user_id(int): Id of user. 
course_key(unicode): Unicode of course key Returns: dict: {checkpoint:status} """ all_checks_points = cls.objects.filter( user_id=user_id, checkpoint__course_id=course_key ) check_points = {} for check in all_checks_points: check_points[check.checkpoint.checkpoint_location] = check.status return check_points @classmethod def cache_key_name(cls, user_id, course_key): """Return the name of the key to use to cache the current configuration Args: user_id(int): Id of user. course_key(unicode): Unicode of course key Returns: Unicode cache key """ return u"verification.{}.{}".format(user_id, unicode(course_key)) @receiver(models.signals.post_save, sender=VerificationStatus) @receiver(models.signals.post_delete, sender=VerificationStatus) def invalidate_verification_status_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name """Invalidate the cache of VerificationStatus model. """ cache_key = VerificationStatus.cache_key_name( instance.user.id, unicode(instance.checkpoint.course_id) ) cache.delete(cache_key) # DEPRECATED: this feature has been permanently enabled. # Once the application code has been updated in production, # this table can be safely deleted. class InCourseReverificationConfiguration(ConfigurationModel): """Configure in-course re-verification. Enable or disable in-course re-verification feature. When this flag is disabled, the "in-course re-verification" feature will be disabled. When the flag is enabled, the "in-course re-verification" feature will be enabled. """ pass class IcrvStatusEmailsConfiguration(ConfigurationModel): """Toggle in-course reverification (ICRV) status emails Disabled by default. When disabled, ICRV status emails will not be sent. When enabled, ICRV status emails are sent. """ pass class SkippedReverification(models.Model): """Model for tracking skipped Reverification of a user against a specific course. 
If a user skipped a Reverification checkpoint for a specific course then in future that user cannot see the reverification link. """ user = models.ForeignKey(User) course_id = CourseKeyField(max_length=255, db_index=True) checkpoint = models.ForeignKey(VerificationCheckpoint, related_name="skipped_checkpoint") created_at = models.DateTimeField(auto_now_add=True) class Meta(object): app_label = "verify_student" unique_together = (('user', 'course_id'),) @classmethod @transaction.atomic def add_skipped_reverification_attempt(cls, checkpoint, user_id, course_id): """Create skipped reverification object. Arguments: checkpoint(VerificationCheckpoint): VerificationCheckpoint object user_id(str): User Id of currently logged in user course_id(CourseKey): CourseKey Returns: None """ cls.objects.create(checkpoint=checkpoint, user_id=user_id, course_id=course_id) @classmethod def check_user_skipped_reverification_exists(cls, user_id, course_id): """Check existence of a user's skipped re-verification attempt for a specific course. Arguments: user_id(str): user id course_id(CourseKey): CourseKey Returns: Boolean """ has_skipped = cls.objects.filter(user_id=user_id, course_id=course_id).exists() return has_skipped @classmethod def cache_key_name(cls, user_id, course_key): """Return the name of the key to use to cache the current configuration Arguments: user(User): user object course_key(CourseKey): CourseKey Returns: string: cache key name """ return u"skipped_reverification.{}.{}".format(user_id, unicode(course_key)) @receiver(models.signals.post_save, sender=SkippedReverification) @receiver(models.signals.post_delete, sender=SkippedReverification) def invalidate_skipped_verification_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name """Invalidate the cache of skipped verification model. """ cache_key = SkippedReverification.cache_key_name( instance.user.id, unicode(instance.course_id) ) cache.delete(cache_key)
unknown
codeparrot/codeparrot-clean
"""Jython bug with cell variables and yield""" from __future__ import generators def single_closure_single_value(): value = 0 a_closure = lambda : value yield a_closure() yield a_closure() def single_closure_multiple_values(): value = 0 a_closure = lambda : value yield a_closure() value = 1 yield a_closure() def multiple_closures_single_value(): value = 0 a_closure = lambda : value yield a_closure() a_closure = lambda : value yield a_closure() def multiple_closures_multiple_values(): value = 0 a_closure = lambda : value yield a_closure() value = 1 a_closure = lambda : value yield a_closure() tests={} for name in dir(): if 'closure' in name: test = eval(name) if name.endswith('single_value'): expected = [0,0] else: expected = [0,1] tests[test] = expected def test_main(verbose=None): from test.test_support import verify import sys for func in tests: expected = tests[func] result = list(func()) verify(result == expected, "%s: expected %s, got %s" % ( func.__name__, expected, result)) if __name__ == '__main__': test_main(1)
unknown
codeparrot/codeparrot-clean
from datetime import datetime, timedelta, timezone from pathlib import Path from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives.hashes import SHA256 from cryptography.hazmat.primitives.serialization import ( Encoding, NoEncryption, PrivateFormat, ) from cryptography.x509 import ( CertificateBuilder, DNSName, Name, NameAttribute, SubjectAlternativeName, random_serial_number, ) from cryptography.x509.oid import NameOID # https://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate def generate_keys(): folder = Path(__file__).parent key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend(), ) (folder / "localhost.key").write_bytes( key.private_bytes( encoding=Encoding.PEM, format=PrivateFormat.TraditionalOpenSSL, encryption_algorithm=NoEncryption(), ), ) subject = issuer = Name( [ NameAttribute(NameOID.COUNTRY_NAME, "IE"), NameAttribute(NameOID.ORGANIZATION_NAME, "Scrapy"), NameAttribute(NameOID.COMMON_NAME, "localhost"), ] ) cert = ( CertificateBuilder() .subject_name(subject) .issuer_name(issuer) .public_key(key.public_key()) .serial_number(random_serial_number()) .not_valid_before(datetime.now(tz=timezone.utc)) .not_valid_after(datetime.now(tz=timezone.utc) + timedelta(days=10)) .add_extension( SubjectAlternativeName([DNSName("localhost")]), critical=False, ) .sign(key, SHA256(), default_backend()) ) (folder / "localhost.crt").write_bytes(cert.public_bytes(Encoding.PEM))
python
github
https://github.com/scrapy/scrapy
tests/keys/__init__.py
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # vim: tabstop=4 shiftwidth=4 softtabstop=4 import unittest from nose.tools import * import binascii import inspect import json import logging import math import netaddr import os import signal import sys import time import traceback from random import randint from ryu import cfg # import all packet libraries. PKT_LIB_PATH = 'ryu.lib.packet' for modname, moddef in sys.modules.items(): if not modname.startswith(PKT_LIB_PATH) or not moddef: continue for (clsname, clsdef, ) in inspect.getmembers(moddef): if not inspect.isclass(clsdef): continue exec('from %s import %s' % (modname, clsname)) from ryu.base import app_manager from ryu.controller import handler from ryu.controller import ofp_event from ryu.controller.handler import set_ev_cls from ryu.exception import RyuException from ryu.lib import dpid as dpid_lib from ryu.lib import hub from ryu.lib import stringify from ryu.lib.packet import packet from ryu.ofproto import ofproto_protocol from ryu.ofproto import ofproto_v1_3 from ryu.ofproto import ofproto_v1_3_parser from ryu.ofproto import ofproto_v1_4 from ryu.tests.switch.tester import TestPatterns from ryu.tests.switch.tester import TestFile from ryu.tests.switch.tester import OfTester CONF = cfg.CONF LOG = logging.getLogger('test_tester') SAMPLE_DESC = "action: 00_OUTPUT" class Test_tester(unittest.TestCase): """ Test case for tester """ # action/00_OUTPUT.json 
test_json_1 = { "description": "ethernet/ipv4/tcp-->'actions=output:2'", "prerequisite": [ { "OFPFlowMod": { "table_id": 0, "instructions": [ { "OFPInstructionActions": { "actions": [ { "OFPActionOutput": { "port": "target_send_port_1" } } ], "type": 4 } } ] } } ], "tests": [ { "ingress": [ "ethernet(dst='22:22:22:22:22:22', \ src='12:11:11:11:11:11', ethertype=2048)", "ipv4(tos=32, proto=6, src='192.168.10.10', \ dst='192.168.20.20', ttl=64)", "tcp(dst_port=2222, option=str('\\x00' * 4), \ src_port=11111)", "'\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x0\ 8\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x1\ 2\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1\ b\\x1c\\x1d\\x1e\\x1f'" ], "egress":[ "ethernet(dst='22:22:22:22:22:22', \ src='12:11:11:11:11:11', ethertype=2048)", "ipv4(tos=32, proto=6, src='192.168.10.10', \ dst='192.168.20.20', ttl=64)", "tcp(dst_port=2222, option=str('\\x00' * 4), \ src_port=11111)", "'\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x0\ 8\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x1\ 2\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1\ b\\x1c\\x1d\\x1e\\x1f'" ] } ] } # group/00_ALL.json test_json_2 = { "description": "2Mbps(ethernet/ipv4/tcp)-->'in_port=1,\ actions=group:all(actions=output:2/actions=output:3)'", "prerequisite": [ { "OFPGroupMod": { "group_id": 0, "buckets": [ { "OFPBucket": { "actions": [ { "OFPActionOutput": { "port": "target_send_port_1" } } ] } }, { "OFPBucket": { "actions": [ { "OFPActionOutput": { "port": "target_send_port_2" } } ] } } ] } }, { "OFPFlowMod": { "match": { "OFPMatch": { "oxm_fields": [ { "OXMTlv": { "field": "in_port", "value": "target_recv_port" } } ] } }, "instructions": [ { "OFPInstructionActions": { "actions": [ { "OFPActionGroup": { "group_id": 0 } } ], "type": 4 } } ] } } ], "tests": [ { "ingress": { "packets": { "data": [ "ethernet(dst='22:22:22:22:22:22', \ src='12:11:11:11:11:11', ethertype=2048)", "ipv4(proto=6)", "tcp()", "str('\\x11' * (1500 - 54))" ], "pktps":175, "duration_time":30 } }, "egress":{ "throughput": [ { 
"OFPMatch": { "oxm_fields": [ { "OXMTlv": { "field": "in_port", "value": "tester_recv_port_1" } } ] }, "kbps": 2000 }, { "OFPMatch": { "oxm_fields": [ { "OXMTlv": { "field": "in_port", "value": "tester_recv_port_2" } } ] }, "kbps": 2000 } ] } } ] } # match/00_IN_PORT.json test_json_3 = { "description": "ethernet/ipv4/tcp-->'in_port=1,actions=output:2'", "prerequisite": [ { "OFPFlowMod": { "table_id": 0, "match": { "OFPMatch": { "oxm_fields": [ { "OXMTlv": { "field": "in_port", "value": "target_recv_port" } } ] } }, "instructions": [ { "OFPInstructionActions": { "actions": [ { "OFPActionOutput": { "port": "target_send_port_1" } } ], "type": 4 } } ] } } ], "tests": [ { "ingress": [ "ethernet(dst='22:22:22:22:22:22', \ src='12:11:11:11:11:11', ethertype=2048)", "ipv4(tos=32, proto=6, src='192.168.10.10', \ dst='192.168.20.20', ttl=64)", "tcp(dst_port=2222, option=str('\\x00' * 4), \ src_port=11111)", "'\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x0\ 8\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x1\ 2\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1\ b\\x1c\\x1d\\x1e\\x1f'" ], "egress":[ "ethernet(dst='22:22:22:22:22:22', \ src='12:11:11:11:11:11', ethertype=2048)", "ipv4(tos=32, proto=6, src='192.168.10.10',\ dst='192.168.20.20', ttl=64)", "tcp(dst_port=2222, option=str('\\x00' * 4), \ src_port=11111)", "'\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x0\ 8\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x1\ 2\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1\ b\\x1c\\x1d\\x1e\\x1f'" ] } ] } # meter/01_DROP_00_KBPS_00_1M.json test_json_4 = { "description": "2Mbps(ethernet/ipv4/tcp)-->'in_port=1,\ actions=meter:1Mbps(drop),output:2'", "prerequisite": [ { "OFPMeterMod": { "meter_id": 1, "bands": [ { "OFPMeterBandDrop": { "rate": 1000 } } ] } }, { "OFPFlowMod": { "match": { "OFPMatch": { "oxm_fields": [ { "OXMTlv": { "field": "in_port", "value": "target_recv_port" } } ] } }, "instructions": [ { "OFPInstructionMeter": { "meter_id": 1 } }, { "OFPInstructionActions": { "actions": [ { "OFPActionOutput": { "port": 
"target_send_port_1" } } ], "type": 4 } } ] } } ], "tests": [ { "ingress": { "packets": { "data": [ "ethernet(dst='22:22:22:22:22:22', \ src='12:11:11:11:11:11', ethertype=2048)", "ipv4(proto=6)", "tcp()", "str('\\x11' * (1500 - 54))" ], "pktps":175, "duration_time":30 } }, "egress":{ "throughput": [ { "OFPMatch": { "oxm_fields": [ { "OXMTlv": { "field": "in_port", "value": "tester_recv_port_1" } } ] }, "kbps": 1000 } ] } } ] } def setUp(self): OfTester.tester_ver = ofproto_v1_3.OFP_VERSION OfTester.target_ver = ofproto_v1_3.OFP_VERSION def tearDown(self): pass def test__normalize_test_json(self): self.tests = TestPatterns( "../switch/of13/action/00_OUTPUT.json", logging.getLogger("test_tester")) self.tests[SAMPLE_DESC]._normalize_test_json(Test_tester.test_json_1) self.tests[SAMPLE_DESC]._normalize_test_json(Test_tester.test_json_2) self.tests[SAMPLE_DESC]._normalize_test_json(Test_tester.test_json_3) self.tests[SAMPLE_DESC]._normalize_test_json(Test_tester.test_json_4) # action/00_OUTPUT.json eq_(Test_tester.test_json_1["prerequisite"][0]["OFPFlowMod"][ "instructions"][0]["OFPInstructionActions"][ "actions"][0]["OFPActionOutput"]["port"], CONF['test-switch']['target_send_port_1']) # group/00_ALL.json eq_(Test_tester.test_json_2["prerequisite"][1]["OFPFlowMod"][ "match"]["OFPMatch"]["oxm_fields"][0]["OXMTlv"]["value"], CONF['test-switch']['target_recv_port']) eq_(Test_tester.test_json_2["prerequisite"][0]["OFPGroupMod"][ "buckets"][0]["OFPBucket"]["actions"][0]["OFPActionOutput"][ "port"], CONF['test-switch']['target_send_port_1']) eq_(Test_tester.test_json_2["prerequisite"][0]["OFPGroupMod"][ "buckets"][1]["OFPBucket"]["actions"][0]["OFPActionOutput"][ "port"], CONF['test-switch']['target_send_port_2']) eq_(Test_tester.test_json_2["tests"][0]["egress"]["throughput"][ 0]["OFPMatch"]["oxm_fields"][0]["OXMTlv"]["value"], CONF['test-switch']['tester_recv_port_1']) eq_(Test_tester.test_json_2["tests"][0]["egress"]["throughput"][ 
1]["OFPMatch"]["oxm_fields"][0]["OXMTlv"]["value"], CONF['test-switch']['tester_recv_port_2']) # match/00_IN_PORT.json eq_(Test_tester.test_json_3["prerequisite"][0]["OFPFlowMod"][ "match"]["OFPMatch"]["oxm_fields"][0]["OXMTlv"]["value"], CONF['test-switch']['target_recv_port']) eq_(Test_tester.test_json_3["prerequisite"][0]["OFPFlowMod"][ "instructions"][0]["OFPInstructionActions"]["actions"][0][ "OFPActionOutput"]["port"], CONF['test-switch'][ 'target_send_port_1']) # meter/01_DROP_00_KBPS_00_1M.json eq_(Test_tester.test_json_4["prerequisite"][1]["OFPFlowMod"][ "match"]["OFPMatch"]["oxm_fields"][0]["OXMTlv"]["value"], CONF['test-switch']['target_recv_port']) eq_(Test_tester.test_json_4["prerequisite"][1]["OFPFlowMod"][ "instructions"][1]["OFPInstructionActions"]["actions"][0][ "OFPActionOutput"]["port"], CONF['test-switch']['target_send_port_1']) eq_(Test_tester.test_json_4["tests"][0]["egress"]["throughput"][ 0]["OFPMatch"]["oxm_fields"][0]["OXMTlv"]["value"], CONF['test-switch']['tester_recv_port_1'])
unknown
codeparrot/codeparrot-clean
""" This file implements the Middleware support for the Open edX platform. A microsite enables the following features: 1) Mapping of sub-domain name to a 'brand', e.g. foo-university.edx.org 2) Present a landing page with a listing of courses that are specific to the 'brand' 3) Ability to swap out some branding elements in the website """ from django.conf import settings from microsite_configuration import microsite class MicrositeMiddleware(object): """ Middleware class which will bind configuration information regarding 'microsites' on a per request basis. The actual configuration information is taken from Django settings information """ def process_request(self, request): """ Middleware entry point on every request processing. This will associate a request's domain name with a 'University' and any corresponding microsite configuration information """ microsite.clear() domain = request.META.get('HTTP_HOST', None) microsite.set_by_domain(domain) return None def process_response(self, request, response): """ Middleware entry point for request completion. """ microsite.clear() return response
unknown
codeparrot/codeparrot-clean
/* * Copyright 2014-2023 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ package io.ktor.tests.server.netty import io.ktor.client.* import io.ktor.client.request.* import io.ktor.http.* import io.ktor.http.content.* import io.ktor.network.selector.* import io.ktor.network.sockets.* import io.ktor.server.engine.* import io.ktor.server.netty.* import io.ktor.server.request.* import io.ktor.server.response.* import io.ktor.server.routing.* import io.ktor.server.test.base.* import io.ktor.server.testing.* import io.ktor.utils.io.* import io.ktor.utils.io.core.* import kotlinx.coroutines.* import kotlin.io.use import kotlin.test.* class NettyReadRequestTimeoutTest : EngineTestBase<NettyApplicationEngine, NettyApplicationEngine.Configuration>(Netty) { companion object { private const val TEST_SERVER_HOST = "127.0.0.1" private const val SUCCESS_RESPONSE = "HTTP/1.1 200 OK" private const val REQUEST_TIMEOUT_RESPONSE = "HTTP/1.1 408 Request Timeout" private const val BODY = "Hello world" } private fun getServer(timeout: Int?) = embeddedServer( Netty, module = { routing { get("/echo") { call.respondText(call.receiveText()) } } }, configure = { connector { this.port = this@NettyReadRequestTimeoutTest.port this.host = TEST_SERVER_HOST } if (timeout != null) { requestReadTimeoutSeconds = timeout } } ) private fun requestTimeout(timeout: Int?) 
= requestTimeoutTest(timeout) { writer, reader -> performAndCheckSuccessRequest("/echo", writer, reader) } @Test fun `no request timeout`() = requestTimeout(timeout = null) @Test fun `big request timeout`() = requestTimeout(timeout = Int.MAX_VALUE) @Test fun `request with readTimeout`() = requestTimeoutTest(timeout = 1) { writer, reader -> performAndCheckRequestTimeoutRequest("/echo", timeout = 1000, writer, reader) } @Test fun `success request and readTimeout request`() = requestTimeoutTest(timeout = 1) { writer, reader -> performAndCheckSuccessRequest("/echo", writer, reader) performAndCheckRequestTimeoutRequest("/echo", timeout = 1000, writer, reader) } @Test fun `test with ktor HttpClient`() = requestTimeoutTest(timeout = 1) { _, _ -> val client = HttpClient() client.performAndCheckRequestWithTimeout() } @Test fun `parallel requests`() = requestTimeoutTest(timeout = 1) { _, _ -> val client = HttpClient() client.performAndCheckRequestWithTimeout() client.performAndCheckRequestWithoutTimeout() } @Ignore @Test fun `parallel timeout requests`() = requestTimeoutTest(timeout = 1) { _, _ -> val client = HttpClient() client.performAndCheckRequestWithTimeout() client.performAndCheckRequestWithTimeout() } private suspend fun HttpClient.performAndCheckRequestWithTimeout() { get { url(host = TEST_SERVER_HOST, path = "/echo", port = this@NettyReadRequestTimeoutTest.port) setBody(object : OutgoingContent.WriteChannelContent() { override suspend fun writeTo(channel: ByteWriteChannel) { delay(1100) channel.writeFully("Hello world".toByteArray()) } }) }.apply { assertEquals(HttpStatusCode.RequestTimeout, status) } } private suspend fun HttpClient.performAndCheckRequestWithoutTimeout() { get { url(host = TEST_SERVER_HOST, path = "/echo", port = this@NettyReadRequestTimeoutTest.port) setBody("Hello world") }.apply { assertEquals(HttpStatusCode.OK, status) } } private suspend fun performAndCheckSuccessRequest( path: String, writeChannel: ByteWriteChannel, readChannel: 
ByteReadChannel ) { writeChannel.writeHeaders(path) writeChannel.writeBody() val response = readAvailable(readChannel) assertTrue(response.contains(SUCCESS_RESPONSE)) assertTrue(response.contains(BODY)) assertFalse(readChannel.isClosedForRead) } private suspend fun performAndCheckRequestTimeoutRequest( path: String, timeout: Long = 1000, writeChannel: ByteWriteChannel, readChannel: ByteReadChannel ) { writeChannel.writeHeaders(path) delay(timeout) val response = readAvailable(readChannel) assertTrue(response.contains(REQUEST_TIMEOUT_RESPONSE)) // wait for channel to close delay(1000) assertTrue(readChannel.isClosedForRead) } private fun requestTimeoutTest( timeout: Int? = null, block: suspend (ByteWriteChannel, ByteReadChannel) -> Unit ) = runTest { val server = getServer(timeout) server.start(wait = false) SelectorManager().use { aSocket(it).tcp().connect(TEST_SERVER_HOST, port).use { socket -> val writeChannel = socket.openWriteChannel() val readChannel = socket.openReadChannel() block(writeChannel, readChannel) } } server.stop() } private suspend fun ByteWriteChannel.writeHeaders(path: String) { writeStringUtf8("GET $path HTTP/1.1\r\n") writeStringUtf8("Host: $TEST_SERVER_HOST\r\n") writeStringUtf8("Content-Type: text/plain\r\n") writeStringUtf8("Content-Length: ${BODY.length}\r\n") writeStringUtf8("\r\n") flush() } private suspend fun ByteWriteChannel.writeBody() { writeStringUtf8("$BODY\r\n") writeStringUtf8("\r\n") flush() } private suspend fun readAvailable(channel: ByteReadChannel): String { val buffer = ByteArray(1024) val length = channel.readAvailable(buffer) return buffer.decodeToString(0, 0 + length) } }
kotlin
github
https://github.com/ktorio/ktor
ktor-server/ktor-server-netty/jvm/test/io/ktor/tests/server/netty/NettyReadRequestTimeoutTest.kt
import trep from trep import tx,ty,tz,rx,ry,rz import time import trep.visual as visual dt = 0.01 tf = 10.0 def simulate_system(system): # Now we'll extract the current configuration into a tuple to use as # initial conditions for a variational integrator. q0 = system.q # Create and initialize the variational integrator mvi = trep.MidpointVI(system) mvi.initialize_from_configs(0.0, q0, dt, q0) # This is our simulation loop. We save the results in two lists. q = [mvi.q2] t = [mvi.t2] while mvi.t1 < tf: mvi.step(mvi.t2+dt) q.append(mvi.q2) t.append(mvi.t2) return (t,q) system = trep.System() system.import_frames([ rx('theta1'), [ tz(2, mass=1, name='pend1') ], ty(1), [ rx('theta2'), [ tz(2, mass=1, name='pend2') ]] ]) trep.potentials.LinearSpring(system, 'pend1', 'pend2', k=20, x0=1) trep.forces.LinearDamper(system, 'pend1', 'pend2', c=1) trep.potentials.Gravity(system, name="Gravity") system.q = [3,-3] # Simulate start = time.clock() (t, q) = simulate_system(system) finish = time.clock() # Display print "Simulation: dt=%f, tf=%f, runtime=%f s" % (dt, tf, finish-start) visual.visualize_3d([ visual.VisualItem3D(system, t, q) ])
unknown
codeparrot/codeparrot-clean
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Create models for computing firing rates of neuronal populations for a given stimulus with almost convolutional structure, exponential NL and optimize for few cells at a time. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path import collections import tensorflow as tf from absl import app from absl import flags from absl import gfile import cPickle as pickle import numpy as np import random from retina.response_model.python.population_subunits.coarse.analysis import almost_convolutional class AlmostConvolutionalExponentialDropout(almost_convolutional.AlmostConvolutionalModel): # Model firing rate for a cell population by almost convolutional subunits. def __init__(self, loss_string, stim, resp, short_filename, window=2, stride=1, lam_w=0, step_sz=1, n_cells=107, task_id=0): ''' firing rate for cell c: lam_c = a_sfm_c'.relu(w.x + bias_su) + bias_cell, x: stimulus, lam_c: firing rate of cell bias_c and bias_su : cell and subunit bias a_sfm_c = softmax(a) : so a cell cannot be connected to all subunits equally well. where w_i are over a small window which are convolutionally related with each other. w_i = w_mother + w_del_i, where w_mother is common accross all 'windows' and w_del is different for different windows. 
stim, resp: the stimulus and response data tensors short_filename: filename to store results window: (2*window +1) is the convolutional window size stride: stride for convolutions lam_w : regularizing modification weights step_sz : step size for SGD n_cells : total number of cells in response tensor. ''' # add model specific names to filename short_filename = ('model=' + 'almost_convolutional_exponential_dropout' + '_window=' + str(window) + '_stride=' + str(stride) + '_lam_w=' + str(lam_w) + short_filename) # convolution_parameters model_params = collections.namedtuple("model_params", ["mask_tf", "dimx", "dimy", "n_pix", "window", "stride", "n_cells"]) mask_tf, dimx, dimy, n_pix = almost_convolutional.get_windows(window, stride) model_pars = model_params(mask_tf, dimx, dimy, n_pix, window, stride, n_cells) # variables model_vars = self.build_variables(model_pars) # get firing rate lam_normalized, su_act_softmax, su_act, lam = self.build_firing_rate(model_vars, model_pars, stim) # get loss according to specification #if not loss_string == 'conditional_poisson': # print(loss_string) # raise ValueError('Inconsistent loss type and model') if loss_string == 'poisson': fraction=0.2 # select only 20% of the cells select_cells = tf.random_uniform([model_pars.n_cells]) > (1-fraction) print(select_cells) lam_select_cells = tf.transpose(tf.boolean_mask(tf.transpose(lam), select_cells)) resp_select_cells = tf.transpose(tf.boolean_mask(tf.transpose(resp), select_cells)) loss_unregularized = tf.reduce_mean(lam_select_cells/120. 
- resp_select_cells*tf.log(lam_select_cells)) # poisson loss if loss_string == 'conditional_poisson' : loss_unregularized = -tf.reduce_mean(resp*tf.log(lam_normalized)) # regularization keep 'delta' weights small regularization = lam_w * tf.reduce_sum(tf.nn.l2_loss(model_vars.w_del)) loss = loss_unregularized + regularization # add regularization gradient_update = tf.train.AdagradOptimizer(step_sz).minimize(loss) #with tf.control_dependencies([gradient_update]): # scale_biases = tf.reduce_sum(resp,0)/tf.reduce_sum(lam,0) # bias_cell_su_scale = tf.assign(model_vars.bias_cell_su, # model_vars.bias_cell_su - # tf.log(scale_biases)) # make a combined model update op model_update = gradient_update #tf.group(gradient_update, bias_cell_su_scale) # make model probes model_probes = collections.namedtuple("model_probes", ["su_act_softmax", "su_act", "lam_normalized", "loss", "loss_unregularized", "stim", "resp", "lam", "select_cells"]) model_prb = model_probes(su_act_softmax, su_act, lam_normalized, loss, loss_unregularized, stim, resp, lam, select_cells) self.stim = stim self.resp = resp self.params = model_pars self.update = model_update self.probes = model_prb self.variables = model_vars self.short_filename = short_filename self.build_summaries() def build_variables(self, model_pars): # get convolutional windows dimx = model_pars.dimx dimy = model_pars.dimy n_pix = model_pars.n_pix window = model_pars.window n_cells =model_pars.n_cells # build model variables w_mother = tf.Variable(np.array(1 + 0 * np.random.rand(2 * window + 1, 2 * window + 1, 1, 1), dtype='float32'), name='w_mother') w_del = tf.Variable(np.array(0.5*np.random.randn(dimx, dimy, n_pix), dtype='float32'), name='w_del') # initialize bias_cell_su to 0. 
use initialize_b to bias_cell_su = tf.Variable(np.array(0.0*np.random.randn(1, dimx, dimy, n_cells), dtype='float32'), name='bias_cell') # collect model parameters model_variables = collections.namedtuple("model_variables", ["w_mother", "w_del", "bias_cell_su"]) model_vars = model_variables(w_mother, w_del, bias_cell_su) return model_vars def build_firing_rate(self, model_vars, model_pars, stim): # now compute the firing rate and subunit activations # given stimulus-response and model parameters # get model parameters mask_tf = model_pars.mask_tf dimx = model_pars.dimx dimy = model_pars.dimy stride = model_pars.stride n_cells = model_pars.n_cells # get model variables w_mother = model_vars.w_mother w_del = model_vars.w_del bias_cell_su = model_vars.bias_cell_su stim4D = tf.expand_dims(tf.reshape(stim, (-1, 40, 80)), 3) stim_convolved = tf.reduce_sum(tf.nn.conv2d(stim4D, w_mother, strides=[1, stride, stride, 1], padding="VALID"), 3) stim_masked = tf.nn.conv2d(stim4D, mask_tf, strides=[1, stride, stride, 1], padding="VALID") stim_del = tf.reduce_sum(tf.mul(stim_masked, w_del), 3) # input from convolutional SU and delta SU su_act_raw = tf.expand_dims(stim_del + stim_convolved, 3) # time x dimx x dimy x 1 su_act = su_act_raw + bias_cell_su # time x dimx x dimy x n_cells # softmax for each cell over time and subunits su_act_softmax = tf.reshape(tf.nn.softmax(tf.reshape(su_act, [-1, n_cells]), dim=0), [-1, dimx, dimy, n_cells]) lam_normalized = tf.reduce_sum(tf.reduce_sum(su_act_softmax, 2), 1) # calculate actual firing rate lam = tf.reduce_sum(tf.reduce_sum(tf.exp(su_act), 2), 1) return lam_normalized, su_act_softmax, su_act, lam def initialize_variables(self, sess): ## Initialize variables or restore from previous fits sess.run(tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())) saver_var = tf.train.Saver(tf.all_variables(), keep_checkpoint_every_n_hours=4) load_prev = False start_iter = 0 try: # restore previous fits if they are available # - 
useful when programs are preempted frequently on . latest_filename = self.short_filename + '_latest_fn' restore_file = tf.train.latest_checkpoint(self.save_location, latest_filename) # restore previous iteration count and start from there. start_iter = int(restore_file.split('/')[-1].split('-')[-1]) saver_var.restore(sess, restore_file) # restore variables load_prev = True except: tf.logging.info('No previous dataset') if load_prev: tf.logging.info('Previous results loaded from: ' + restore_file) else: self.initialize_b(sess) tf.logging.info('Variables initialized') writer = tf.summary.FileWriter(self.save_location + 'train', sess.graph) tf.logging.info('Loaded iteration: %d' % start_iter) self.saver_var = saver_var self.iter = start_iter self.writer = writer def initialize_b(self,sess, n_batches_init=20): # initialize b based on <yexp(kx)> tf.logging.info('initializing b_cell_su') # setup data threads coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) tf.logging.info('threads started') resp_expanded = tf.expand_dims(tf.expand_dims(self.probes.resp, 1), 2) b_avg = tf.expand_dims(tf.reduce_mean(tf.mul(self.probes.su_act, resp_expanded), 0), 0) b_initialize = np.zeros((1, self.params.dimx, self.params.dimy, self.params.n_cells)) for ibatch in range(n_batches_init): print('init b: %d' % ibatch) b_initialize += sess.run(b_avg) b_initialize /= 1000*n_batches_init #b_max = np.max(np.reshape(b_initialize, [-1, self.params.n_cells]), axis=0) #mask = b_initialize > b_max*0.6 #b_initial_masked = - 40*(1-mask) #b_initial_masked_reshape = np.reshape(b_initial_masked, [1, self.params.dimx, # self.params.dimy, # self.params.n_cells]) # from IPython.terminal.embed import InteractiveShellEmbed # ipshell = InteractiveShellEmbed() # ipshell() b_init_square = np.zeros((1, self.params.dimx, self.params.dimy, self.params.n_cells)) for icell in np.arange(self.params.n_cells): ix, iy = np.where(b_initialize[0, :, :, icell] == 
np.max(np.ndarray.flatten(b_initialize[0, :, :, icell]))) ix = int(ix) iy = int(iy) xx = -40*np.ones((self.params.dimx, self.params.dimy)) xx[ix-5:ix+5, iy-5:iy+5] = 0 b_init_square[0, :, :, icell] = xx b_init_tf = tf.assign(self.variables.bias_cell_su, b_init_square.astype(np.float32)) sess.run(b_init_tf) #coord.request_stop() #coord.join(threads) tf.logging.info('b_cell_su initialzed based on average activity')
unknown
codeparrot/codeparrot-clean
package checktyperegisterer import ( "context" "fmt" "maps" "strings" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/grafana/grafana-app-sdk/app" "github.com/grafana/grafana-app-sdk/k8s" "github.com/grafana/grafana-app-sdk/logging" "github.com/grafana/grafana-app-sdk/resource" advisorv0alpha1 "github.com/grafana/grafana/apps/advisor/pkg/apis/advisor/v0alpha1" "github.com/grafana/grafana/apps/advisor/pkg/app/checkregistry" "github.com/grafana/grafana/apps/advisor/pkg/app/checks" "github.com/grafana/grafana/pkg/services/org" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Runner is a "runnable" app used to be able to expose and API endpoint // with the existing checks types. This does not need to be a CRUD resource, but it is // the only way existing at the moment to expose the check types. type Runner struct { checkRegistry checkregistry.CheckService client resource.Client orgService org.Service stackID string log logging.Logger retryAttempts int retryDelay time.Duration } var _ app.Runnable = (*Runner)(nil) // NewRunner creates a new Runner. 
func New(cfg app.Config, log logging.Logger) (*Runner, error) { // Read config specificConfig, ok := cfg.SpecificConfig.(checkregistry.AdvisorAppConfig) if !ok { return nil, fmt.Errorf("invalid config type") } checkRegistry := specificConfig.CheckRegistry orgService := specificConfig.OrgService // Prepare storage client clientGenerator := k8s.NewClientRegistry(cfg.KubeConfig, k8s.ClientConfig{}) client, err := clientGenerator.ClientFor(advisorv0alpha1.CheckTypeKind()) if err != nil { return nil, err } return &Runner{ checkRegistry: checkRegistry, client: client, orgService: orgService, stackID: specificConfig.StackID, log: log.With("runner", "advisor.checktyperegisterer"), retryAttempts: 5, retryDelay: time.Second * 10, }, nil } func (r *Runner) Run(ctx context.Context) error { logger := r.log.WithContext(ctx) // If stackID is empty and OrgService is nil, do nothing (on-demand registration only) if r.stackID == "" && r.orgService == nil { logger.Debug("Auto-registration of checktypes disabled") return nil } // Determine namespaces based on StackID or OrgID namespaces, err := checks.GetNamespaces(ctx, r.stackID, r.orgService) if err != nil { return fmt.Errorf("failed to get namespaces: %w", err) } logger.Debug("Registering check types", "namespaces", len(namespaces)) // Register check types in each namespace for _, namespace := range namespaces { err := r.RegisterCheckTypesInNamespace(ctx, logger, namespace) if err != nil { return fmt.Errorf("failed to register check types in namespace %s: %w", namespace, err) } } return nil } func (r *Runner) registerCheckType(ctx context.Context, logger logging.Logger, checkType string, obj resource.Object) error { for i := 0; i < r.retryAttempts; i++ { current, err := r.client.Get(ctx, obj.GetStaticMetadata().Identifier()) if err != nil { if errors.IsNotFound(err) { // Check type does not exist, create it err = r.create(context.WithoutCancel(ctx), logger, obj) if err != nil { if !r.shouldRetry(err, logger, i+1, checkType) { 
return nil } // Retry continue } // Success logger.Debug("Check type created successfully", "check_type", checkType) break } if !r.shouldRetry(err, logger, i+1, checkType) { return nil } // Retry continue } // Check type already exists, check if it's the same and update if needed logger.Debug("Check type already exists, checking if it's the same", "identifier", obj.GetStaticMetadata().Identifier()) if r.needsUpdate(current, obj, logger) { err = r.update(context.WithoutCancel(ctx), logger, obj, current) if err != nil { if !r.shouldRetry(err, logger, i+1, checkType) { return nil } // Retry continue } // Success logger.Debug("Check type updated successfully", "check_type", checkType) break } // Check type is the same, no need to update logger.Debug("Check type already registered", "check_type", checkType) break } return nil } func (r *Runner) shouldRetry(err error, logger logging.Logger, attempt int, checkType string) bool { logger.Debug("Error storing check type", "error", err, "attempt", attempt) if isAPIServerShuttingDown(err, logger) { return false } if attempt == r.retryAttempts-1 { logger.Error("Unable to register check type", "check_type", checkType, "error", err) return false } // Calculate exponential backoff delay: baseDelay * 2^attempt delay := r.retryDelay * time.Duration(1<<attempt) time.Sleep(delay) return true } func (r *Runner) create(ctx context.Context, log logging.Logger, obj resource.Object) error { id := obj.GetStaticMetadata().Identifier() _, err := r.client.Create(ctx, id, obj, resource.CreateOptions{}) if err != nil { return err } log.Debug("Check type created successfully", "identifier", id) return nil } func (r *Runner) needsUpdate(current, newObj resource.Object, log logging.Logger) bool { needsUpdate := false // Check if the object annotations exist in the current object currentAnnotations := current.GetAnnotations() if currentAnnotations == nil { currentAnnotations = make(map[string]string) } annotations := newObj.GetAnnotations() for k, v 
:= range annotations { if currentAnnotations[k] != v { needsUpdate = true } } // Compare checktype spec steps with current steps currentCheckType := current.(*advisorv0alpha1.CheckType) newCheckType := newObj.(*advisorv0alpha1.CheckType) newSteps := newCheckType.Spec.Steps currentSteps := currentCheckType.Spec.Steps if !cmp.Equal(newSteps, currentSteps, cmpopts.SortSlices(func(a, b advisorv0alpha1.CheckTypeStep) bool { return a.StepID < b.StepID })) { log.Debug("Check type step mismatch, updating", "identifier", newObj.GetStaticMetadata().Identifier()) needsUpdate = true } return needsUpdate } func (r *Runner) update(ctx context.Context, log logging.Logger, obj resource.Object, current resource.Object) error { id := obj.GetStaticMetadata().Identifier() log.Debug("Updating check type", "identifier", id) currentAnnotations := current.GetAnnotations() if currentAnnotations == nil { currentAnnotations = make(map[string]string) } annotations := obj.GetAnnotations() maps.Copy(currentAnnotations, annotations) obj.SetAnnotations(currentAnnotations) // This will update the annotations in the object _, err := r.client.Update(ctx, id, obj, resource.UpdateOptions{}) if err != nil && !errors.IsAlreadyExists(err) { // Ignore the error, it's probably due to a race condition log.Info("Error updating check type, ignoring", "error", err) } log.Debug("Check type updated successfully", "identifier", id) return nil } func isAPIServerShuttingDown(err error, logger logging.Logger) bool { if strings.Contains(err.Error(), "apiserver is shutting down") { logger.Debug("Error creating check type, not retrying", "error", err) return true } return false } func (r *Runner) RegisterCheckTypesInNamespace(ctx context.Context, logger logging.Logger, namespace string) error { for _, t := range r.checkRegistry.Checks() { steps := t.Steps() stepTypes := make([]advisorv0alpha1.CheckTypeStep, len(steps)) for i, s := range steps { stepTypes[i] = advisorv0alpha1.CheckTypeStep{ Title: s.Title(), 
Description: s.Description(), StepID: s.ID(), Resolution: s.Resolution(), } } obj := &advisorv0alpha1.CheckType{ ObjectMeta: metav1.ObjectMeta{ Name: t.ID(), Namespace: namespace, Annotations: map[string]string{ checks.NameAnnotation: t.Name(), // Flag to indicate feature availability checks.RetryAnnotation: "1", checks.IgnoreStepsAnnotation: "1", }, }, Spec: advisorv0alpha1.CheckTypeSpec{ Name: t.ID(), Steps: stepTypes, }, } if err := r.registerCheckType(ctx, logger, t.ID(), obj); err != nil { return fmt.Errorf("failed to register check type %s: %w", t.ID(), err) } } return nil }
go
github
https://github.com/grafana/grafana
apps/advisor/pkg/app/checktyperegisterer/checktyperegisterer.go
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2013-2014 DeLance Schmidt # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """PID Controller. 
""" import sys import subprocess import os import fnmatch import argparse import re class Error(Exception): pass class PIDController( object ): """Main class""" def __init__(self, kp, ki, kd, sampleTime, setpoint, outputMax, outputMin, inputFunction, directController = True): """Instantiates a PID Controller""" self.directController = directController self.sampleTime = sampleTime self.set_tunings(kp, ki, kd) self.kp = kp self.ki = ki self.kd = kd self.on = False self.setpoint = setpoint self.lastError = 0.0 self.errorSum = 0.0 self.lastInput = 0.0 self.iTerm = 0.0 self.output = 0.0 self.outputMax = outputMax self.outputMin = outputMin if ( not ( outputMin < outputMax ) ) or ( (outputMax == 0) and (outputMin == 0)): raise Error( 'Invalid output minimum and maximum' ) self.inputReading = inputFunction def set_tunings(self, kp, ki, kd): """Sets the tuning values for the PID """ if ( kp < 0) or ( ki < 0 ) or ( kd < 0): raise Error( 'kp, ki, and kd cannot be less than zero.' ) sampleTimeInSec = self.sampleTime / 1000 self.kp = kp self.ki = ki * sampleTimeInSec self.kd = kd / sampleTimeInSec if not self.directController: self.kp = 0 - self.kp self.ki = 0 - self.ki self.kd = 0 - self.kd def set_sample_time(self, sampleTime): """Sets the sample time for the PID """ if (sampleTime > 0): ratio = sampleTime / self.sampleTime self.ki = self.ki * ratio self.kd = self.kd * ratio self.sampleTime = sampleTime def compute(self ): """Compute the return value of the PID """ if not self.on: return self.output inputReading = self.inputReading() error = self.setpoint - inputReading self.errorSum = self.errorSum + error self.iTerm = self.iTerm + ( self.ki * error) dInput = inputReading - self.lastInput self.output = self.kp * error + self.iTerm + self.kd * dInput if ( self.output > self.outputMax ): self.output = self.outputMax elif( self.output < self.outputMin ): self.output = self.outputMin self.lastError = error self.lastInput = inputReading return self.output def start(self): if not 
self.on: self._initialize() self.on = True def stop(self): self.on = False def _initialize(self): self.lastInput = self.inputReading() self.iTerm = self.output if ( self.iTerm > self.outputMax ): self.iTerm = self.outputMax elif( self.iTerm < self.outputMin ): self.iTerm = self.outputMin def main(): parser = argparse.ArgumentParser(description='Python PID controller.') parser.add_argument( 'kp', help='Proportional value.' ) parser.add_argument( 'ki', help='Integral value.' ) parser.add_argument( 'kd', help='Derivative value.' ) parser.add_argument( 'SampleTime', help='Sample time in milliseconds.' ) parser.add_argument( 'Setpoint', help='Setpoint for the controller.' ) parser.add_argument( 'Output Min', help='Maximum output for PID.' ) parser.add_argument( 'Output Max', help='Minimum output for PID.' ) args = parser.parse_args() if __name__ == '__main__': try: main() except KeyboardInterrupt: pass
unknown
codeparrot/codeparrot-clean
from django.template.defaultfilters import striptags from django.test import SimpleTestCase from django.utils.functional import lazystr from django.utils.safestring import mark_safe from ..utils import setup class StriptagsTests(SimpleTestCase): @setup({'striptags01': '{{ a|striptags }} {{ b|striptags }}'}) def test_striptags01(self): output = self.engine.render_to_string( 'striptags01', { 'a': '<a>x</a> <p><b>y</b></p>', 'b': mark_safe('<a>x</a> <p><b>y</b></p>'), }, ) self.assertEqual(output, 'x y x y') @setup({'striptags02': '{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}'}) def test_striptags02(self): output = self.engine.render_to_string( 'striptags02', { 'a': '<a>x</a> <p><b>y</b></p>', 'b': mark_safe('<a>x</a> <p><b>y</b></p>'), }, ) self.assertEqual(output, 'x y x y') class FunctionTests(SimpleTestCase): def test_strip(self): self.assertEqual( striptags('some <b>html</b> with <script>alert("You smell")</script> disallowed <img /> tags'), 'some html with alert("You smell") disallowed tags', ) def test_non_string_input(self): self.assertEqual(striptags(123), '123') def test_strip_lazy_string(self): self.assertEqual( striptags(lazystr('some <b>html</b> with <script>alert("Hello")</script> disallowed <img /> tags')), 'some html with alert("Hello") disallowed tags', )
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals import sys import warnings from collections import deque from functools import total_ordering from django.db.migrations.state import ProjectState from django.utils import six from django.utils.datastructures import OrderedSet from django.utils.encoding import python_2_unicode_compatible from .exceptions import CircularDependencyError, NodeNotFoundError RECURSION_DEPTH_WARNING = ( "Maximum recursion depth exceeded while generating migration graph, " "falling back to iterative approach. If you're experiencing performance issues, " "consider squashing migrations as described at " "https://docs.djangoproject.com/en/dev/topics/migrations/#squashing-migrations." ) @python_2_unicode_compatible @total_ordering class Node(object): """ A single node in the migration graph. Contains direct links to adjacent nodes in either direction. """ def __init__(self, key): self.key = key self.children = set() self.parents = set() def __eq__(self, other): return self.key == other def __lt__(self, other): return self.key < other def __hash__(self): return hash(self.key) def __getitem__(self, item): return self.key[item] def __str__(self): return str(self.key) def __repr__(self): return '<Node: (%r, %r)>' % self.key def add_child(self, child): self.children.add(child) def add_parent(self, parent): self.parents.add(parent) # Use manual caching, @cached_property effectively doubles the # recursion depth for each recursion. def ancestors(self): # Use self.key instead of self to speed up the frequent hashing # when constructing an OrderedSet. if '_ancestors' not in self.__dict__: ancestors = deque([self.key]) for parent in sorted(self.parents): ancestors.extendleft(reversed(parent.ancestors())) self.__dict__['_ancestors'] = list(OrderedSet(ancestors)) return self.__dict__['_ancestors'] # Use manual caching, @cached_property effectively doubles the # recursion depth for each recursion. 
def descendants(self): # Use self.key instead of self to speed up the frequent hashing # when constructing an OrderedSet. if '_descendants' not in self.__dict__: descendants = deque([self.key]) for child in sorted(self.children): descendants.extendleft(reversed(child.descendants())) self.__dict__['_descendants'] = list(OrderedSet(descendants)) return self.__dict__['_descendants'] class DummyNode(Node): def __init__(self, key, origin, error_message): super(DummyNode, self).__init__(key) self.origin = origin self.error_message = error_message def __repr__(self): return '<DummyNode: (%r, %r)>' % self.key def promote(self): """ Transition dummy to a normal node and clean off excess attribs. Creating a Node object from scratch would be too much of a hassle as many dependendies would need to be remapped. """ del self.origin del self.error_message self.__class__ = Node def raise_error(self): raise NodeNotFoundError(self.error_message, self.key, origin=self.origin) @python_2_unicode_compatible class MigrationGraph(object): """ Represents the digraph of all migrations in a project. Each migration is a node, and each dependency is an edge. There are no implicit dependencies between numbered migrations - the numbering is merely a convention to aid file listing. Every new numbered migration has a declared dependency to the previous number, meaning that VCS branch merges can be detected and resolved. Migrations files can be marked as replacing another set of migrations - this is to support the "squash" feature. The graph handler isn't responsible for these; instead, the code to load them in here should examine the migration files and if the replaced migrations are all either unapplied or not present, it should ignore the replaced ones, load in just the replacing migration, and repoint any dependencies that pointed to the replaced migrations to point to the replacing one. A node should be a tuple: (app_path, migration_name). 
The tree special-cases things within an app - namely, root nodes and leaf nodes ignore dependencies to other apps. """ def __init__(self): self.node_map = {} self.nodes = {} self.cached = False def add_node(self, key, migration): # If the key already exists, then it must be a dummy node. dummy_node = self.node_map.get(key) if dummy_node: # Promote DummyNode to Node. dummy_node.promote() else: node = Node(key) self.node_map[key] = node self.nodes[key] = migration self.clear_cache() def add_dummy_node(self, key, origin, error_message): node = DummyNode(key, origin, error_message) self.node_map[key] = node self.nodes[key] = None def add_dependency(self, migration, child, parent, skip_validation=False): """ This may create dummy nodes if they don't yet exist. If `skip_validation` is set, validate_consistency should be called afterwards. """ if child not in self.nodes: error_message = ( "Migration %s dependencies reference nonexistent" " child node %r" % (migration, child) ) self.add_dummy_node(child, migration, error_message) if parent not in self.nodes: error_message = ( "Migration %s dependencies reference nonexistent" " parent node %r" % (migration, parent) ) self.add_dummy_node(parent, migration, error_message) self.node_map[child].add_parent(self.node_map[parent]) self.node_map[parent].add_child(self.node_map[child]) if not skip_validation: self.validate_consistency() self.clear_cache() def remove_replaced_nodes(self, replacement, replaced): """ Removes each of the `replaced` nodes (when they exist). Any dependencies that were referencing them are changed to reference the `replacement` node instead. """ # Cast list of replaced keys to set to speed up lookup later. replaced = set(replaced) try: replacement_node = self.node_map[replacement] except KeyError as exc: exc_value = NodeNotFoundError( "Unable to find replacement node %r. It was either never added" " to the migration graph, or has been removed." 
% (replacement, ), replacement ) exc_value.__cause__ = exc if not hasattr(exc, '__traceback__'): exc.__traceback__ = sys.exc_info()[2] six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2]) for replaced_key in replaced: self.nodes.pop(replaced_key, None) replaced_node = self.node_map.pop(replaced_key, None) if replaced_node: for child in replaced_node.children: child.parents.remove(replaced_node) # We don't want to create dependencies between the replaced # node and the replacement node as this would lead to # self-referencing on the replacement node at a later iteration. if child.key not in replaced: replacement_node.add_child(child) child.add_parent(replacement_node) for parent in replaced_node.parents: parent.children.remove(replaced_node) # Again, to avoid self-referencing. if parent.key not in replaced: replacement_node.add_parent(parent) parent.add_child(replacement_node) self.clear_cache() def remove_replacement_node(self, replacement, replaced): """ The inverse operation to `remove_replaced_nodes`. Almost. Removes the replacement node `replacement` and remaps its child nodes to `replaced` - the list of nodes it would have replaced. Its parent nodes are not remapped as they are expected to be correct already. """ self.nodes.pop(replacement, None) try: replacement_node = self.node_map.pop(replacement) except KeyError as exc: exc_value = NodeNotFoundError( "Unable to remove replacement node %r. It was either never added" " to the migration graph, or has been removed already." 
% (replacement, ), replacement ) exc_value.__cause__ = exc if not hasattr(exc, '__traceback__'): exc.__traceback__ = sys.exc_info()[2] six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2]) replaced_nodes = set() replaced_nodes_parents = set() for key in replaced: replaced_node = self.node_map.get(key) if replaced_node: replaced_nodes.add(replaced_node) replaced_nodes_parents |= replaced_node.parents # We're only interested in the latest replaced node, so filter out # replaced nodes that are parents of other replaced nodes. replaced_nodes -= replaced_nodes_parents for child in replacement_node.children: child.parents.remove(replacement_node) for replaced_node in replaced_nodes: replaced_node.add_child(child) child.add_parent(replaced_node) for parent in replacement_node.parents: parent.children.remove(replacement_node) # NOTE: There is no need to remap parent dependencies as we can # assume the replaced nodes already have the correct ancestry. self.clear_cache() def validate_consistency(self): """ Ensure there are no dummy nodes remaining in the graph. """ [n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)] def clear_cache(self): if self.cached: for node in self.nodes: self.node_map[node].__dict__.pop('_ancestors', None) self.node_map[node].__dict__.pop('_descendants', None) self.cached = False def forwards_plan(self, target): """ Given a node, returns a list of which previous nodes (dependencies) must be applied, ending with the node itself. This is the list you would follow if applying the migrations to a database. 
""" if target not in self.nodes: raise NodeNotFoundError("Node %r not a valid node" % (target, ), target) # Use parent.key instead of parent to speed up the frequent hashing in ensure_not_cyclic self.ensure_not_cyclic(target, lambda x: (parent.key for parent in self.node_map[x].parents)) self.cached = True node = self.node_map[target] try: return node.ancestors() except RuntimeError: # fallback to iterative dfs warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning) return self.iterative_dfs(node) def backwards_plan(self, target): """ Given a node, returns a list of which dependent nodes (dependencies) must be unapplied, ending with the node itself. This is the list you would follow if removing the migrations from a database. """ if target not in self.nodes: raise NodeNotFoundError("Node %r not a valid node" % (target, ), target) # Use child.key instead of child to speed up the frequent hashing in ensure_not_cyclic self.ensure_not_cyclic(target, lambda x: (child.key for child in self.node_map[x].children)) self.cached = True node = self.node_map[target] try: return node.descendants() except RuntimeError: # fallback to iterative dfs warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning) return self.iterative_dfs(node, forwards=False) def iterative_dfs(self, start, forwards=True): """ Iterative depth first search, for finding dependencies. """ visited = deque() visited.append(start) if forwards: stack = deque(sorted(start.parents)) else: stack = deque(sorted(start.children)) while stack: node = stack.popleft() visited.appendleft(node) if forwards: children = sorted(node.parents, reverse=True) else: children = sorted(node.children, reverse=True) # reverse sorting is needed because prepending using deque.extendleft # also effectively reverses values stack.extendleft(children) return list(OrderedSet(visited)) def root_nodes(self, app=None): """ Returns all root nodes - that is, nodes with no dependencies inside their app. These are the starting point for an app. 
""" roots = set() for node in self.nodes: if not any(key[0] == node[0] for key in self.node_map[node].parents) and (not app or app == node[0]): roots.add(node) return sorted(roots) def leaf_nodes(self, app=None): """ Returns all leaf nodes - that is, nodes with no dependents in their app. These are the "most current" version of an app's schema. Having more than one per app is technically an error, but one that gets handled further up, in the interactive command - it's usually the result of a VCS merge and needs some user input. """ leaves = set() for node in self.nodes: if not any(key[0] == node[0] for key in self.node_map[node].children) and (not app or app == node[0]): leaves.add(node) return sorted(leaves) def ensure_not_cyclic(self, start, get_children): # Algo from GvR: # http://neopythonic.blogspot.co.uk/2009/01/detecting-cycles-in-directed-graph.html todo = set(self.nodes) while todo: node = todo.pop() stack = [node] while stack: top = stack[-1] for node in get_children(top): if node in stack: cycle = stack[stack.index(node):] raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle)) if node in todo: stack.append(node) todo.remove(node) break else: node = stack.pop() def __str__(self): return 'Graph: %s nodes, %s edges' % self._nodes_and_edges() def __repr__(self): nodes, edges = self._nodes_and_edges() return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges) def _nodes_and_edges(self): return len(self.nodes), sum(len(node.parents) for node in self.node_map.values()) def make_state(self, nodes=None, at_end=True, real_apps=None): """ Given a migration node or nodes, returns a complete ProjectState for it. If at_end is False, returns the state before the migration has run. If nodes is not provided, returns the overall most current project state. 
""" if nodes is None: nodes = list(self.leaf_nodes()) if len(nodes) == 0: return ProjectState() if not isinstance(nodes[0], tuple): nodes = [nodes] plan = [] for node in nodes: for migration in self.forwards_plan(node): if migration not in plan: if not at_end and migration in nodes: continue plan.append(migration) project_state = ProjectState(real_apps=real_apps) for node in plan: project_state = self.nodes[node].mutate_state(project_state, preserve=False) return project_state def __contains__(self, node): return node in self.nodes
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe import json def execute(): existing_allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock") frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1) head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"] stock_reco_to_be_reposted = [] for d in frappe.db.sql("""select name, reconciliation_json from `tabStock Reconciliation` where docstatus=1 and creation > '2014-03-01'""", as_dict=1): data = json.loads(d.reconciliation_json) for row in data[data.index(head_row)+1:]: if row[3] in ["", None]: stock_reco_to_be_reposted.append(d.name) break for dn in stock_reco_to_be_reposted: reco = frappe.get_doc("Stock Reconciliation", dn) reco.docstatus = 2 reco.on_cancel() reco.docstatus = 1 reco.validate() reco.on_submit() frappe.db.set_value("Stock Settings", None, "allow_negative_stock", existing_allow_negative_stock)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """ Flaskr ~~~~~~ A microblog example application written as Flask tutorial with Flask and sqlite3. :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import os from sqlite3 import dbapi2 as sqlite3 from flask import Flask, request, session, g, redirect, url_for, abort, \ render_template, flash # create our little application :) app = Flask(__name__) # Load default config and override config from an environment variable app.config.update(dict( DATABASE=os.path.join(app.root_path, 'flaskr.db'), DEBUG=True, SECRET_KEY='development key', USERNAME='admin', PASSWORD='default' )) app.config.from_envvar('FLASKR_SETTINGS', silent=True) def connect_db(): """Connects to the specific database.""" rv = sqlite3.connect(app.config['DATABASE']) rv.row_factory = sqlite3.Row return rv def init_db(): """Initializes the database.""" db = get_db() with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() @app.cli.command('initdb') def initdb_command(): """Creates the database tables.""" init_db() print('Initialized the database.') def get_db(): """Opens a new database connection if there is none yet for the current application context. 
""" if not hasattr(g, 'sqlite_db'): g.sqlite_db = connect_db() return g.sqlite_db @app.teardown_appcontext def close_db(error): """Closes the database again at the end of the request.""" if hasattr(g, 'sqlite_db'): g.sqlite_db.close() @app.route('/') def show_entries(): db = get_db() cur = db.execute('select title, text from entries order by id desc') entries = cur.fetchall() return render_template('show_entries.html', entries=entries) @app.route('/add', methods=['POST']) def add_entry(): if not session.get('logged_in'): abort(401) db = get_db() db.execute('insert into entries (title, text) values (?, ?)', [request.form['title'], request.form['text']]) db.commit() flash('New entry was successfully posted') return redirect(url_for('show_entries')) @app.route('/login', methods=['GET', 'POST']) def login(): error = None if request.method == 'POST': if request.form['username'] != app.config['USERNAME']: error = 'Invalid username' elif request.form['password'] != app.config['PASSWORD']: error = 'Invalid password' else: session['logged_in'] = True flash('You were logged in') return redirect(url_for('show_entries')) return render_template('login.html', error=error) @app.route('/logout') def logout(): session.pop('logged_in', None) flash('You were logged out') return redirect(url_for('show_entries'))
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python3 """ NumPy is the fundamental package for array computing with Python. It provides: - a powerful N-dimensional array object - sophisticated (broadcasting) functions - tools for integrating C/C++ and Fortran code - useful linear algebra, Fourier transform, and random number capabilities - and much more Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases. All NumPy wheels distributed on PyPI are BSD licensed. """ DOCLINES = (__doc__ or '').split("\n") import os import sys import subprocess import textwrap import warnings if sys.version_info[:2] < (3, 6): raise RuntimeError("Python version >= 3.6 required.") import builtins CLASSIFIERS = """\ Development Status :: 5 - Production/Stable Intended Audience :: Science/Research Intended Audience :: Developers License :: OSI Approved :: BSD License Programming Language :: C Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython Topic :: Software Development Topic :: Scientific/Engineering Typing :: Typed Operating System :: Microsoft :: Windows Operating System :: POSIX Operating System :: Unix Operating System :: MacOS """ MAJOR = 1 MINOR = 20 MICRO = 0 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) # The first version not in the `Programming Language :: Python :: ...` classifiers above if sys.version_info >= (3, 10): warnings.warn( f"NumPy {VERSION} may not yet support Python " f"{sys.version_info.major}.{sys.version_info.minor}.", RuntimeWarning, ) # Return the git revision as a string def git_version(): def 
_minimal_ext_cmd(cmd): # construct minimal environment env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if v is not None: env[k] = v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except (subprocess.SubprocessError, OSError): GIT_REVISION = "Unknown" if not GIT_REVISION: # this shouldn't happen but apparently can (see gh-8512) GIT_REVISION = "Unknown" return GIT_REVISION # BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be # properly updated when the contents of directories change (true for distutils, # not sure about setuptools). if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit hackish: we are setting a global variable so that the main # numpy __init__ can detect if it is being loaded by the setup routine, to # avoid attempting to load components that aren't built yet. While ugly, it's # a lot more robust than what was previously being used. builtins.__NUMPY_SETUP__ = True def get_version_info(): # Adding the git rev number needs to be done inside write_version_py(), # otherwise the import of numpy.version messes up the build under Python 3. FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('numpy/version.py'): # must be a source distribution, use existing version file try: from numpy.version import git_revision as GIT_REVISION except ImportError: raise ImportError("Unable to import git_revision. 
Try removing " "numpy/version.py and the build directory " "before building.") else: GIT_REVISION = "Unknown" if not ISRELEASED: FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='numpy/version.py'): cnt = """ # THIS FILE IS GENERATED FROM NUMPY SETUP.PY # # To compare versions robustly, use `numpy.lib.NumpyVersion` short_version = '%(version)s' version = '%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s if not release: version = full_version """ FULLVERSION, GIT_REVISION = get_version_info() a = open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED)}) finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('numpy') config.add_data_files(('numpy', 'LICENSE.txt')) config.add_data_files(('numpy', 'numpy/*.pxd')) config.get_version('numpy/version.py') # sets config.version return config def check_submodules(): """ verify that the submodules are checked out and clean use `git submodule update --init`; on failure """ if not os.path.exists('.git'): return with open('.gitmodules') as f: for line in f: if 'path' in line: p = line.split('=')[-1].strip() if not os.path.exists(p): raise ValueError('Submodule {} missing'.format(p)) proc = subprocess.Popen(['git', 'submodule', 'status'], stdout=subprocess.PIPE) status, _ = proc.communicate() status = status.decode("ascii", "replace") for line in status.splitlines(): if line.startswith('-') or line.startswith('+'): raise ValueError('Submodule not clean: {}'.format(line)) class concat_license_files(): """Merge LICENSE.txt and LICENSES_bundled.txt for sdist 
creation Done this way to keep LICENSE.txt in repo as exact BSD 3-clause (see gh-13447). This makes GitHub state correctly how NumPy is licensed. """ def __init__(self): self.f1 = 'LICENSE.txt' self.f2 = 'LICENSES_bundled.txt' def __enter__(self): """Concatenate files and remove LICENSES_bundled.txt""" with open(self.f1, 'r') as f1: self.bsd_text = f1.read() with open(self.f1, 'a') as f1: with open(self.f2, 'r') as f2: self.bundled_text = f2.read() f1.write('\n\n') f1.write(self.bundled_text) def __exit__(self, exception_type, exception_value, traceback): """Restore content of both files""" with open(self.f1, 'w') as f: f.write(self.bsd_text) from distutils.command.sdist import sdist class sdist_checked(sdist): """ check submodules on sdist to prevent incomplete tarballs """ def run(self): check_submodules() with concat_license_files(): sdist.run(self) def get_build_overrides(): """ Custom build commands to add `-std=c99` to compilation """ from numpy.distutils.command.build_clib import build_clib from numpy.distutils.command.build_ext import build_ext from distutils.version import LooseVersion def _needs_gcc_c99_flag(obj): if obj.compiler.compiler_type != 'unix': return False cc = obj.compiler.compiler[0] if "gcc" not in cc: return False # will print something like '4.2.1\n' out = subprocess.run([cc, '-dumpversion'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) # -std=c99 is default from this version on if LooseVersion(out.stdout) >= LooseVersion('5.0'): return False return True class new_build_clib(build_clib): def build_a_library(self, build_info, lib_name, libraries): if _needs_gcc_c99_flag(self): args = build_info.get('extra_compiler_args') or [] args.append('-std=c99') build_info['extra_compiler_args'] = args build_clib.build_a_library(self, build_info, lib_name, libraries) class new_build_ext(build_ext): def build_extension(self, ext): if _needs_gcc_c99_flag(self): if '-std=c99' not in ext.extra_compile_args: 
ext.extra_compile_args.append('-std=c99') build_ext.build_extension(self, ext) return new_build_clib, new_build_ext def generate_cython(): cwd = os.path.abspath(os.path.dirname(__file__)) print("Cythonizing sources") for d in ('random',): p = subprocess.call([sys.executable, os.path.join(cwd, 'tools', 'cythonize.py'), 'numpy/{0}'.format(d)], cwd=cwd) if p != 0: raise RuntimeError("Running cythonize failed!") def parse_setuppy_commands(): """Check the commands and respond appropriately. Disable broken commands. Return a boolean value for whether or not to run the build or not (avoid parsing Cython and template files if False). """ args = sys.argv[1:] if not args: # User forgot to give an argument probably, let setuptools handle that. return True info_commands = ['--help-commands', '--name', '--version', '-V', '--fullname', '--author', '--author-email', '--maintainer', '--maintainer-email', '--contact', '--contact-email', '--url', '--license', '--description', '--long-description', '--platforms', '--classifiers', '--keywords', '--provides', '--requires', '--obsoletes'] for command in info_commands: if command in args: return False # Note that 'alias', 'saveopts' and 'setopt' commands also seem to work # fine as they are, but are usually used together with one of the commands # below and not standalone. Hence they're not added to good_commands. 
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py', 'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm', 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src') for command in good_commands: if command in args: return True # The following commands are supported, but we need to show more # useful messages to the user if 'install' in args: print(textwrap.dedent(""" Note: if you need reliable uninstall behavior, then install with pip instead of using `setup.py install`: - `pip install .` (from a git repo or downloaded source release) - `pip install numpy` (last NumPy release on PyPi) """)) return True if '--help' in args or '-h' in sys.argv[1]: print(textwrap.dedent(""" NumPy-specific help ------------------- To install NumPy from here with reliable uninstall, we recommend that you use `pip install .`. To install the latest NumPy release from PyPi, use `pip install numpy`. For help with build/installation issues, please ask on the numpy-discussion mailing list. If you are sure that you have run into a bug, please report it at https://github.com/numpy/numpy/issues. Setuptools commands help ------------------------ """)) return False # The following commands aren't supported. They can only be executed when # the user explicitly adds a --force command-line argument. bad_commands = dict( test=""" `setup.py test` is not supported. Use one of the following instead: - `python runtests.py` (to build and test) - `python runtests.py --no-build` (to test installed numpy) - `>>> numpy.test()` (run tests for installed numpy from within an interpreter) """, upload=""" `setup.py upload` is not supported, because it's insecure. Instead, build what you want to upload and upload those files with `twine upload -s <filenames>` instead. 
""", upload_docs="`setup.py upload_docs` is not supported", easy_install="`setup.py easy_install` is not supported", clean=""" `setup.py clean` is not supported, use one of the following instead: - `git clean -xdf` (cleans all files) - `git clean -Xdf` (cleans all versioned files, doesn't touch files that aren't checked into the git repo) """, check="`setup.py check` is not supported", register="`setup.py register` is not supported", bdist_dumb="`setup.py bdist_dumb` is not supported", bdist="`setup.py bdist` is not supported", build_sphinx=""" `setup.py build_sphinx` is not supported, use the Makefile under doc/""", flake8="`setup.py flake8` is not supported, use flake8 standalone", ) bad_commands['nosetests'] = bad_commands['test'] for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb', 'register', 'check', 'install_data', 'install_headers', 'install_lib', 'install_scripts', ): bad_commands[command] = "`setup.py %s` is not supported" % command for command in bad_commands.keys(): if command in args: print(textwrap.dedent(bad_commands[command]) + "\nAdd `--force` to your command to use it anyway if you " "must (unsupported).\n") sys.exit(1) # Commands that do more than print info, but also don't need Cython and # template parsing. other_commands = ['egg_info', 'install_egg_info', 'rotate'] for command in other_commands: if command in args: return False # If we got here, we didn't detect what setup.py command was given import warnings warnings.warn("Unrecognized setuptools command, proceeding with " "generating Cython sources and expanding templates", stacklevel=2) return True def get_docs_url(): if not ISRELEASED: return "https://numpy.org/devdocs" else: # For releases, this URL ends up on pypi. # By pinning the version, users looking at old PyPI releases can get # to the associated docs easily. 
return "https://numpy.org/doc/{}.{}".format(MAJOR, MINOR) def setup_package(): src_path = os.path.dirname(os.path.abspath(__file__)) old_path = os.getcwd() os.chdir(src_path) sys.path.insert(0, src_path) # Rewrite the version file every time write_version_py() # The f2py scripts that will be installed if sys.platform == 'win32': f2py_cmds = [ 'f2py = numpy.f2py.f2py2e:main', ] else: f2py_cmds = [ 'f2py = numpy.f2py.f2py2e:main', 'f2py%s = numpy.f2py.f2py2e:main' % sys.version_info[:1], 'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2], ] cmdclass = {"sdist": sdist_checked, } metadata = dict( name='numpy', maintainer="NumPy Developers", maintainer_email="numpy-discussion@python.org", description=DOCLINES[0], long_description="\n".join(DOCLINES[2:]), url="https://www.numpy.org", author="Travis E. Oliphant et al.", download_url="https://pypi.python.org/pypi/numpy", project_urls={ "Bug Tracker": "https://github.com/numpy/numpy/issues", "Documentation": get_docs_url(), "Source Code": "https://github.com/numpy/numpy", }, license='BSD', classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], test_suite='pytest', cmdclass=cmdclass, python_requires='>=3.6', zip_safe=False, entry_points={ 'console_scripts': f2py_cmds }, ) if "--force" in sys.argv: run_build = True sys.argv.remove('--force') else: # Raise errors for unsupported commands, improve help output, etc. run_build = parse_setuppy_commands() if run_build: # patches distutils, even though we don't use it import setuptools # noqa: F401 from numpy.distutils.core import setup if 'sdist' not in sys.argv: # Generate Cython sources, unless we're generating an sdist generate_cython() metadata['configuration'] = configuration # Customize extension building cmdclass['build_clib'], cmdclass['build_ext'] = get_build_overrides() else: from setuptools import setup # Version number is added to metadata inside configuration() if build # is run. 
metadata['version'] = get_version_info()[0] try: setup(**metadata) finally: del sys.path[0] os.chdir(old_path) return if __name__ == '__main__': setup_package() # This may avoid problems where numpy is installed via ``*_requires`` by # setuptools, the global namespace isn't reset properly, and then numpy is # imported later (which will then fail to load numpy extension modules). # See gh-7956 for details del builtins.__NUMPY_SETUP__
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import json
import os
import pyarrow as pa
import pyarrow.jvm as pa_jvm
import pytest
import six
import sys
import xml.etree.ElementTree as ET


jpype = pytest.importorskip("jpype")


@pytest.fixture(scope="session")
def root_allocator():
    """Start a JVM with the Arrow tools jar and return a RootAllocator."""
    # This test requires Arrow Java to be built in the same source tree:
    # the Arrow version is read from the top-level java/pom.xml so the
    # matching tools jar can be located.
    pom_file = os.path.join(
        os.path.dirname(__file__), '..', '..', '..', 'java', 'pom.xml')
    pom_tree = ET.parse(pom_file)
    pom_ns = {'POM': 'http://maven.apache.org/POM/4.0.0'}
    arrow_version = pom_tree.getroot().find(
        'POM:version', namespaces=pom_ns).text
    default_jar = os.path.join(
        os.path.dirname(__file__), '..', '..', '..', 'java', 'tools',
        'target',
        'arrow-tools-{}-jar-with-dependencies.jar'.format(arrow_version))
    # The jar location can be overridden through the environment.
    tools_jar = os.getenv("ARROW_TOOLS_JAR", default_jar)
    jpype.startJVM(jpype.getDefaultJVMPath(),
                   "-Djava.class.path=" + tools_jar)
    return jpype.JPackage("org").apache.arrow.memory.RootAllocator(sys.maxsize)


def test_jvm_buffer(root_allocator):
    """A JVM-allocated buffer is readable from Python via pa_jvm.jvm_buffer."""
    # Fill a fresh 8-byte JVM buffer with the values 8 down to 1.
    jvm_buffer = root_allocator.buffer(8)
    for pos, value in enumerate(range(8, 0, -1)):
        jvm_buffer.setByte(pos, value)

    # Wrap it as a pyarrow buffer and verify the bytes survived unchanged.
    buf = pa_jvm.jvm_buffer(jvm_buffer)
    assert buf.to_pybytes() == b'\x08\x07\x06\x05\x04\x03\x02\x01'


def _jvm_field(jvm_spec):
    """Deserialize a JSON field spec into an Arrow Java Field object."""
    om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
    pojo_Field = jpype.JClass('org.apache.arrow.vector.types.pojo.Field')
    return om.readValue(jvm_spec, pojo_Field)


def _jvm_schema(jvm_spec, metadata=None):
    """Build an Arrow Java Schema holding the single field in *jvm_spec*.

    ``metadata`` (optional dict) becomes the schema's custom metadata.
    """
    field = _jvm_field(jvm_spec)
    schema_cls = jpype.JClass('org.apache.arrow.vector.types.pojo.Schema')
    fields = jpype.JClass('java.util.ArrayList')()
    fields.add(field)
    if not metadata:
        return schema_cls(fields)
    dct = jpype.JClass('java.util.HashMap')()
    for k, v in six.iteritems(metadata):
        dct.put(k, v)
    return schema_cls(fields, dct)


# In the following, we use the JSON serialization of the Field objects in Java.
# This ensures that we neither rely on the exact mechanics on how to construct
# them using Java code as well as enables us to define them as parameters
# without to invoke the JVM.
#
# The specifications were created using:
#
#   om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
#   field = …  # Code to instantiate the field
#   jvm_spec = om.writeValueAsString(field)
@pytest.mark.parametrize('pa_type,jvm_spec', [
    (pa.null(), '{"name":"null"}'),
    (pa.bool_(), '{"name":"bool"}'),
    (pa.int8(), '{"name":"int","bitWidth":8,"isSigned":true}'),
    (pa.int16(), '{"name":"int","bitWidth":16,"isSigned":true}'),
    (pa.int32(), '{"name":"int","bitWidth":32,"isSigned":true}'),
    (pa.int64(), '{"name":"int","bitWidth":64,"isSigned":true}'),
    (pa.uint8(), '{"name":"int","bitWidth":8,"isSigned":false}'),
    (pa.uint16(), '{"name":"int","bitWidth":16,"isSigned":false}'),
    (pa.uint32(), '{"name":"int","bitWidth":32,"isSigned":false}'),
    (pa.uint64(), '{"name":"int","bitWidth":64,"isSigned":false}'),
    (pa.float16(), '{"name":"floatingpoint","precision":"HALF"}'),
    (pa.float32(), '{"name":"floatingpoint","precision":"SINGLE"}'),
    (pa.float64(), '{"name":"floatingpoint","precision":"DOUBLE"}'),
    (pa.time32('s'), '{"name":"time","unit":"SECOND","bitWidth":32}'),
    (pa.time32('ms'), '{"name":"time","unit":"MILLISECOND","bitWidth":32}'),
    (pa.time64('us'), '{"name":"time","unit":"MICROSECOND","bitWidth":64}'),
    (pa.time64('ns'), '{"name":"time","unit":"NANOSECOND","bitWidth":64}'),
    (pa.timestamp('s'), '{"name":"timestamp","unit":"SECOND",'
        '"timezone":null}'),
    (pa.timestamp('ms'), '{"name":"timestamp","unit":"MILLISECOND",'
        '"timezone":null}'),
    (pa.timestamp('us'), '{"name":"timestamp","unit":"MICROSECOND",'
        '"timezone":null}'),
    (pa.timestamp('ns'), '{"name":"timestamp","unit":"NANOSECOND",'
        '"timezone":null}'),
    (pa.timestamp('ns', tz='UTC'), '{"name":"timestamp","unit":"NANOSECOND"'
        ',"timezone":"UTC"}'),
    (pa.timestamp('ns', tz='Europe/Paris'), '{"name":"timestamp",'
        '"unit":"NANOSECOND","timezone":"Europe/Paris"}'),
    (pa.date32(), '{"name":"date","unit":"DAY"}'),
    (pa.date64(), '{"name":"date","unit":"MILLISECOND"}'),
    (pa.decimal128(19, 4), '{"name":"decimal","precision":19,"scale":4}'),
    (pa.string(), '{"name":"utf8"}'),
    (pa.binary(), '{"name":"binary"}'),
    (pa.binary(10), '{"name":"fixedsizebinary","byteWidth":10}'),
    # TODO(ARROW-2609): complex types that have children
    # pa.list_(pa.int32()),
    # pa.struct([pa.field('a', pa.int32()),
    #            pa.field('b', pa.int8()),
    #            pa.field('c', pa.string())]),
    # pa.union([pa.field('a', pa.binary(10)),
    #           pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
    # pa.union([pa.field('a', pa.binary(10)),
    #           pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
    # TODO: DictionaryType requires a vector in the type
    # pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c'])),
])
@pytest.mark.parametrize('nullable', [True, False])
def test_jvm_types(root_allocator, pa_type, jvm_spec, nullable):
    """Field and Schema objects round-trip from Java to pyarrow equivalents."""
    field_spec = {
        'name': 'field_name',
        'nullable': nullable,
        'type': json.loads(jvm_spec),
        # TODO: This needs to be set for complex types
        'children': []
    }
    serialized = json.dumps(field_spec)
    expected_field = pa.field('field_name', pa_type, nullable=nullable)

    # A single Java Field converts to the equivalent pyarrow Field.
    assert pa_jvm.field(_jvm_field(serialized)) == expected_field

    # A Java Schema wrapping that field converts to a pyarrow Schema.
    assert pa_jvm.schema(_jvm_schema(serialized)) == \
        pa.schema([expected_field])

    # Schema with custom metadata
    with_meta = _jvm_schema(serialized, {'meta': 'data'})
    assert pa_jvm.schema(with_meta) == \
        pa.schema([expected_field], {'meta': 'data'})


# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type', [
    (pa.bool_(), [True, False, True, True], 'BitVector'),
    (pa.uint8(), list(range(128)), 'UInt1Vector'),
    (pa.uint16(), list(range(128)), 'UInt2Vector'),
    (pa.int32(), list(range(128)), 'IntVector'),
    (pa.int64(), list(range(128)), 'BigIntVector'),
    (pa.float32(), list(range(128)), 'Float4Vector'),
    (pa.float64(), list(range(128)), 'Float8Vector'),
    (pa.timestamp('s'), list(range(128)), 'TimeStampSecVector'),
    (pa.timestamp('ms'), list(range(128)), 'TimeStampMilliVector'),
    (pa.timestamp('us'), list(range(128)), 'TimeStampMicroVector'),
    (pa.timestamp('ns'), list(range(128)), 'TimeStampNanoVector'),
    # TODO(ARROW-2605): These types miss a conversion from pure Python objects
    #  * pa.time32('s')
    #  * pa.time32('ms')
    #  * pa.time64('us')
    #  * pa.time64('ns')
    (pa.date32(), list(range(128)), 'DateDayVector'),
    (pa.date64(), list(range(128)), 'DateMilliVector'),
    # TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_array(root_allocator, pa_type, py_data, jvm_type):
    """A populated JVM vector converts into an equal pyarrow Array."""
    # Instantiate the JVM vector class by name and fill it element-wise.
    class_name = "org.apache.arrow.vector.{}".format(jvm_type)
    jvm_vector = jpype.JClass(class_name)("vector", root_allocator)
    jvm_vector.allocateNew(len(py_data))
    for idx, item in enumerate(py_data):
        jvm_vector.setSafe(idx, item)
    jvm_vector.setValueCount(len(py_data))

    # Build the reference array natively and compare with the converted one.
    py_array = pa.array(py_data, type=pa_type)
    jvm_array = pa_jvm.array(jvm_vector)
    assert py_array.equals(jvm_array)


# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type,jvm_spec', [
    # TODO: null
    (pa.bool_(), [True, False, True, True], 'BitVector', '{"name":"bool"}'),
    (
        pa.uint8(), list(range(128)), 'UInt1Vector',
        '{"name":"int","bitWidth":8,"isSigned":false}'
    ),
    (
        pa.uint16(), list(range(128)), 'UInt2Vector',
        '{"name":"int","bitWidth":16,"isSigned":false}'
    ),
    (
        pa.uint32(), list(range(128)), 'UInt4Vector',
        '{"name":"int","bitWidth":32,"isSigned":false}'
    ),
    (
        pa.uint64(), list(range(128)), 'UInt8Vector',
        '{"name":"int","bitWidth":64,"isSigned":false}'
    ),
    (
        pa.int8(), list(range(128)), 'TinyIntVector',
        '{"name":"int","bitWidth":8,"isSigned":true}'
    ),
    (
        pa.int16(), list(range(128)), 'SmallIntVector',
        '{"name":"int","bitWidth":16,"isSigned":true}'
    ),
    (
        pa.int32(), list(range(128)), 'IntVector',
        '{"name":"int","bitWidth":32,"isSigned":true}'
    ),
    (
        pa.int64(), list(range(128)), 'BigIntVector',
        '{"name":"int","bitWidth":64,"isSigned":true}'
    ),
    # TODO: float16
    (
        pa.float32(), list(range(128)), 'Float4Vector',
        '{"name":"floatingpoint","precision":"SINGLE"}'
    ),
    (
        pa.float64(), list(range(128)), 'Float8Vector',
        '{"name":"floatingpoint","precision":"DOUBLE"}'
    ),
    (
        pa.timestamp('s'), list(range(128)), 'TimeStampSecVector',
        '{"name":"timestamp","unit":"SECOND","timezone":null}'
    ),
    (
        pa.timestamp('ms'), list(range(128)), 'TimeStampMilliVector',
        '{"name":"timestamp","unit":"MILLISECOND","timezone":null}'
    ),
    (
        pa.timestamp('us'), list(range(128)), 'TimeStampMicroVector',
        '{"name":"timestamp","unit":"MICROSECOND","timezone":null}'
    ),
    (
        pa.timestamp('ns'), list(range(128)), 'TimeStampNanoVector',
        '{"name":"timestamp","unit":"NANOSECOND","timezone":null}'
    ),
    # TODO(ARROW-2605): These types miss a conversion from pure Python objects
    #  * pa.time32('s')
    #  * pa.time32('ms')
    #  * pa.time64('us')
    #  * pa.time64('ns')
    (
        pa.date32(), list(range(128)), 'DateDayVector',
        '{"name":"date","unit":"DAY"}'
    ),
    (
        pa.date64(), list(range(128)), 'DateMilliVector',
        '{"name":"date","unit":"MILLISECOND"}'
    ),
    # TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_record_batch(root_allocator, pa_type, py_data, jvm_type,
                          jvm_spec):
    """A single-column Java VectorSchemaRoot converts to an equal
    pyarrow RecordBatch."""
    # Create vector
    cls = "org.apache.arrow.vector.{}".format(jvm_type)
    jvm_vector = jpype.JClass(cls)("vector", root_allocator)
    jvm_vector.allocateNew(len(py_data))
    for i, val in enumerate(py_data):
        jvm_vector.setSafe(i, val)
    jvm_vector.setValueCount(len(py_data))

    # Create field
    spec = {
        'name': 'field_name',
        'nullable': False,
        'type': json.loads(jvm_spec),
        # TODO: This needs to be set for complex types
        'children': []
    }
    jvm_field = _jvm_field(json.dumps(spec))

    # Create VectorSchemaRoot
    jvm_fields = jpype.JClass('java.util.ArrayList')()
    jvm_fields.add(jvm_field)
    jvm_vectors = jpype.JClass('java.util.ArrayList')()
    jvm_vectors.add(jvm_vector)
    jvm_vsr = jpype.JClass('org.apache.arrow.vector.VectorSchemaRoot')
    jvm_vsr = jvm_vsr(jvm_fields, jvm_vectors, len(py_data))

    py_record_batch = pa.RecordBatch.from_arrays(
        [pa.array(py_data, type=pa_type)],
        ['col']
    )
    jvm_record_batch = pa_jvm.record_batch(jvm_vsr)
    assert py_record_batch.equals(jvm_record_batch)


def _string_to_varchar_holder(ra, string):
    """Wrap a Python string (or None) in a JVM NullableVarCharHolder.

    None maps to an unset holder (isSet == 0); otherwise the UTF-8 bytes
    of *string* are copied into a buffer allocated from allocator *ra*.
    """
    nvch_cls = "org.apache.arrow.vector.holders.NullableVarCharHolder"
    holder = jpype.JClass(nvch_cls)()
    if string is None:
        holder.isSet = 0
    else:
        holder.isSet = 1
        # BUG FIX: the original wrapped the literal "string" instead of the
        # *string* argument, so every non-None holder carried the same
        # payload regardless of the input.
        value = jpype.JClass("java.lang.String")(string)
        std_charsets = jpype.JClass("java.nio.charset.StandardCharsets")
        bytes_ = value.getBytes(std_charsets.UTF_8)
        holder.buffer = ra.buffer(len(bytes_))
        holder.buffer.setBytes(0, bytes_, 0, len(bytes_))
        holder.start = 0
        holder.end = len(bytes_)
    return holder


# TODO(ARROW-2607)
@pytest.mark.xfail(reason="from_buffers is only supported for "
                          "primitive arrays yet")
def test_jvm_string_array(root_allocator):
    """A Java VarCharVector (including a null slot and non-ASCII text)
    converts to an equal pyarrow string array."""
    data = [u"string", None, u"töst"]
    cls = "org.apache.arrow.vector.VarCharVector"
    jvm_vector = jpype.JClass(cls)("vector", root_allocator)
    jvm_vector.allocateNew()

    for i, string in enumerate(data):
        # BUG FIX: forward the actual element instead of the literal
        # "string", so None and the non-ASCII entry are really exercised.
        holder = _string_to_varchar_holder(root_allocator, string)
        jvm_vector.setSafe(i, holder)
        jvm_vector.setValueCount(i + 1)

    py_array = pa.array(data, type=pa.string())
    jvm_array = pa_jvm.array(jvm_vector)
    assert py_array.equals(jvm_array)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python from __future__ import print_function, division, absolute_import, \ unicode_literals import os import sys from jsbsim import FGFDMExec import matplotlib.pyplot as plt import argparse from pint import UnitRegistry from html_report_generator import HtmlReportGenerator from plots import add_plots from fixedwing_controller import FixedWingController import pyprind import random import copy from analysis import analyse ureg = UnitRegistry() class Simulator: """Simulate mtecs""" def __init__(self, args): """Constructor""" self.args = args self.fdm = FGFDMExec(root_dir=args["jsbsim_root"]) self.fdm.load_model("Rascal110-JSBSim") # settings self.sim_end_time_s = 120 self.dt = 0.005 self.dt_total_energy = 0.02 self.ic = { "hgt": 400 * ureg.meter } # self.mode = "attitude" self.mode = "position" self.noise_enabled = True self.sigmas = { "airspeed": 5.0, "altitude": 2.0, "speed_body_u": 1.0, # TODO think again about introducing the noise in the body frame "speed_body_v": 1.0, "speed_body_w": 1.0, } self.parameters = { "airspeed_trim": 20.0, "airspeed_min": 7.0, "airspeed_max": 60.0, "coordinated_min_speed": 1000.0, "coordinated_method": 0.0, "att_tc": 0.5, "k_p": 0.08, "k_ff": 0.4, "k_i": 0.05, "i_max": 0.4, "pitch_max_rate_pos": 0.0, # 0: disable "pitch_max_rate_neg": 0.0, # 0: disable "pitch_roll_ff": 0.0, "throttle_default": 0.2, "mtecs_acc_p": 0.01, "mtecs_fpa_p": 0.01, "mtecs_throttle_ff": 0.0, "mtecs_throttle_p": 0.1, "mtecs_throttle_i": 0.25, "mtecs_pitch_ff": 0.0, "mtecs_pitch_p": 0.1, "mtecs_pitch_i": 0.03, "mtecs_airspeed_lowpass_cutoff": 0.1, "mtecs_airspeed_derivative_lowpass_cutoff": 0.1, "mtecs_altitude_lowpass_cutoff": 0.1, "mtecs_flightpathangle_lowpass_cutoff": 0.1, } self.control_surface_scaler = 1.0 self.controller = FixedWingController(self.parameters, self.dt_total_energy/self.dt, self.mode) def init_sim(self): """init/reset simulation""" # init states (dictionary of lists (each list contains a time series of # a state/value)) 
self.jsbs_states = { "ic/gamma-rad": [0], "position/h-sl-meters": [self.ic["hgt"].magnitude], "attitude/phi-rad": [0], "velocities/p-rad_sec": [0], "attitude/theta-rad": [0], "velocities/q-rad_sec": [0], "attitude/psi-rad": [0], "velocities/r-rad_sec": [0], "velocities/u-fps": [0], "velocities/v-fps": [0], "velocities/w-fps": [0], "accelerations/udot-ft_sec2": [0], "accelerations/vdot-ft_sec2": [0], "accelerations/wdot-ft_sec2": [0], "velocities/vt-fps": [ureg.Quantity(self.parameters["airspeed_trim"], "m/s").to(ureg["ft/s"]).magnitude], # XXX is this true airspeed, check... "flight-path/gamma-rad": [0], "propulsion/engine/thrust-lbs": [0] } self.jsbs_ic = { "ic/h-sl-ft": [self.ic["hgt"].to(ureg.foot).magnitude], "ic/vt-kts": [ureg.Quantity(self.parameters["airspeed_trim"], "m/s").to(ureg["kt"]).magnitude], # XXX is this true airspeed, check... "ic/gamma-rad": [0], } self.jsbs_inputs = { "fcs/aileron-cmd-norm": [0], "fcs/elevator-cmd-norm": [0], "fcs/rudder-cmd-norm": [0], "fcs/throttle-cmd-norm": [0.0], "fcs/mixture-cmd-norm": [0.87], "propulsion/magneto_cmd": [3], "propulsion/starter_cmd": [1] } self.sim_states = { "t": [0.0], } self.setpoints = {} self.update_setpoints(0) self.noisy_states = {} self.update_noisy_states(self.get_state()) self.control_data_log = {} # set initial conditions and trim for k, v in self.jsbs_ic.items(): self.fdm.set_property_value(k, v[0]) self.fdm.set_dt(self.dt) self.fdm.reset_to_initial_conditions(0) self.fdm.do_trim(0) def get_state(self): """ creates a dictionary of the current state, to be used as control input """ x = {} x["t"] = self.sim_states["t"][-1] x["roll"] = self.jsbs_states["attitude/phi-rad"][-1] x["roll_rate"] = self.jsbs_states["velocities/p-rad_sec"][-1] x["pitch"] = self.jsbs_states["attitude/theta-rad"][-1] x["pitch_rate"] = self.jsbs_states["velocities/q-rad_sec"][-1] x["yaw"] = self.jsbs_states["attitude/psi-rad"][-1] x["yaw_rate"] = self.jsbs_states["velocities/r-rad_sec"][-1] x["speed_body_u"] = ureg.Quantity( 
self.jsbs_states["velocities/u-fps"][-1], "ft/s").to(ureg["m/s"]).magnitude x["speed_body_v"] = ureg.Quantity( self.jsbs_states["velocities/v-fps"][-1], "ft/s").to(ureg["m/s"]).magnitude x["speed_body_w"] = ureg.Quantity( self.jsbs_states["velocities/w-fps"][-1], "ft/s").to(ureg["m/s"]).magnitude x["acc_body_x"] = self.jsbs_states["accelerations/udot-ft_sec2"][-1] x["acc_body_y"] = self.jsbs_states["accelerations/vdot-ft_sec2"][-1] x["acc_body_z"] = self.jsbs_states["accelerations/wdot-ft_sec2"][-1] x["airspeed"] = ureg.Quantity( self.jsbs_states["velocities/vt-fps"][-1], "ft/s").to(ureg["m/s"]).magnitude x["altitude"] = self.jsbs_states["position/h-sl-meters"][-1] x["flightpathangle"] = self.jsbs_states["flight-path/gamma-rad"][-1] # additonal/secondary data that is not a state in the physical sense but is needed # by the controller and describes the aircraft state as well: if x["airspeed"] > self.parameters["airspeed_min"]: x["scaler"] = self.parameters["airspeed_trim"] / x["airspeed"] else: x["scaler"] = self.parameters["airspeed_trim"] \ / self.parameters["airspeed_min"] x["lock_integrator"] = False return x def calc_setpoints(self, time): """Generate setpoint to be used in the controller""" r = {} r["roll"] = 0.0 r["pitch"] = 0.0 r["yaw"] = 0.0 r["roll_rate"] = 0.0 r["pitch_rate"] = 0.0 r["yaw_rate"] = 0.0 r["altitude"] = self.ic["hgt"].magnitude if time < 20 else self.ic["hgt"].magnitude + 10 # r["altitude"] = self.ic["hgt"].magnitude r["velocity"] = self.parameters["airspeed_trim"] return r def update_setpoints(self, time): """updates the setpoint""" sp = self.calc_setpoints(time) for k, v in sp.items(): self.setpoints.setdefault(k,[]).append(v) def step(self): """Perform one simulation step implementation is accoding to FGFDMExec's own simulate but we don't want to move the parameters in and out manually """ # control # self.jsbs_inputs["fcs/elevator-cmd-norm"].append(0.01 * (400 - # self.jsbs_states["position/h-sl-meters"][-1])) 
self.update_setpoints(self.fdm.get_sim_time()) state = self.get_state() self.update_noisy_states(state) state_estimate = self.apply_noise(state) # estimate is simulated as true state plus gaussian noise u, control_data = self.controller.control(state=state_estimate, setpoint={k: v[-1] for k, v in self.setpoints.items()}, parameters = self.parameters) self.jsbs_inputs["fcs/aileron-cmd-norm"].append(u[0] * self.control_surface_scaler) self.jsbs_inputs["fcs/elevator-cmd-norm"].append(-u[1] * self.control_surface_scaler) self.jsbs_inputs["fcs/rudder-cmd-norm"].append(u[2] * self.control_surface_scaler) self.jsbs_inputs["fcs/throttle-cmd-norm"].append(u[3]) # copy control data to for later plotting for k,v in control_data.items(): self.control_data_log.setdefault(k, [0.0]).append(v) # pass control resultto jsbsim for k, v in self.jsbs_inputs.items(): self.fdm.set_property_value(k, v[-1]) # do one step in jsbsim self.fdm.run() # read out result from jsbsim for k, v in self.jsbs_states.items(): self.jsbs_states[k].append(self.fdm.get_property_value(k)) return self.fdm.get_sim_time() def output_results(self): """Generate a report of the simulation""" rg = HtmlReportGenerator(self.args) add_plots(self, rg) # change add_plots to show different plots! 
rg.variables.update(analyse(self)) rg.generate() rg.save() print("Report saved to {0}".format(self.args["filename_out"])) def apply_noise(self, state): """replaces entries in state with the noisy data (for states for which noise data exists)""" state_estimate = copy.copy(state) for k,v in self.noisy_states.items(): state_estimate[k] = self.noisy_states[k][-1] return state_estimate def update_noisy_states(self, state): """caclculate noisy version of state for which noise data exists""" for k, v in self.sigmas.items(): self.noisy_states.setdefault(k,[]).append(state[k] + random.gauss(0,v)) def main(self): """main method of the simulator""" self.init_sim() # run simulation bar = pyprind.ProgBar(self.sim_end_time_s) time_last_bar_update = 0 while self.sim_states["t"][-1] < self.sim_end_time_s: self.sim_states["t"].append(self.step()) if self.sim_states["t"][-1] >= time_last_bar_update + 1: # throttle update of progress bar bar.update() time_last_bar_update = self.sim_states["t"][-1] bar.update() self.output_results() if __name__ == "__main__": """run with python2 simulator.py""" parser = argparse.ArgumentParser( description='simulates aircraft control with px4/mtecs') parser.add_argument('--test', dest='test', action='store_true') parser.add_argument( '--jsbsim_root', dest='jsbsim_root', default=os.path.dirname(os.path.realpath(sys.argv[0])) + '/../external/') parser.add_argument('-o', dest='filename_out', default='report.html') args = parser.parse_args() s = Simulator(vars(args)) if args.test: s.test() else: s.main()
unknown
codeparrot/codeparrot-clean
#==========================================================================
#
#   Copyright Insight Software Consortium
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#          http://www.apache.org/licenses/LICENSE-2.0.txt
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
#==========================================================================*/

#
#  Example on the use of the SmoothingRecursiveGaussianImageFilter
#
#  Usage: script.py <input-image> <output-image> <sigma>
#

import itk
from sys import argv

# Print pipeline progress to the console.
itk.auto_progress(2)

# Read the input image as 2-D unsigned char.
reader = itk.ImageFileReader.IUC2.New(FileName=argv[1])

# Smooth with a recursive Gaussian of the requested sigma.
# NOTE(review): eval() on a command-line argument executes arbitrary code;
# kept for compatibility with existing invocations, but float(argv[3])
# would be the safe choice for a plain numeric sigma.
# ("smoother" instead of "filter" avoids shadowing the builtin.)
smoother = itk.SmoothingRecursiveGaussianImageFilter.New(
    reader, Sigma=eval(argv[3]))

# Run the pipeline and write the result.
itk.write(smoother, argv[2])
unknown
codeparrot/codeparrot-clean
# $Id: ssl.py 90 2014-04-02 22:06:23Z andrewflnr@gmail.com $ # Portion Copyright 2012 Google Inc. All rights reserved. """Secure Sockets Layer / Transport Layer Security.""" import dpkt import ssl_ciphersuites import struct import binascii import traceback import datetime # # Note from April 2011: cde...@gmail.com added code that parses SSL3/TLS messages more in depth. # # Jul 2012: afleenor@google.com modified and extended SSL support further. # class SSL2(dpkt.Packet): __hdr__ = ( ('len', 'H', 0), ('msg', 's', ''), ('pad', 's', ''), ) def unpack(self, buf): dpkt.Packet.unpack(self, buf) if self.len & 0x8000: n = self.len = self.len & 0x7FFF self.msg, self.data = self.data[:n], self.data[n:] else: n = self.len = self.len & 0x3FFF padlen = ord(self.data[0]) self.msg = self.data[1:1+n] self.pad = self.data[1+n:1+n+padlen] self.data = self.data[1+n+padlen:] # SSLv3/TLS versions SSL3_V = 0x0300 TLS1_V = 0x0301 TLS11_V = 0x0302 TLS12_V = 0x0303 ssl3_versions_str = { SSL3_V: 'SSL3', TLS1_V: 'TLS 1.0', TLS11_V: 'TLS 1.1', TLS12_V: 'TLS 1.2' } SSL3_VERSION_BYTES = set(('\x03\x00', '\x03\x01', '\x03\x02', '\x03\x03')) # Alert levels SSL3_AD_WARNING = 1 SSL3_AD_FATAL = 2 alert_level_str = { SSL3_AD_WARNING: 'SSL3_AD_WARNING', SSL3_AD_FATAL: 'SSL3_AD_FATAL' } # SSL3 alert descriptions SSL3_AD_CLOSE_NOTIFY = 0 SSL3_AD_UNEXPECTED_MESSAGE = 10 # fatal SSL3_AD_BAD_RECORD_MAC = 20 # fatal SSL3_AD_DECOMPRESSION_FAILURE = 30 # fatal SSL3_AD_HANDSHAKE_FAILURE = 40 # fatal SSL3_AD_NO_CERTIFICATE = 41 SSL3_AD_BAD_CERTIFICATE = 42 SSL3_AD_UNSUPPORTED_CERTIFICATE = 43 SSL3_AD_CERTIFICATE_REVOKED = 44 SSL3_AD_CERTIFICATE_EXPIRED = 45 SSL3_AD_CERTIFICATE_UNKNOWN = 46 SSL3_AD_ILLEGAL_PARAMETER = 47 # fatal # TLS1 alert descriptions TLS1_AD_DECRYPTION_FAILED = 21 TLS1_AD_RECORD_OVERFLOW = 22 TLS1_AD_UNKNOWN_CA = 48 # fatal TLS1_AD_ACCESS_DENIED = 49 # fatal TLS1_AD_DECODE_ERROR = 50 # fatal TLS1_AD_DECRYPT_ERROR = 51 TLS1_AD_EXPORT_RESTRICTION = 60 # fatal TLS1_AD_PROTOCOL_VERSION = 70 # 
fatal TLS1_AD_INSUFFICIENT_SECURITY = 71 # fatal TLS1_AD_INTERNAL_ERROR = 80 # fatal TLS1_AD_USER_CANCELLED = 90 TLS1_AD_NO_RENEGOTIATION = 100 #/* codes 110-114 are from RFC3546 */ TLS1_AD_UNSUPPORTED_EXTENSION = 110 TLS1_AD_CERTIFICATE_UNOBTAINABLE = 111 TLS1_AD_UNRECOGNIZED_NAME = 112 TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE = 113 TLS1_AD_BAD_CERTIFICATE_HASH_VALUE = 114 TLS1_AD_UNKNOWN_PSK_IDENTITY = 115 # fatal # Mapping alert types to strings alert_description_str = { SSL3_AD_CLOSE_NOTIFY: 'SSL3_AD_CLOSE_NOTIFY', SSL3_AD_UNEXPECTED_MESSAGE: 'SSL3_AD_UNEXPECTED_MESSAGE', SSL3_AD_BAD_RECORD_MAC: 'SSL3_AD_BAD_RECORD_MAC', SSL3_AD_DECOMPRESSION_FAILURE: 'SSL3_AD_DECOMPRESSION_FAILURE', SSL3_AD_HANDSHAKE_FAILURE: 'SSL3_AD_HANDSHAKE_FAILURE', SSL3_AD_NO_CERTIFICATE: 'SSL3_AD_NO_CERTIFICATE', SSL3_AD_BAD_CERTIFICATE: 'SSL3_AD_BAD_CERTIFICATE', SSL3_AD_UNSUPPORTED_CERTIFICATE: 'SSL3_AD_UNSUPPORTED_CERTIFICATE', SSL3_AD_CERTIFICATE_REVOKED: 'SSL3_AD_CERTIFICATE_REVOKED', SSL3_AD_CERTIFICATE_EXPIRED: 'SSL3_AD_CERTIFICATE_EXPIRED', SSL3_AD_CERTIFICATE_UNKNOWN: 'SSL3_AD_CERTIFICATE_UNKNOWN', SSL3_AD_ILLEGAL_PARAMETER: 'SSL3_AD_ILLEGAL_PARAMETER', TLS1_AD_DECRYPTION_FAILED: 'TLS1_AD_DECRYPTION_FAILED', TLS1_AD_RECORD_OVERFLOW: 'TLS1_AD_RECORD_OVERFLOW', TLS1_AD_UNKNOWN_CA: 'TLS1_AD_UNKNOWN_CA', TLS1_AD_ACCESS_DENIED: 'TLS1_AD_ACCESS_DENIED', TLS1_AD_DECODE_ERROR: 'TLS1_AD_DECODE_ERROR', TLS1_AD_DECRYPT_ERROR: 'TLS1_AD_DECRYPT_ERROR', TLS1_AD_EXPORT_RESTRICTION: 'TLS1_AD_EXPORT_RESTRICTION', TLS1_AD_PROTOCOL_VERSION: 'TLS1_AD_PROTOCOL_VERSION', TLS1_AD_INSUFFICIENT_SECURITY: 'TLS1_AD_INSUFFICIENT_SECURITY', TLS1_AD_INTERNAL_ERROR: 'TLS1_AD_INTERNAL_ERROR', TLS1_AD_USER_CANCELLED: 'TLS1_AD_USER_CANCELLED', TLS1_AD_NO_RENEGOTIATION: 'TLS1_AD_NO_RENEGOTIATION', TLS1_AD_UNSUPPORTED_EXTENSION: 'TLS1_AD_UNSUPPORTED_EXTENSION', TLS1_AD_CERTIFICATE_UNOBTAINABLE: 'TLS1_AD_CERTIFICATE_UNOBTAINABLE', TLS1_AD_UNRECOGNIZED_NAME: 'TLS1_AD_UNRECOGNIZED_NAME', 
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE: 'TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE', TLS1_AD_BAD_CERTIFICATE_HASH_VALUE: 'TLS1_AD_BAD_CERTIFICATE_HASH_VALUE', TLS1_AD_UNKNOWN_PSK_IDENTITY: 'TLS1_AD_UNKNOWN_PSK_IDENTITY' } # struct format strings for parsing buffer lengths # don't forget, you have to pad a 3-byte value with \x00 _SIZE_FORMATS = ['!B', '!H', '!I', '!I'] def parse_variable_array(buf, lenbytes): """ Parse an array described using the 'Type name<x..y>' syntax from the spec Read a length at the start of buf, and returns that many bytes after, in a tuple with the TOTAL bytes consumed (including the size). This does not check that the array is the right length for any given datatype. """ # first have to figure out how to parse length assert lenbytes <= 4 # pretty sure 4 is impossible, too size_format = _SIZE_FORMATS[lenbytes - 1] padding = '\x00' if lenbytes == 3 else '' # read off the length size = struct.unpack(size_format, padding + buf[:lenbytes])[0] # read the actual data data = buf[lenbytes:lenbytes + size] # if len(data) != size: insufficient data return data, size + lenbytes class SSL3Exception(Exception): pass class TLSRecord(dpkt.Packet): """ SSLv3 or TLSv1+ packet. In addition to the fields specified in the header, there are compressed and decrypted fields, indicating whether, in the language of the spec, this is a TLSPlaintext, TLSCompressed, or TLSCiphertext. The application will have to figure out when it's appropriate to change these values. 
""" __hdr__ = ( ('type', 'B', 0), ('version', 'H', 0), ('length', 'H', 0), ) def __init__(self, *args, **kwargs): # assume plaintext unless specified otherwise in arguments self.compressed = kwargs.pop('compressed', False) self.encrypted = kwargs.pop('encrypted', False) # parent constructor dpkt.Packet.__init__(self, *args, **kwargs) # make sure length and data are consistent self.length = len(self.data) def unpack(self, buf): dpkt.Packet.unpack(self, buf) header_length = self.__hdr_len__ self.data = buf[header_length:header_length+self.length] # make sure buffer was long enough if len(self.data) != self.length: raise dpkt.NeedData('TLSRecord data was too short.') # assume compressed and encrypted when it's been parsed from # raw data self.compressed = True self.encrypted = True class TLSChangeCipherSpec(dpkt.Packet): """ ChangeCipherSpec message is just a single byte with value 1 """ __hdr__ = (('type', 'B', 1),) class TLSAppData(str): """ As far as TLSRecord is concerned, AppData is just an opaque blob. """ pass class TLSAlert(dpkt.Packet): __hdr__ = ( ('level', 'B', 1), ('description', 'B', 0), ) class TLSHelloRequest(dpkt.Packet): __hdr__ = tuple() class TLSClientHello(dpkt.Packet): __hdr__ = ( ('version', 'H', 0x0301), ('random', '32s', '\x00'*32), ) # the rest is variable-length and has to be done manually def unpack(self, buf): dpkt.Packet.unpack(self, buf) # now session, cipher suites, extensions are in self.data self.session_id, pointer = parse_variable_array(self.data, 1) # print 'pointer',pointer # handle ciphersuites ciphersuites, parsed = parse_variable_array(self.data[pointer:], 2) pointer += parsed self.num_ciphersuites = len(ciphersuites) / 2 # check len(ciphersuites) % 2 == 0 ? 
# compression methods compression_methods, parsed = parse_variable_array( self.data[pointer:], 1) pointer += parsed self.num_compression_methods = parsed - 1 self.compression_methods = map(ord, compression_methods) # extensions class TLSServerHello(dpkt.Packet): __hdr__ = ( ('version', 'H', '0x0301'), ('random', '32s', '\x00'*32), ) # session is variable, forcing rest to be manual def unpack(self, buf): try: dpkt.Packet.unpack(self, buf) self.session_id, pointer = parse_variable_array(self.data, 1) # single cipher suite self.cipher_suite = struct.unpack('!H', self.data[pointer:pointer+2])[0] pointer += 2 # single compression method self.compression = struct.unpack('!B', self.data[pointer:pointer+1])[0] pointer += 1 # ignore extensions for now except struct.error: # probably data too short raise dpkt.NeedData class TLSUnknownHandshake(dpkt.Packet): __hdr__ = tuple() TLSCertificate = TLSUnknownHandshake TLSServerKeyExchange = TLSUnknownHandshake TLSCertificateRequest = TLSUnknownHandshake TLSServerHelloDone = TLSUnknownHandshake TLSCertificateVerify = TLSUnknownHandshake TLSClientKeyExchange = TLSUnknownHandshake TLSFinished = TLSUnknownHandshake # mapping of handshake type ids to their names # and the classes that implement them HANDSHAKE_TYPES = { 0: ('HelloRequest', TLSHelloRequest), 1: ('ClientHello', TLSClientHello), 2: ('ServerHello', TLSServerHello), 11: ('Certificate', TLSCertificate), 12: ('ServerKeyExchange', TLSServerKeyExchange), 13: ('CertificateRequest', TLSCertificateRequest), 14: ('ServerHelloDone', TLSServerHelloDone), 15: ('CertificateVerify', TLSCertificateVerify), 16: ('ClientKeyExchange', TLSClientKeyExchange), 20: ('Finished', TLSFinished), } class TLSHandshake(dpkt.Packet): ''' A TLS Handshake message This goes for all messages encapsulated in the Record layer, but especially important for handshakes and app data: A message may be spread across a number of TLSRecords, in addition to the possibility of there being more than one in a given 
Record. You have to put together the contents of TLSRecord's yourself. ''' # struct.unpack can't handle the 3-byte int, so we parse it as bytes # (and store it as bytes so dpkt doesn't get confused), and turn it into # an int in a user-facing property __hdr__ = ( ('type', 'B', 0), ('length_bytes', '3s', 0), ) def unpack(self, buf): dpkt.Packet.unpack(self, buf) # Wait, might there be more than one message of self.type? embedded_type = HANDSHAKE_TYPES.get(self.type, None) if embedded_type is None: raise SSL3Exception('Unknown or invalid handshake type %d' % self.type) # only take the right number of bytes self.data = self.data[:self.length] if len(self.data) != self.length: raise dpkt.NeedData # get class out of embedded_type tuple self.data = embedded_type[1](self.data) @property def length(self): return struct.unpack('!I', '\x00' + self.length_bytes)[0] RECORD_TYPES = { 20: TLSChangeCipherSpec, 21: TLSAlert, 22: TLSHandshake, 23: TLSAppData, } class SSLFactory(object): def __new__(cls, buf): v = buf[1:3] if v in [ '\x03\x00', '\x03\x01', '\x03\x02' ]: return SSL3(buf) # SSL2 has no characteristic header or magic bytes, so we just assume # that the msg is an SSL2 msg if it is not detected as SSL3+ return SSL2(buf) def TLSMultiFactory(buf): ''' Attempt to parse one or more TLSRecord's out of buf Args: buf: string containing SSL/TLS messages. May have an incomplete record on the end Returns: [TLSRecord] int, total bytes consumed, != len(buf) if an incomplete record was left at the end. Raises SSL3Exception. 
''' i, n = 0, len(buf) msgs = [] while i < n: v = buf[i+1:i+3] if v in SSL3_VERSION_BYTES: try: msg = TLSRecord(buf[i:]) msgs.append(msg) except dpkt.NeedData: break else: raise SSL3Exception('Bad TLS version in buf: %r' % buf[i:i+5]) i += len(msg) return msgs, i import unittest _hexdecode = binascii.a2b_hex class TLSRecordTest(unittest.TestCase): """ Test basic TLSRecord functionality For this test, the contents of the record doesn't matter, since we're not parsing the next layer. """ def setUp(self): # add some extra data, to make sure length is parsed correctly self.p = TLSRecord('\x17\x03\x01\x00\x08abcdefghzzzzzzzzzzz') def testContentType(self): self.assertEqual(self.p.type, 23) def testVersion(self): self.assertEqual(self.p.version, 0x0301) def testLength(self): self.assertEqual(self.p.length, 8) def testData(self): self.assertEqual(self.p.data, 'abcdefgh') def testInitialFlags(self): self.assertTrue(self.p.compressed) self.assertTrue(self.p.encrypted) def testRepack(self): p2 = TLSRecord(type=23, version=0x0301, data='abcdefgh') self.assertEqual(p2.type, 23) self.assertEqual(p2.version, 0x0301) self.assertEqual(p2.length, 8) self.assertEqual(p2.data, 'abcdefgh') self.assertEqual(p2.pack(), self.p.pack()) def testTotalLength(self): # that len(p) includes header self.assertEqual(len(self.p), 13) def testRaisesNeedDataWhenBufIsShort(self): self.assertRaises( dpkt.NeedData, TLSRecord, '\x16\x03\x01\x00\x10abc') class TLSChangeCipherSpecTest(unittest.TestCase): "It's just a byte. 
This will be quick, I promise" def setUp(self): self.p = TLSChangeCipherSpec('\x01') def testParses(self): self.assertEqual(self.p.type, 1) def testTotalLength(self): self.assertEqual(len(self.p), 1) class TLSAppDataTest(unittest.TestCase): "AppData is basically just a string" def testValue(self): d = TLSAppData('abcdefgh') self.assertEqual(d, 'abcdefgh') class TLSHandshakeTest(unittest.TestCase): def setUp(self): self.h = TLSHandshake('\x00\x00\x00\x01\xff') def testCreatedInsideMessage(self): self.assertTrue(isinstance(self.h.data, TLSHelloRequest)) def testLength(self): self.assertEqual(self.h.length, 0x01) def testRaisesNeedData(self): self.assertRaises(dpkt.NeedData, TLSHandshake, '\x00\x00\x01\x01') class ClientHelloTest(unittest.TestCase): 'This data is extracted from and verified by Wireshark' def setUp(self): self.data = _hexdecode( "01000199" # handshake header "0301" # version "5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d" # rand "2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1" # session id # cipher suites "005400ffc00ac0140088008700390038c00fc00500840035c007c009c011c0130045004400330032c00cc00ec002c0040096004100050004002fc008c01200160013c00dc003feff000ac006c010c00bc00100020001" "0100" # compresssion methods # extensions "00fc0000000e000c0000096c6f63616c686f7374000a00080006001700180019000b00020100002300d0a50b2e9f618a9ea9bf493ef49b421835cd2f6b05bbe1179d8edf70d58c33d656e8696d36d7e7e0b9d3ecc0e4de339552fa06c64c0fcb550a334bc43944e2739ca342d15a9ebbe981ac87a0d38160507d47af09bdc16c5f0ee4cdceea551539382333226048a026d3a90a0535f4a64236467db8fee22b041af986ad0f253bc369137cd8d8cd061925461d7f4d7895ca9a4181ab554dad50360ac31860e971483877c9335ac1300c5e78f3e56f3b8e0fc16358fcaceefd5c8d8aaae7b35be116f8832856ca61144fcdd95e071b94d0cf7233740000" "FFFFFFFFFFFFFFFF") # random garbage self.p = TLSHandshake(self.data) def testClientHelloConstructed(self): 'Make sure the correct class was constructed' #print self.p 
self.assertTrue(isinstance(self.p.data, TLSClientHello)) # def testClientDateCorrect(self): # self.assertEqual(self.p.random_unixtime, 1342710284) def testClientRandomCorrect(self): self.assertEqual(self.p.data.random, _hexdecode('5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d')) def testCipherSuiteLength(self): # we won't bother testing the identity of each cipher suite in the list. self.assertEqual(self.p.data.num_ciphersuites, 42) #self.assertEqual(len(self.p.ciphersuites), 42) def testSessionId(self): self.assertEqual(self.p.data.session_id, _hexdecode('09bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1')) def testCompressionMethods(self): self.assertEqual(self.p.data.num_compression_methods, 1) def testTotalLength(self): self.assertEqual(len(self.p), 413) class ServerHelloTest(unittest.TestCase): 'Again, from Wireshark' def setUp(self): self.data = _hexdecode('0200004d03015008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed10002000005ff01000100') self.p = TLSHandshake(self.data) def testConstructed(self): self.assertTrue(isinstance(self.p.data, TLSServerHello)) # def testDateCorrect(self): # self.assertEqual(self.p.random_unixtime, 1342710284) def testRandomCorrect(self): self.assertEqual(self.p.data.random, _hexdecode('5008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd')) def testCipherSuite(self): self.assertEqual( ssl_ciphersuites.BY_CODE[self.p.data.cipher_suite].name, 'TLS_RSA_WITH_NULL_SHA') def testTotalLength(self): self.assertEqual(len(self.p), 81) class TLSMultiFactoryTest(unittest.TestCase): "Made up test data" def setUp(self): self.data = _hexdecode('1703010010' # header 1 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' # data 1 '1703010010' # header 2 'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' # data 2 '1703010010' # header 3 'CCCCCCCC') # data 3 (incomplete) self.msgs, self.bytes_parsed = TLSMultiFactory(self.data) def testNumMessages(self): # 
only complete messages should be parsed, incomplete ones left # in buffer self.assertEqual(len(self.msgs), 2) def testBytesParsed(self): self.assertEqual(self.bytes_parsed, (5 + 16) * 2) def testFirstMsgData(self): self.assertEqual(self.msgs[0].data, _hexdecode('AA' * 16)) def testSecondMsgData(self): self.assertEqual(self.msgs[1].data, _hexdecode('BB' * 16)) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python3 # Generates random shapefile which may be used for benchmarks import os import sys import random import string import math from osgeo import ogr from optparse import OptionParser def error(msg): print(msg) sys.exit(1) parser = OptionParser("usage: %prog [options] output") parser.add_option("-t", "--type", dest="type", type="choice", choices=("point", "line", "polygon"), default="point", help="Geometry type") parser.add_option("-f", "--features", dest="features", type="int", default=1000, help="Number of features") parser.add_option("-c", "--coordinates", dest="coordinates", type="int", default=10, help="Number of coordinates per feature (lines and polygons)") parser.add_option("-a", "--attributes", dest="attributes", type="int", default=10, help="Number of attributes") parser.add_option("-e", "--extent", dest="extent", type="string", default="-180,-90,180,90", help="Extent") (options, args) = parser.parse_args() if len(args) != 1: error("Output file path missing") (minx, miny, maxx, maxy) = map(float, options.extent.split(",")) driverName = "ESRI Shapefile" drv = ogr.GetDriverByName(driverName) if drv is None: error("%s driver not available.\n" % driverName) # delete if exists try: if os.path.exists(args[0]): drv.DeleteDataSource(args[0]) except: pass ds = drv.CreateDataSource(args[0]) if ds is None: error("Creation of output file failed.\n") types = {"point": ogr.wkbPoint, "line": ogr.wkbLineString, "polygon": ogr.wkbPolygon} lyr = ds.CreateLayer("out", None, types[options.type]) if lyr is None: error("Layer creation failed.\n") attrTypes = (ogr.OFTString, ogr.OFTInteger, ogr.OFTReal) stringWidth = 100 for a in range(0, options.attributes): attrName = "attr%s" % a field_defn = ogr.FieldDefn(attrName, random.choice(attrTypes)) if field_defn.type == ogr.OFTString: field_defn.SetWidth(stringWidth) if lyr.CreateField(field_defn) != 0: error("Creating Name field failed.\n") feat_defn = lyr.GetLayerDefn() for f in range(options.features): feat = 
ogr.Feature(feat_defn) buffer = (maxx - minx) / 100 if options.type == "point": geo = ogr.Geometry(ogr.wkbPoint) x = random.uniform(minx, maxx) y = random.uniform(miny, maxy) geo.SetPoint_2D(0, x, y) elif options.type == "line": geo = ogr.Geometry(ogr.wkbLineString) xc = random.uniform(minx + buffer, maxx - buffer) yc = random.uniform(miny + buffer, maxy - buffer) for c in range(options.coordinates): a = c * 2 * math.pi / options.coordinates r = random.uniform(buffer / 10, 9 * buffer / 10) x = xc + r * math.sin(a) y = yc + r * math.cos(a) geo.SetPoint_2D(c, x, y) elif options.type == "polygon": ring = ogr.Geometry(ogr.wkbLinearRing) xc = random.uniform(minx + buffer, maxx - buffer) yc = random.uniform(miny + buffer, maxy - buffer) for c in range(options.coordinates): a = c * 2 * math.pi / options.coordinates r = random.uniform(buffer / 10, 9 * buffer / 10) x = xc + r * math.sin(a) y = yc + r * math.cos(a) ring.SetPoint_2D(c, x, y) geo = ogr.Geometry(ogr.wkbPolygon) geo.AddGeometry(ring) feat.SetGeometry(geo) for i in range(feat_defn.GetFieldCount()): field_defn = feat_defn.GetFieldDefn(i) val = None limit = 10000000 if field_defn.GetType() == ogr.OFTString: nChars = random.randint(0, stringWidth) val = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(nChars)) elif field_defn.GetType() == ogr.OFTInteger: val = random.randint(-limit, limit) elif field_defn.GetType() == ogr.OFTReal: val = random.uniform(-limit, limit) feat.SetField(field_defn.name, val) if lyr.CreateFeature(feat) != 0: error("Failed to create feature in shapefile.\n")
unknown
codeparrot/codeparrot-clean
# # (c) 2017 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ --- author: Ansible Networking Team cliconf: iosxr short_description: Use iosxr cliconf to run command on Cisco IOS XR platform description: - This iosxr plugin provides low level abstraction apis for sending and receiving CLI commands from Cisco IOS XR network devices. 
version_added: "2.4" """ import re import json from ansible.errors import AnsibleConnectionFailure from ansible.module_utils._text import to_text from ansible.module_utils.common._collections_compat import Mapping from ansible.module_utils.connection import ConnectionError from ansible.module_utils.network.common.config import NetworkConfig, dumps from ansible.module_utils.network.common.utils import to_list from ansible.module_utils.network.iosxr.iosxr import sanitize_config, mask_config_blocks_from_diff from ansible.plugins.cliconf import CliconfBase class Cliconf(CliconfBase): def get_device_info(self): device_info = {} device_info['network_os'] = 'iosxr' reply = self.get('show version | utility head -n 20') data = to_text(reply, errors='surrogate_or_strict').strip() match = re.search(r'Version (\S+)$', data, re.M) if match: device_info['network_os_version'] = match.group(1) match = re.search(r'image file is "(.+)"', data) if match: device_info['network_os_image'] = match.group(1) model_search_strs = [r'^[Cc]isco (.+) \(revision', r'^[Cc]isco (\S+ \S+).+bytes of .*memory'] for item in model_search_strs: match = re.search(item, data, re.M) if match: device_info['network_os_model'] = match.group(1) break match = re.search(r'^(.+) uptime', data, re.M) if match: device_info['network_os_hostname'] = match.group(1) return device_info def configure(self, admin=False, exclusive=False): prompt = to_text(self._connection.get_prompt(), errors='surrogate_or_strict').strip() if not prompt.endswith(')#'): if admin and 'admin-' not in prompt: self.send_command('admin') if exclusive: self.send_command('configure exclusive') return self.send_command('configure terminal') def abort(self, admin=False): prompt = to_text(self._connection.get_prompt(), errors='surrogate_or_strict').strip() if prompt.endswith(')#'): self.send_command('abort') if admin and 'admin-' in prompt: self.send_command('exit') def get_config(self, source='running', format='text', flags=None): if source not in 
['running']: raise ValueError("fetching configuration from %s is not supported" % source) lookup = {'running': 'running-config'} cmd = 'show {0} '.format(lookup[source]) cmd += ' '.join(to_list(flags)) cmd = cmd.strip() return self.send_command(cmd) def edit_config(self, candidate=None, commit=True, admin=False, exclusive=False, replace=None, comment=None, label=None): operations = self.get_device_operations() self.check_edit_config_capability(operations, candidate, commit, replace, comment) resp = {} results = [] requests = [] self.configure(admin=admin, exclusive=exclusive) if replace: candidate = 'load {0}'.format(replace) for line in to_list(candidate): if not isinstance(line, Mapping): line = {'command': line} cmd = line['command'] results.append(self.send_command(**line)) requests.append(cmd) # Before any commit happend, we can get a real configuration # diff from the device and make it available by the iosxr_config module. # This information can be usefull either in check mode or normal mode. 
resp['show_commit_config_diff'] = self.get('show commit changes diff') if commit: self.commit(comment=comment, label=label, replace=replace) else: self.discard_changes() self.abort(admin=admin) resp['request'] = requests resp['response'] = results return resp def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): diff = {} device_operations = self.get_device_operations() option_values = self.get_option_values() if candidate is None and device_operations['supports_generate_diff']: raise ValueError("candidate configuration is required to generate diff") if diff_match not in option_values['diff_match']: raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match']))) if diff_replace not in option_values['diff_replace']: raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace']))) # prepare candidate configuration sanitized_candidate = sanitize_config(candidate) candidate_obj = NetworkConfig(indent=1) candidate_obj.load(sanitized_candidate) if running and diff_match != 'none': # running configuration running = mask_config_blocks_from_diff(running, candidate, "ansible") running = sanitize_config(running) running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines) configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace) else: configdiffobjs = candidate_obj.items diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else '' return diff def get(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, output=None, check_all=False): if output: raise ValueError("'output' value %s is not supported for get" % output) return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all) def commit(self, 
comment=None, label=None, replace=None): cmd_obj = {} if replace: cmd_obj['command'] = 'commit replace' cmd_obj['prompt'] = 'This commit will replace or remove the entire running configuration' cmd_obj['answer'] = 'yes' else: if comment and label: cmd_obj['command'] = 'commit label {0} comment {1}'.format(label, comment) elif comment: cmd_obj['command'] = 'commit comment {0}'.format(comment) elif label: cmd_obj['command'] = 'commit label {0}'.format(label) else: cmd_obj['command'] = 'commit show-error' # In some cases even a normal commit, i.e., !replace, # throws a prompt and we need to handle it before # proceeding further cmd_obj['prompt'] = '(C|c)onfirm' cmd_obj['answer'] = 'y' self.send_command(**cmd_obj) def run_commands(self, commands=None, check_rc=True): if commands is None: raise ValueError("'commands' value is required") responses = list() for cmd in to_list(commands): if not isinstance(cmd, Mapping): cmd = {'command': cmd} output = cmd.pop('output', None) if output: raise ValueError("'output' value %s is not supported for run_commands" % output) try: out = self.send_command(**cmd) except AnsibleConnectionFailure as e: if check_rc: raise out = getattr(e, 'err', e) if out is not None: try: out = to_text(out, errors='surrogate_or_strict').strip() except UnicodeError: raise ConnectionError(message=u'Failed to decode output from %s: %s' % (cmd, to_text(out))) try: out = json.loads(out) except ValueError: pass responses.append(out) return responses def discard_changes(self): self.send_command('abort') def get_device_operations(self): return { 'supports_diff_replace': True, 'supports_commit': True, 'supports_rollback': False, 'supports_defaults': False, 'supports_onbox_diff': False, 'supports_commit_comment': True, 'supports_multiline_delimiter': False, 'supports_diff_match': True, 'supports_diff_ignore_lines': True, 'supports_generate_diff': True, 'supports_replace': True, 'supports_admin': True, 'supports_commit_label': True } def get_option_values(self): 
return { 'format': ['text'], 'diff_match': ['line', 'strict', 'exact', 'none'], 'diff_replace': ['line', 'block', 'config'], 'output': [] } def get_capabilities(self): result = super(Cliconf, self).get_capabilities() result['rpc'] += ['commit', 'discard_changes', 'get_diff', 'configure', 'exit'] result['device_operations'] = self.get_device_operations() result.update(self.get_option_values()) return json.dumps(result) def set_cli_prompt_context(self): """ Make sure we are in the operational cli mode :return: None """ if self._connection.connected: self._update_cli_prompt_context(config_context=')#', exit_command='abort')
unknown
codeparrot/codeparrot-clean
"""Automatically download MLdata datasets.""" # Copyright (c) 2011 Pietro Berkes # License: BSD 3 clause import os from os.path import join, exists import re import numbers try: # Python 2 from urllib2 import HTTPError from urllib2 import quote from urllib2 import urlopen except ImportError: # Python 3+ from urllib.error import HTTPError from urllib.parse import quote from urllib.request import urlopen import numpy as np import scipy as sp from scipy import io from shutil import copyfileobj from .base import get_data_home, Bunch MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s" def mldata_filename(dataname): """Convert a raw name for a data set in a mldata.org filename.""" dataname = dataname.lower().replace(' ', '-') return re.sub(r'[().]', '', dataname) def fetch_mldata(dataname, target_name='label', data_name='data', transpose_data=True, data_home=None): """Fetch an mldata.org data set If the file does not exist yet, it is downloaded from mldata.org . mldata.org does not have an enforced convention for storing data or naming the columns in a data set. The default behavior of this function works well with the most common cases: 1) data values are stored in the column 'data', and target values in the column 'label' 2) alternatively, the first column stores target values, and the second data values 3) the data array is stored as `n_features x n_samples` , and thus needs to be transposed to match the `sklearn` standard Keyword arguments allow to adapt these defaults to specific data sets (see parameters `target_name`, `data_name`, `transpose_data`, and the examples below). mldata.org data sets may have multiple columns, which are stored in the Bunch object with their original name. Parameters ---------- dataname: Name of the data set on mldata.org, e.g.: "leukemia", "Whistler Daily Snowfall", etc. The raw name is automatically converted to a mldata.org URL . 
target_name: optional, default: 'label' Name or index of the column containing the target values. data_name: optional, default: 'data' Name or index of the column containing the data. transpose_data: optional, default: True If True, transpose the downloaded data array. data_home: optional, default: None Specify another download and cache folder for the data sets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'DESCR', the full description of the dataset, and 'COL_NAMES', the original names of the dataset columns. Examples -------- Load the 'iris' dataset from mldata.org: >>> from sklearn.datasets.mldata import fetch_mldata >>> import tempfile >>> test_data_home = tempfile.mkdtemp() >>> iris = fetch_mldata('iris', data_home=test_data_home) >>> iris.target.shape (150,) >>> iris.data.shape (150, 4) Load the 'leukemia' dataset from mldata.org, which needs to be transposed to respects the sklearn axes convention: >>> leuk = fetch_mldata('leukemia', transpose_data=True, ... data_home=test_data_home) >>> leuk.data.shape (72, 7129) Load an alternative 'iris' dataset, which has different names for the columns: >>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1, ... data_name=0, data_home=test_data_home) >>> iris3 = fetch_mldata('datasets-UCI iris', ... target_name='class', data_name='double0', ... 
data_home=test_data_home) >>> import shutil >>> shutil.rmtree(test_data_home) """ # normalize dataset name dataname = mldata_filename(dataname) # check if this data set has been already downloaded data_home = get_data_home(data_home=data_home) data_home = join(data_home, 'mldata') if not exists(data_home): os.makedirs(data_home) matlab_name = dataname + '.mat' filename = join(data_home, matlab_name) # if the file does not exist, download it if not exists(filename): urlname = MLDATA_BASE_URL % quote(dataname) try: mldata_url = urlopen(urlname) except HTTPError as e: if e.code == 404: e.msg = "Dataset '%s' not found on mldata.org." % dataname raise # store Matlab file try: with open(filename, 'w+b') as matlab_file: copyfileobj(mldata_url, matlab_file) except: os.remove(filename) raise mldata_url.close() # load dataset matlab file with open(filename, 'rb') as matlab_file: matlab_dict = io.loadmat(matlab_file, struct_as_record=True) # -- extract data from matlab_dict # flatten column names col_names = [str(descr[0]) for descr in matlab_dict['mldata_descr_ordering'][0]] # if target or data names are indices, transform then into names if isinstance(target_name, numbers.Integral): target_name = col_names[target_name] if isinstance(data_name, numbers.Integral): data_name = col_names[data_name] # rules for making sense of the mldata.org data format # (earlier ones have priority): # 1) there is only one array => it is "data" # 2) there are multiple arrays # a) copy all columns in the bunch, using their column name # b) if there is a column called `target_name`, set "target" to it, # otherwise set "target" to first column # c) if there is a column called `data_name`, set "data" to it, # otherwise set "data" to second column dataset = {'DESCR': 'mldata.org dataset: %s' % dataname, 'COL_NAMES': col_names} # 1) there is only one array => it is considered data if len(col_names) == 1: data_name = col_names[0] dataset['data'] = matlab_dict[data_name] # 2) there are multiple arrays 
else: for name in col_names: dataset[name] = matlab_dict[name] if target_name in col_names: del dataset[target_name] dataset['target'] = matlab_dict[target_name] else: del dataset[col_names[0]] dataset['target'] = matlab_dict[col_names[0]] if data_name in col_names: del dataset[data_name] dataset['data'] = matlab_dict[data_name] else: del dataset[col_names[1]] dataset['data'] = matlab_dict[col_names[1]] # set axes to sklearn conventions if transpose_data: dataset['data'] = dataset['data'].T if 'target' in dataset: if not sp.sparse.issparse(dataset['target']): dataset['target'] = dataset['target'].squeeze() return Bunch(**dataset) # The following is used by nosetests to setup the docstring tests fixture def setup_module(module): # setup mock urllib2 module to avoid downloading from mldata.org from sklearn.utils.testing import install_mldata_mock install_mldata_mock({ 'iris': { 'data': np.empty((150, 4)), 'label': np.empty(150), }, 'datasets-uci-iris': { 'double0': np.empty((150, 4)), 'class': np.empty((150,)), }, 'leukemia': { 'data': np.empty((72, 7129)), }, }) def teardown_module(module): from sklearn.utils.testing import uninstall_mldata_mock uninstall_mldata_mock()
unknown
codeparrot/codeparrot-clean
from app import db from flask.ext.restless import ProcessingException class Allergy(db.Model): __tablename__ = 'allergy' id = db.Column('id', db.Integer, primary_key=True) name = db.Column('name', db.String(63)) description = db.Column('description', db.String(255)) def __init__(self, name=None, description=None): self.name = name self.description = description def __repr__(self): return '<Allergy %r>' % (self.name) def getExclude(): return [] @staticmethod def post_single_preprocessor(data=None, **kw): # todo stuff return data def serialize(self, related = True): allergyDict = { 'id' : self.id, 'name' : self.name, 'description' : self.description } return allergyDict @staticmethod def get_allergies_by_list(list_of_ids): AllergyArray = db.session.query(Allergy).filter(Allergy.id.in_(list_of_ids)).all() if len(list_of_ids) != len(AllergyArray): raise ProcessingException( description='Invalid allergy_id in array', code=400 ) return AllergyArray
unknown
codeparrot/codeparrot-clean
########################################################### # # Copyright (c) 2008, Southpaw Technology # All Rights Reserved # # PROPRIETARY INFORMATION. This software is proprietary to # Southpaw Technology, and is not to be reproduced, transmitted, # or disclosed in any way without written permission. # # # __all__ = ['Pipeline'] IMPORT_ERROR = None try: from xml.dom.ext.reader import Sax2 from xml import xpath except ImportError: IMPORT_ERROR = "WARNING: pipeline.py in client api requires PyXML'" class Pipeline(object): '''class that stores the data structure of the a pipeline. Internally, this is stored as a linked list''' def __init__(my, pipeline_xml): if IMPORT_ERROR: print IMPORT_ERROR return my.doc = Sax2.FromXml(pipeline_xml) def get_first_process_name(my): # for now, just assume the first process nodes = xpath.Evaluate("/pipeline/process", my.doc) node = nodes[0] return node.getAttribute('name') def get_process_info(my, process_name): processes = xpath.Evaluate("/pipeline/process[@name='%s']" % process_name, my.doc) if not processes: return {} process = processes[0] #print "get_process_info: ", process_name return process_name def get_output_process_names(my, process_name): attrs = xpath.Evaluate("/pipeline/connect[@from='%s']/@to" % process_name, my.doc) return [x.value for x in attrs] def get_input_process_names(my, process_name): attrs = xpath.Evaluate("/pipeline/connect[@to='%s']/@from" % process_name, my.doc) return [x.value for x in attrs] def get_handler_class(my, process_name): nodes = xpath.Evaluate("/pipeline/process[@name='%s']/action" % process_name, my.doc) if not nodes: return "" action = nodes[0] action_class = action.getAttribute("class") return action_class def get_action_options(my, process_name): options = {} nodes = xpath.Evaluate("/pipeline/process[@name='%s']/action" % process_name, my.doc) if not nodes: return options action_node = nodes[0] nodes = action_node.childNodes for node in nodes: name = node.nodeName if name == "#text": 
continue value = my._get_node_value(node) options[name] = value return options def _get_node_value(cls, node): '''Gets the value of a node. This value is often the first child of the node''' value = node.nodeValue if value == None: if node.firstChild == None: value = "" else: value = node.firstChild.nodeValue return value _get_node_value = classmethod(_get_node_value)
unknown
codeparrot/codeparrot-clean
// Copyright 2004-present Facebook. All Rights Reserved. #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/UpSample.h> #include <c10/util/irange.h> #include <c10/util/TypeCast.h> namespace at::native::upsample { TORCH_API c10::SmallVector<int64_t, 3> compute_output_size( c10::IntArrayRef input_size, // Full input tensor size. at::OptionalIntArrayRef output_size, std::optional<c10::ArrayRef<double>> scale_factors) { const auto spatial_dimensions = static_cast<int64_t>(input_size.size()) - 2; if (output_size) { TORCH_CHECK(!scale_factors, "Must specify exactly one of output_size and scale_factors"); TORCH_CHECK(static_cast<int64_t>(output_size->size()) == spatial_dimensions); return {output_size->data(), output_size->data() + output_size->size()}; } if (scale_factors) { TORCH_CHECK(!output_size, "Must specify exactly one of output_size and scale_factors"); TORCH_CHECK(static_cast<int64_t>(scale_factors->size()) == spatial_dimensions); c10::SmallVector<int64_t, 3> ret; for (const auto i : c10::irange(spatial_dimensions)) { const double odim = static_cast<double>(input_size[i+2]) * scale_factors.value()[i]; ret.push_back(c10::checked_convert<int64_t>(odim, "int64_t")); } return ret; } TORCH_CHECK(false, "Must specify exactly one of output_size and scale_factors"); } } // namespace at::native::upsample
cpp
github
https://github.com/pytorch/pytorch
aten/src/ATen/native/UpSample.cpp
#!/usr/bin/env python3 # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
import unittest

import command
import config
import thread_cert

LEADER = 1
ROUTER1 = 2
BR = 3
ED1 = 17
DUT_REED = 18

ROUTER_SELECTION_JITTER = 1


def _leader_peer_router():
    """Return the topology entry shared by BR and routers 4..16: a full
    router whose only allowed neighbor is the leader."""
    return {
        'mode': 'rdn',
        'panid': 0xface,
        'router_selection_jitter': 1,
        'allowlist': [LEADER],
    }


class Cert_5_2_5_AddressQuery(thread_cert.TestCase):
    """Thread cert test 5.2.5: verify the DUT (a REED) answers address
    queries with Address Notifications and never tries to upgrade to
    router while the network is already saturated with routers."""

    TOPOLOGY = {
        LEADER: {
            'mode': 'rdn',
            'panid': 0xface,
            'allowlist': [ROUTER1, BR, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ED1]
        },
        ROUTER1: {
            'mode': 'rdn',
            'panid': 0xface,
            'router_selection_jitter': 1,
            'allowlist': [LEADER, DUT_REED]
        },
        # BR (3) and routers 4..16 all share the same configuration.
        **{router_id: _leader_peer_router() for router_id in range(BR, ED1)},
        ED1: {
            'is_mtd': True,
            'mode': 'rn',
            'panid': 0xface,
            'allowlist': [LEADER]
        },
        DUT_REED: {
            'mode': 'rdn',
            'panid': 0xface,
            'router_selection_jitter': 1,
            'allowlist': [ROUTER1]
        },
    }

    def test(self):
        # 1. LEADER: DHCPv6 Server for prefix 2001::/64.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[LEADER].add_prefix('2001::/64', 'pdros')
        self.nodes[LEADER].register_netdata()
        self.simulator.set_lowpan_context(1, '2001::/64')

        # 2. BR: SLAAC Server for prefix 2002::/64.
        self.nodes[BR].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[BR].get_state(), 'router')
        self.nodes[BR].add_prefix('2002::/64', 'paros')
        self.nodes[BR].register_netdata()
        self.simulator.set_lowpan_context(2, '2002::/64')

        # 3. Bring up remaining devices except DUT_REED.
        for i in range(2, 17):
            if i == BR:
                # Already started in step 2.
                continue
            self.nodes[i].start()
            self.simulator.go(5)
            self.assertEqual(self.nodes[i].get_state(), 'router')

        self.nodes[ED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED1].get_state(), 'child')

        # 4. Bring up DUT_REED.
        self.nodes[DUT_REED].start()
        self.simulator.go(5)
        self.simulator.go(ROUTER_SELECTION_JITTER)

        reed_messages = self.simulator.get_messages_sent_by(DUT_REED)

        # Verify DUT_REED doesn't try to become router.
        msg = reed_messages.does_not_contain_coap_message()
        assert msg is True, "Error: The REED sent an Address Solicit Request"

        # 5. Enable a link between the DUT and BR to create a one-way link.
        self.nodes[DUT_REED].add_allowlist(self.nodes[BR].get_addr64())
        self.nodes[BR].add_allowlist(self.nodes[DUT_REED].get_addr64())

        # 6. Verify DUT_REED would send Address Notification when ping to its
        # ML-EID.
        mleid = self.nodes[DUT_REED].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
        self.assertTrue(self.nodes[ED1].ping(mleid))

        # Wait for sniffer collecting packets
        self.simulator.go(1)

        reed_messages = self.simulator.get_messages_sent_by(DUT_REED)
        msg = reed_messages.next_coap_message('0.02', '/a/an')
        command.check_address_notification(msg, self.nodes[DUT_REED], self.nodes[LEADER])

        # 7 & 8. Verify DUT_REED would send Address Notification when ping to
        # its 2001::EID and 2002::EID.
        flag2001 = 0
        flag2002 = 0

        for global_address in self.nodes[DUT_REED].get_ip6_address(config.ADDRESS_TYPE.GLOBAL):
            if global_address[0:4] == '2001':
                flag2001 += 1
            elif global_address[0:4] == '2002':
                flag2002 += 1
            else:
                # BUG FIX: the original did `raise "Error: ..."`; raising a
                # plain string is a TypeError in Python 3 and would mask the
                # intended failure message. Raise a real exception instead.
                raise ValueError("Error: Address is unexpected.")

            self.assertTrue(self.nodes[ED1].ping(global_address))

            # Wait for sniffer collecting packets
            self.simulator.go(1)

            reed_messages = self.simulator.get_messages_sent_by(DUT_REED)
            msg = reed_messages.next_coap_message('0.02', '/a/an')
            command.check_address_notification(msg, self.nodes[DUT_REED], self.nodes[LEADER])

        assert flag2001 == 1, "Error: Expecting address 2001::EID not appear."
        assert flag2002 == 1, "Error: Expecting address 2002::EID not appear."


if __name__ == '__main__':
    unittest.main()
unknown
codeparrot/codeparrot-clean
--- title: useOutletContext --- # useOutletContext <!-- ⚠️ ⚠️ IMPORTANT ⚠️ ⚠️ Thank you for helping improve our documentation! This file is auto-generated from the JSDoc comments in the source code, so please edit the JSDoc comments in the file below and this file will be re-generated once those changes are merged. https://github.com/remix-run/react-router/blob/main/packages/react-router/lib/hooks.tsx --> [MODES: framework, data, declarative] ## Summary [Reference Documentation ↗](https://api.reactrouter.com/v7/functions/react-router.useOutletContext.html) Returns the parent route [`<Outlet context>`](../components/Outlet). Often parent routes manage state or other values you want shared with child routes. You can create your own [context provider](https://react.dev/learn/passing-data-deeply-with-context) if you like, but this is such a common situation that it's built-into [`<Outlet>`](../components/Outlet). ```tsx // Parent route function Parent() { const [count, setCount] = React.useState(0); return <Outlet context={[count, setCount]} />; } ``` ```tsx // Child route import { useOutletContext } from "react-router"; function Child() { const [count, setCount] = useOutletContext(); const increment = () => setCount((c) => c + 1); return <button onClick={increment}>{count}</button>; } ``` If you're using TypeScript, we recommend the parent component provide a custom hook for accessing the context value. This makes it easier for consumers to get nice typings, control consumers, and know who's consuming the context value. 
Here's a more realistic example: ```tsx filename=src/routes/dashboard.tsx lines=[14,20] import { useState } from "react"; import { Outlet, useOutletContext } from "react-router"; import type { User } from "./types"; type ContextType = { user: User | null }; export default function Dashboard() { const [user, setUser] = useState<User | null>(null); return ( <div> <h1>Dashboard</h1> <Outlet context={{ user } satisfies ContextType} /> </div> ); } export function useUser() { return useOutletContext<ContextType>(); } ``` ```tsx filename=src/routes/dashboard/messages.tsx lines=[1,4] import { useUser } from "../dashboard"; export default function DashboardMessages() { const { user } = useUser(); return ( <div> <h2>Messages</h2> <p>Hello, {user.name}!</p> </div> ); } ``` ## Signature ```tsx function useOutletContext<Context = unknown>(): Context ``` ## Returns The context value passed to the parent [`Outlet`](../components/Outlet) component
unknown
github
https://github.com/remix-run/react-router
docs/api/hooks/useOutletContext.md
- Feature Name: Multi-tenant cluster settings - Status: completed - Start Date: 2021-11-06 - Edited: 2023-09-27 - Authors: Radu Berinde, knz, ssd - RFC PR: [#85970](https://github.com/cockroachdb/cockroach/pull/85970), previously [#73349](https://github.com/cockroachdb/cockroach/pull/73349) - Cockroach Issue: [#77935](https://github.com/cockroachdb/cockroach/issue/77935), [#85729](https://github.com/cockroachdb/cockroach/issues/85729) # Summary This RFC introduces an update to our cluster settings infrastructure aimed at solving shortcomings in multi-tenant environments. We introduce different *classes* of cluster settings, each with its own semantics. # Motivation Cluster settings are used to control various aspects of CockroachDB. Some of them apply exclusively to the KV subsystem; some apply only to the SQL layer. Yet others are harder to classify - for example, they may apply to an aspect of the KV subsystem, but the SQL layer also needs to interact with the setting. Currently all cluster settings are treated homogeneously; their current values are stored in the `system.settings` table of the system tenant, at the level of the storage cluster. With cluster virtualization, the KV/storage and SQL layers are separated. For example, KV is handled by a single shared storage cluster; in contrast, each virtual cluster runs its own separate instance of the SQL layer, across multiple SQL pods (that form the "logical cluster"). As of this writing (2021) each virtual cluster has its own separate instance of all cluster settings (and its own `system.settings` table). Some settings are designated as `SystemOnly` to indicate that they are only applicable to the storage layer (these settings are not expected to be consulted by virtual cluster servers). Virtual clusters can freely change all other settings, but only those that affect the SQL code run inside the virtual cluster will make any difference. 
Beyond the obvious usability issues, there are important functional gaps: - we need settings that can be read by VC server processes but which cannot be modified by the end-user. For example: controls for the RU accounting subsystem. - in certain cases VC code may need to consult values for cluster settings that apply to the storage cluster: for example `kv.closed_timestamp.follower_reads.enabled` applies to the KV subsystem but is read by the SQL code when serving queries. # Technical design We propose splitting the cluster settings into three *classes*: 1. System only (`system-only`) Settings associated with the storage layer, only usable by the system tenant. These settings are not visible at all from virtual clusters. Settings code prevents use of values for these settings from a VC server process. Example: `kv.allocator.qps_rebalance_threshold`. 2. System visible `system-visible` (previously: "Tenant read-only `system-visible`") These settings are visible from virtual clusters but the virtual clusters cannot modify the values. The observed value of settings in this class is: - by default, the value held for the setting in the system tenant (the storage cluster's value in the system tenant's `system.settings`). - New SQL syntax allows the system tenant to set the value for a specific tenant; this results in the tenant (asynchronously) getting the updated value. (i.e. the value for one tenant can be overridden away from the default) Examples: - Settings that affect the KV replication layer, should not be writable by tenants, but which benefit the SQL layer in tenants: `kv.raft.command.max_size`. - Settings that benefit from being overridden per tenant, but where inheriting the system tenant's value when not overridden is OK: `kv.bulk_ingest.batch_size`, `tenant_cpu_usage_allowance`. 3. Application level (`application`, previously: "Tenant writable `tenant-rw`) These settings are contained by each virtual cluster and can be modified by the virtual cluster. 
They can also be overridden from the system tenant using the same override mechanism as above. Example: `sql.notices.enabled`. The difference between the three classes, and with/without override, is as follows: | Behavior | System only | System visible | Application writable | |--------------------------------------------------------------------------------|----------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------|----------------------------------------------------------------| | Value lookup order | N/A on virtual clusters; in system tenant, 1) local `settings` 2) compile-time default | 1) per-VC override 2) system tenant `settings` 3) compile-time default | 1) per-VC override 2) local `settings` 2) compile-time default | | Can run (RE)SET CLUSTER SETTING in system tenant | yes | yes | yes | | Can run (RE)SET CLUSTER SETTING in virtual cluster | no | no | yes | | Can set virtual cluster override from system tenant | no | yes | yes | | Value in current VC's `system.settings` is used as configuration | only in system tenant | no (local value always ignored) | yes, but only if there's no override | | Default value when the current VC's `system.settings` does not contain a value | compile-time default | per-VC override if any, otherwise system tenant value, otherwise compile-time default | per-VC override if any, otherwise compile-time default | In effect, this means there's two ways to create a "read-only" setting in virtual clusters: - using a "System visible" setting. In that case, the value is taken from the system tenant and *shared across all tenants*. - using an "Application writable" setting, and adding an override in the system tenant. In that case, the value is taken from the override and *can be specialized per virtual cluster*. When should one choose one over the other? 
The determination should be done based on whether the configuration is
system-wide or can be meaningfully different per virtual cluster.

#### A note on the threat model

The described restrictions assume that each virtual cluster server process is
not compromised. There is no way to prevent a compromised process from
changing its own view of the cluster settings. However, even a compromised
process should never be able to learn the values for the "System only"
settings or modify settings for other virtual clusters.

It's also worth considering how a compromised VC server process can influence
future uncompromised processes.

### SQL changes

New statements for the system tenant only:

- `ALTER VIRTUAL CLUSTER <id> SET CLUSTER SETTING <setting> = <value>`

  Sets the value seen by a VC. For `application`, this value will override
  any setting from the VC's side, until the cluster setting is reset.

- `ALTER VIRTUAL CLUSTER ALL SET CLUSTER SETTING <setting> = <value>`

  Sets the value seen by all non-system VCs, except those that have a
  specific value for that VC (set with `ALTER VIRTUAL CLUSTER <id>`). For
  `application`, this value will override any setting from the VC's side,
  until the cluster setting is reset. Note that this statement does not
  affect the system tenant's settings.

- `ALTER VIRTUAL CLUSTER <id> RESET CLUSTER SETTING <setting>`

  Resets the VC setting override. For `system-visible`, the value reverts to
  the shared value in `system.settings` (if it is set), otherwise to the
  setting default. For `application`, the value reverts to the `ALL` value
  (if it is set), otherwise to whatever value was set by the VC (if it is
  set), otherwise the build-time default.

- `ALTER VIRTUAL CLUSTER ALL RESET CLUSTER SETTING <setting>`

  Resets the all-VCs setting override. For VCs that have a specific value
  set for that VC (using `ALTER VIRTUAL CLUSTER <id>`), there is no change.
For other VCs, `system-visible` values revert to the value set in system tenant's `system.settings`, or build-time default if there's no customization; and `application` values revert to whatever value was set by the VC (if it is set), otherwise the build-time default. - `SHOW CLUSTER SETTING <setting> FOR VIRTUAL CLUSTER <id>` - Display the setting override. If there is no override, the statement returns NULL. (We choose to not make this statement 'peek' into the VC to display the customization set by the VC itself.) - `SHOW [ALL] CLUSTER SETTINGS FOR VIRTUAL CLUSTER <id>` - Display the setting overrides for the given VC. If there is no override, the statement returns NULL. In all statements above, using `id=1` (the system tenant's ID) is not valid. New semantics for existing statements for VCs: - `SHOW [ALL] CLUSTER SETTINGS` shows the `system-visible` and `application` settings. `system-visible` settings that have an override from the KV side are marked as such in the description. - `SET/RESET CLUSTER SETTING` can only be used with `application` settings. For settings that have overrides from the KV side, the statement will fail explaining that the setting can only be changed once the KV side resets the override. ## Implementation The proposed implementation is as follows: - We update the semantics of the existing `system.settings` table: - on the system tenant, this table continues to store values for all settings (for the system tenant only, and secondary VCs for `system-visible` settings) - on other VCs, this table stores only values for `application` settings. Any table rows for other types of variables are ignored (in the case that the VC manually inserts data into the table). 
- We add a new `system.tenant_settings` table with following schema: ``` CREATE TABLE system.tenant_settings ( tenant_id INT8 NOT NULL, name STRING NOT NULL, value STRING NOT NULL, value_type STRING, last_updated TIMESTAMP NOT NULL DEFAULT now() ON UPDATE now(), reason STRING, PRIMARY KEY (tenant_id, name) ) ``` This table is only used on the system tenant. All-VC override values are stored in `tenant_id=0`. This table contains no settings for the system VC (`tenant_id=1`), and the `tenant_id=0` values do not apply to the system tenant. - We modify the tenant connector APIs to allow "listening" for updates to cluster settings. Inside the tenant connector this can be implemented using a streaming RPC (similar to `GossipSubscription`). - On the system tenant we set up rangefeed on `system.tenant_settings` and keep all the changed settings (for all VCs) in memory. We expect that in practice overrides for specific VCs are rare (with most being "all VC" overrides). The rangefeed is used to implement the API used by the tenant connector to keep VCs up to date. We continue to set up the rangefeed on the `system.settings` table to maintain the system tenant settings. - On non-system VCs we continue to set up the rangefeed on the VC's `system.settings` table, and we also use the new connector API to listen to updates from the storage cluster. Values from the storage cluster which are present always override any local values. ### Upgrade The proposed solution has very few concerns around upgrade. There will be a migration to create the new system table, and the new connector API implementation is only active on the new version (in a mixed-version cluster, it can error out or use a stub no-op implementation). The new statements (around setting per-VC values) should also error out until the new version is active. The settings on the system tenant will continue to work. 
On non-system VCs, any locally changed settings that are now `system` or `application` will revert to their defaults. It will be necessary to set these settings from the system tenant (using the new statements) if any clusters rely on non-default values. ### Notes All functions used to register cluster settings take an extra argument with the class of the setting. We want to make an explicit (and reviewable) decision for each existing cluster setting, and we want the authors of future settings to be forced to think about the class. When deciding which class is appropriate for a given setting, we will use the following guidelines: - if the setting controls a user-visible aspect of SQL, it should be a `application` setting. - control settings relevant to VC-specific internal implementation (like VC throttling) that we want to be able to control per-VC should be `system-visible`, or possibly `application` with an override, depending on whether we want different overrides for different VCs. - when in doubt the first choice to consider should be `application`. - `system` should be used with caution - we have to be sure that there is no internal code running on the VC that needs to consult them. We fully hide `system` settings from non-system VCs. The cluster settings subsystem will not allow accessing these values from a VC process (it will crash the VC process, at least in testing builds, to find any internal code that incorrectly relies on them). The values of these settings are unknown to the VC APIs for changing VC settings (i.e. if a VC attempts to read or set such a setting, it will get the "unknown cluster setting" error). ## Alternatives There are three possibilities in terms of the system table changes: - a) Add a new `system.tenant_settings` table (as described above). - Pro: clean schema, easier to reason about. - Pro: no schema changes on the existing system table. - b) Use the existing `system.settings` table as is. 
For VC-specific settings and overrides, encode the tenant ID in the setting name (which is the table PK), for example: `tenant-10/sql.notices.enabled`. - Pro: no migrations (schema changes) for the existing system table. - Pro: requires a single range feed. - Pro: existing SET CLUSTER SETTING (in system tenant) continues to "just" work. - Con: semantics are not as "clean"; goes against the principle of taking advantage of SQL schema when possible. A CHECK constraint can be used to enforce correct encoding. - c) Modify the existing `system.settings` to add a `tenant_id` column, and change the PK to `(tenant_id, name)`. - Pro: clean schema - Pro: requires a single range feed. - Con: requires migration (schema change) for the existing system table (with the added concern that we have code that parses the raw KVs for this table directly). A previous proposal was to store `system-visible` values in each VC's `system.settings` table and disallowing arbitrary writes to that table. While this would be less work in the short-term, it will give us ongoing headaches because it breaks the VC keyspace abstraction. For example, restoring a backup will be problematic. Another proposal was to store all VC settings on the storage side and allow the VC to update them via the tenant connector. This is problematic for a number of reasons, including transactionality of setting changes and opportunities for abuse. A previous version of the proposal included a "system visible" (or "shared read-only") class, for system settings that the VCs can read. However, given the support for all-VC values for `system-visible`, the functional differences between these two classes becomes very small.
unknown
github
https://github.com/cockroachdb/cockroach
docs/RFCS/20211106_multitenant_cluster_settings.md
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: generated-style proto-plus message definitions for the Vision
# v1p2beta1 OCR text hierarchy (TextAnnotation -> Page -> Block ->
# Paragraph -> Word -> Symbol). Field numbers mirror the .proto schema;
# do not renumber.
import proto  # type: ignore

from google.cloud.vision_v1p2beta1.types import geometry


__protobuf__ = proto.module(
    package='google.cloud.vision.v1p2beta1',
    manifest={
        'TextAnnotation',
        'Page',
        'Block',
        'Paragraph',
        'Word',
        'Symbol',
    },
)


class TextAnnotation(proto.Message):
    r"""TextAnnotation contains a structured representation of OCR
    extracted text.

    The hierarchy of an OCR extracted text structure is like this:
    TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol.
    Each structural component, starting from Page, may further have
    their own properties. Properties describe detected languages,
    breaks etc.. Please refer to the
    [TextAnnotation.TextProperty][google.cloud.vision.v1p2beta1.TextAnnotation.TextProperty]
    message definition below for more detail.

    Attributes:
        pages (Sequence[google.cloud.vision_v1p2beta1.types.Page]):
            List of pages detected by OCR.
        text (str):
            UTF-8 text detected on the pages.
    """

    class DetectedLanguage(proto.Message):
        r"""Detected language for a structural component.

        Attributes:
            language_code (str):
                The BCP-47 language code, such as "en-US" or "sr-Latn". For
                more information, see
                http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
            confidence (float):
                Confidence of detected language. Range [0, 1].
        """

        language_code = proto.Field(
            proto.STRING,
            number=1,
        )
        confidence = proto.Field(
            proto.FLOAT,
            number=2,
        )

    class DetectedBreak(proto.Message):
        r"""Detected start or end of a structural component.

        Attributes:
            type_ (google.cloud.vision_v1p2beta1.types.TextAnnotation.DetectedBreak.BreakType):
                Detected break type.
            is_prefix (bool):
                True if break prepends the element.
        """

        class BreakType(proto.Enum):
            r"""Enum to denote the type of break found. New line, space etc."""
            UNKNOWN = 0
            SPACE = 1
            SURE_SPACE = 2
            EOL_SURE_SPACE = 3
            HYPHEN = 4
            LINE_BREAK = 5

        type_ = proto.Field(
            proto.ENUM,
            number=1,
            enum='TextAnnotation.DetectedBreak.BreakType',
        )
        is_prefix = proto.Field(
            proto.BOOL,
            number=2,
        )

    class TextProperty(proto.Message):
        r"""Additional information detected on the structural component.

        Attributes:
            detected_languages (Sequence[google.cloud.vision_v1p2beta1.types.TextAnnotation.DetectedLanguage]):
                A list of detected languages together with
                confidence.
            detected_break (google.cloud.vision_v1p2beta1.types.TextAnnotation.DetectedBreak):
                Detected start or end of a text segment.
        """

        detected_languages = proto.RepeatedField(
            proto.MESSAGE,
            number=1,
            message='TextAnnotation.DetectedLanguage',
        )
        detected_break = proto.Field(
            proto.MESSAGE,
            number=2,
            message='TextAnnotation.DetectedBreak',
        )

    pages = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message='Page',
    )
    text = proto.Field(
        proto.STRING,
        number=2,
    )


class Page(proto.Message):
    r"""Detected page from OCR.

    Attributes:
        property (google.cloud.vision_v1p2beta1.types.TextAnnotation.TextProperty):
            Additional information detected on the page.
        width (int):
            Page width. For PDFs the unit is points. For
            images (including TIFFs) the unit is pixels.
        height (int):
            Page height. For PDFs the unit is points. For
            images (including TIFFs) the unit is pixels.
        blocks (Sequence[google.cloud.vision_v1p2beta1.types.Block]):
            List of blocks of text, images etc on this
            page.
        confidence (float):
            Confidence of the OCR results on the page. Range [0, 1].
    """

    # Note: the field is named `property` by the .proto schema; it
    # intentionally shadows the builtin inside this class body.
    property = proto.Field(
        proto.MESSAGE,
        number=1,
        message='TextAnnotation.TextProperty',
    )
    width = proto.Field(
        proto.INT32,
        number=2,
    )
    height = proto.Field(
        proto.INT32,
        number=3,
    )
    blocks = proto.RepeatedField(
        proto.MESSAGE,
        number=4,
        message='Block',
    )
    confidence = proto.Field(
        proto.FLOAT,
        number=5,
    )


class Block(proto.Message):
    r"""Logical element on the page.

    Attributes:
        property (google.cloud.vision_v1p2beta1.types.TextAnnotation.TextProperty):
            Additional information detected for the
            block.
        bounding_box (google.cloud.vision_v1p2beta1.types.BoundingPoly):
            The bounding box for the block. The vertices are in the
            order of top-left, top-right, bottom-right, bottom-left.
            When a rotation of the bounding box is detected the rotation
            is represented as around the top-left corner as defined when
            the text is read in the 'natural' orientation. For example:

            -  when the text is horizontal it might look like:

               ::

                   0----1
                   |    |
                   3----2

            -  when it's rotated 180 degrees around the top-left corner
               it becomes:

               ::

                   2----3
                   |    |
                   1----0

               and the vertice order will still be (0, 1, 2, 3).
        paragraphs (Sequence[google.cloud.vision_v1p2beta1.types.Paragraph]):
            List of paragraphs in this block (if this
            blocks is of type text).
        block_type (google.cloud.vision_v1p2beta1.types.Block.BlockType):
            Detected block type (text, image etc) for
            this block.
        confidence (float):
            Confidence of the OCR results on the block. Range [0, 1].
    """

    class BlockType(proto.Enum):
        r"""Type of a block (text, image etc) as identified by OCR."""
        UNKNOWN = 0
        TEXT = 1
        TABLE = 2
        PICTURE = 3
        RULER = 4
        BARCODE = 5

    property = proto.Field(
        proto.MESSAGE,
        number=1,
        message='TextAnnotation.TextProperty',
    )
    bounding_box = proto.Field(
        proto.MESSAGE,
        number=2,
        message=geometry.BoundingPoly,
    )
    paragraphs = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message='Paragraph',
    )
    block_type = proto.Field(
        proto.ENUM,
        number=4,
        enum=BlockType,
    )
    confidence = proto.Field(
        proto.FLOAT,
        number=5,
    )


class Paragraph(proto.Message):
    r"""Structural unit of text representing a number of words in
    certain order.

    Attributes:
        property (google.cloud.vision_v1p2beta1.types.TextAnnotation.TextProperty):
            Additional information detected for the
            paragraph.
        bounding_box (google.cloud.vision_v1p2beta1.types.BoundingPoly):
            The bounding box for the paragraph. Vertex order and
            rotation semantics are as documented on ``Block``.
        words (Sequence[google.cloud.vision_v1p2beta1.types.Word]):
            List of words in this paragraph.
        confidence (float):
            Confidence of the OCR results for the paragraph. Range [0, 1].
    """

    property = proto.Field(
        proto.MESSAGE,
        number=1,
        message='TextAnnotation.TextProperty',
    )
    bounding_box = proto.Field(
        proto.MESSAGE,
        number=2,
        message=geometry.BoundingPoly,
    )
    words = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message='Word',
    )
    confidence = proto.Field(
        proto.FLOAT,
        number=4,
    )


class Word(proto.Message):
    r"""A word representation.

    Attributes:
        property (google.cloud.vision_v1p2beta1.types.TextAnnotation.TextProperty):
            Additional information detected for the word.
        bounding_box (google.cloud.vision_v1p2beta1.types.BoundingPoly):
            The bounding box for the word. Vertex order and rotation
            semantics are as documented on ``Block``.
        symbols (Sequence[google.cloud.vision_v1p2beta1.types.Symbol]):
            List of symbols in the word. The order of the
            symbols follows the natural reading order.
        confidence (float):
            Confidence of the OCR results for the word. Range [0, 1].
    """

    property = proto.Field(
        proto.MESSAGE,
        number=1,
        message='TextAnnotation.TextProperty',
    )
    bounding_box = proto.Field(
        proto.MESSAGE,
        number=2,
        message=geometry.BoundingPoly,
    )
    symbols = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message='Symbol',
    )
    confidence = proto.Field(
        proto.FLOAT,
        number=4,
    )


class Symbol(proto.Message):
    r"""A single symbol representation.

    Attributes:
        property (google.cloud.vision_v1p2beta1.types.TextAnnotation.TextProperty):
            Additional information detected for the
            symbol.
        bounding_box (google.cloud.vision_v1p2beta1.types.BoundingPoly):
            The bounding box for the symbol. Vertex order and rotation
            semantics are as documented on ``Block``.
        text (str):
            The actual UTF-8 representation of the
            symbol.
        confidence (float):
            Confidence of the OCR results for the symbol. Range [0, 1].
    """

    property = proto.Field(
        proto.MESSAGE,
        number=1,
        message='TextAnnotation.TextProperty',
    )
    bounding_box = proto.Field(
        proto.MESSAGE,
        number=2,
        message=geometry.BoundingPoly,
    )
    text = proto.Field(
        proto.STRING,
        number=3,
    )
    confidence = proto.Field(
        proto.FLOAT,
        number=4,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
unknown
codeparrot/codeparrot-clean
/* contrib/amcheck/amcheck--1.4--1.5.sql */

-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "ALTER EXTENSION amcheck UPDATE TO '1.5'" to load this file. \quit

--
-- gin_index_check()
--
-- Verifies the internal consistency of the given GIN index; raises an
-- error if corruption is found. Implemented in C in the amcheck module.
CREATE FUNCTION gin_index_check(index regclass)
RETURNS VOID
AS 'MODULE_PATHNAME', 'gin_index_check'
LANGUAGE C STRICT;

-- Index checking can read arbitrary relation data, so restrict it to
-- superusers / explicitly granted roles.
REVOKE ALL ON FUNCTION gin_index_check(regclass) FROM PUBLIC;
sql
github
https://github.com/postgres/postgres
contrib/amcheck/amcheck--1.4--1.5.sql
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling methods for batches of tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages


def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
  """Given `inputs` tensors, stochastically resamples each at a given rate.

  For example, if the inputs are `[[a1, a2], [b1, b2]]` and the rates
  tensor contains `[3, 1]`, then the return value may look like `[[a1,
  a2, a1, a1], [b1, b2, b1, b1]]`. However, many other outputs are
  possible, since this is stochastic -- averaged over many repeated
  calls, each set of inputs should appear in the output `rate` times the
  number of invocations.

  Uses Knuth's method to generate samples from the poisson
  distribution (but instead of just incrementing a count, actually
  emits the input); this is described at
  https://en.wikipedia.org/wiki/Poisson_distribution in the section on
  generating Poisson-distributed random variables.

  Note that this method is not appropriate for large rate values: with
  float16 it will stop performing correctly for rates above 9.17;
  float32, 87; and float64, 708. (These are the base-e versions of the
  minimum representable exponent for each type.)

  Args:
    inputs: A list of tensors, each of which has a shape of `[batch_size, ...]`
    rates: A tensor of shape `[batch_size]` contiaining the resampling rates
       for each input.
    scope: Scope for the op.
    seed: Random seed to use.
    back_prop: Whether to allow back-propagation through this op.

  Returns:
    Selections from the input tensors.
  """
  # TODO(shoutis): Refactor, splitting this up into a poisson draw and a repeat.

  # What this implementation does is loop, simulating the intervals
  # between events by drawing from the exponential distribution
  # (`-log(random_uniform)/rate`), and emitting another copy of the
  # corresponding input so long as sum(intervals) < 1. However, that
  # condition can be transformed into the easier-to-compute condition
  # `product(random_uniforms) > e^-rate`.
  with ops.name_scope(scope, default_name='resample_at_rate', values=inputs):
    # Per-batch-element loop-termination threshold: e^-rate.
    floor_vals = math_ops.exp(-rates)

    def _body(chosen_inputs, running_products, idx, output_count):
      """Body of the resampling loop."""
      # Update the running product
      next_running_products = running_products * random_ops.random_uniform(
          shape=array_ops.shape(running_products), seed=seed)

      # Append inputs which still pass the condition:
      indexes = array_ops.reshape(
          array_ops.where(next_running_products > floor_vals), [-1])

      next_output_count = output_count + array_ops.shape(indexes)[0]

      # Each TensorArray slot `idx` holds the batch rows still emitting
      # copies on this loop iteration (one write per input tensor).
      next_chosen_inputs = [
          chosen_inputs[i].write(idx, array_ops.gather(inputs[i], indexes))
          for i in range(len(inputs))]

      return [next_chosen_inputs,
              next_running_products,
              idx + 1,
              next_output_count]

    def _cond(unused_chosen_inputs, running_products, unused_idx, unused_count):
      """Resampling loop exit condition."""
      # Keep looping while ANY batch element's product is still above its
      # threshold; elements already below it just stop contributing rows.
      return math_ops.reduce_any(running_products > floor_vals)

    initial_chosen_inputs = [
        tensor_array_ops.TensorArray(dtype=x.dtype, size=0, dynamic_size=True)
        for x in inputs]

    resampled_inputs, _, unused_idx, count = control_flow_ops.while_loop(
        _cond,
        _body,
        loop_vars=[initial_chosen_inputs,
                   array_ops.ones_like(rates),  # initial running_products
                   0,  # initial idx
                   0],  # initial count
        back_prop=back_prop)

    # Work around TensorArray "Currently only static shapes are supported when
    # concatenating zero-size TensorArrays" limitation:
    def _empty_tensor_like(t):
      # Builds a 0-row tensor with the same trailing shape/dtype as `t`.
      result = array_ops.zeros(
          shape=(array_ops.concat(0, [[0], array_ops.shape(t)[1:]])),
          dtype=t.dtype)
      if t.get_shape().ndims is not None:
        # preserve known shapes
        result.set_shape([0] + t.get_shape()[1:].as_list())
      return result

    # If nothing was emitted at all, return empty tensors rather than
    # concatenating zero-size TensorArrays (which would fail; see above).
    return control_flow_ops.cond(
        count > 0,
        lambda: [tensor_array.concat()
                 for tensor_array in resampled_inputs],
        lambda: [_empty_tensor_like(t) for t in inputs])


def weighted_resample(inputs, weights, overall_rate, scope=None,
                      mean_decay=0.999, warmup=10, seed=None):
  """Performs an approximate weighted resampling of `inputs`.

  This method chooses elements from `inputs` where each item's rate of
  selection is proportional to its value in `weights`, and the average
  rate of selection across all inputs (and many invocations!) is
  `overall_rate`.

  Args:
    inputs: A list of tensors whose first dimension is `batch_size`.
    weights: A `[batch_size]`-shaped tensor with each batch member's weight.
    overall_rate: Desired overall rate of resampling.
    scope: Scope to use for the op.
    mean_decay: How quickly to decay the running estimate of the mean weight.
    warmup: Until the resulting tensor has been evaluated `warmup` times, the
       resampling menthod uses the true mean over all calls as its weight
       estimate, rather than a decayed mean.
    seed: Random seed.

  Returns:
    A list of tensors exactly like `inputs`, but with an unknown (and possibly
    zero) first dimension.
    A tensor containing the effective resampling rate used for each output.
  """
  # Algorithm: Just compute rates as weights/mean_weight *
  # overall_rate. This way the average weight corresponds to the
  # overall rate, and a weight twice the average has twice the rate,
  # etc.
  with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
    # First: Maintain a running estimated mean weight, with decay
    # adjusted (by also maintaining an invocation count) during the
    # warmup period so that at the beginning, there aren't too many
    # zeros mixed in, throwing the average off.
    with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
      # Local (non-trainable, non-saved) state: invocation count and the
      # running mean weight estimate.
      count_so_far = variable_scope.get_local_variable(
          'resample_count', initializer=0)
      estimated_mean = variable_scope.get_local_variable(
          'estimated_mean', initializer=0.0)

      count = count_so_far.assign_add(1)
      # During warmup, (count-1)/count yields the exact running average;
      # afterwards the decay is capped at `mean_decay`.
      real_decay = math_ops.minimum(
          math_ops.truediv((count - 1), math_ops.minimum(count, warmup)),
          mean_decay)

      batch_mean = math_ops.reduce_mean(weights)
      mean = moving_averages.assign_moving_average(
          estimated_mean, batch_mean, real_decay, zero_debias=False)

    # Then, normalize the weights into rates using the mean weight and
    # overall target rate:
    rates = weights * overall_rate / mean

    # Prepend `rates` to the inputs so the effective rate of each emitted
    # row is resampled alongside the data, then split it back off below.
    results = resample_at_rate([rates] + inputs, rates,
                               scope=opscope, seed=seed, back_prop=False)

    return (results[1:], results[0])
unknown
codeparrot/codeparrot-clean
import gobject
import os
from filecollection import File
from filecollection import Directory


class PhoneBrowserStub(gobject.GObject):
    """Test stub standing in for a real Bluetooth phone browser.

    Records the argument of the most recent call to each operation so unit
    tests can assert on it; performs no real I/O.  Signals mirror the real
    browser's interface but are never emitted by the stub itself.
    """

    __gsignals__ = {
        "connected": (
            gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
        "disconnected": (
            gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
        "started": (
            gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [gobject.TYPE_STRING]),
        "completed": (
            gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
        "error": (
            gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [gobject.TYPE_STRING]),
        "progress": (
            gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [gobject.TYPE_INT])
    }

    def __init__(self):
        gobject.GObject.__init__(self)
        # BUG FIX: the original stored the flag as ``self.isConnected``,
        # which shadowed the isConnected() method on the instance, so
        # ``stub.isConnected()`` raised TypeError ('bool' object is not
        # callable).  The state now lives in ``self.connected``.
        self.connected = False
        self.curDir = None          # last directory passed to changeDirectory()
        self.lastCreatedDir = None  # last directory passed to createDirectory()
        self.lastFileCopied = None  # last file passed to copyToLocal/copyToRemote
        self.dirUp = False          # set once directoryUp() has been called
        # BUG FIX: getDirectoryListing() previously raised AttributeError
        # unless a test had assigned these first; default to empty listings.
        self.dirs = []
        self.files = []

    def connectToPhone(self, btAddress):
        # btAddress is ignored by the stub; we only flip the flag.
        self.connected = True

    def isConnected(self):
        """Return True once connectToPhone() has been called (and no
        disconnectFromPhone() since)."""
        return self.connected

    def disconnectFromPhone(self):
        self.connected = False

    def getDirectoryListing(self):
        """Return the (directories, files) pair a test has seeded, or two
        empty lists by default."""
        return self.dirs, self.files

    def changeDirectory(self, dir):
        self.curDir = dir

    def createDirectory(self, dir):
        self.lastCreatedDir = dir

    def gotoRoot(self):
        self.curDir = '/'

    def directoryUp(self):
        self.dirUp = True

    def copyToLocal(self, remoteFilename, localDirectory):
        # localDirectory is ignored; only the remote name is recorded.
        self.lastFileCopied = remoteFilename

    def copyToRemote(self, localFile):
        self.lastFileCopied = localFile
unknown
codeparrot/codeparrot-clean
from snovault import (
    ROOT,
    upgrade_step,
)

'''
This upgrade is no longer needed but just kept for posterity. It no longer
works after versionof: was removed as a valid namespace in aliases.
See http://redmine.encodedcc.org/issues/4748

@upgrade_step('analysis_step_run', '1', '2')
def analysis_step_run_1_2(value, system):
    # http://redmine.encodedcc.org/issues/3074
    root = system['registry'][ROOT]
    analysis_step_uuid = value.pop('analysis_step')
    analysis_step = root[analysis_step_uuid]
    analysis_step_version = root['versionof:{name}'.format(**analysis_step.properties)]
    value['analysis_step_version'] = str(analysis_step_version.uuid)
    # http://redmine.encodedcc.org/issues/3075
    if 'workflow_run' in value:
        del value['workflow_run']
'''


@upgrade_step('analysis_step_run', '3', '4')
def analysis_step_run_3_4(value, system):
    """Upgrade an analysis_step_run document from schema version 3 to 4.

    Rewrites the legacy ``status`` vocabulary in place:
    ``error`` -> ``deleted``, ``waiting``/``running`` -> ``in progress``,
    ``finished`` -> ``released``.  Any other status (including a missing
    one) is left untouched.  ``system`` is unused but required by the
    upgrade_step calling convention.
    """
    remap = {
        'error': 'deleted',
        'waiting': 'in progress',
        'running': 'in progress',
        'finished': 'released',
    }
    status = value.get('status')
    if status in remap:
        value['status'] = remap[status]
unknown
codeparrot/codeparrot-clean
import os

# toolchains options
#
# Build configuration for an RT-Thread BSP targeting a Cortex-M4 MCU.
# Selects one of three toolchains (GNU gcc, Keil armcc, IAR) and derives
# the compiler/assembler/linker command names and flags that the SCons
# build consumes as module-level globals.
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='gcc'

# bsp lib config
BSP_LIBRARY_TYPE = None

# Environment overrides: RTT_CC picks the toolchain, RTT_ROOT the
# RT-Thread source tree.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
    RTT_ROOT = os.getenv('RTT_ROOT')

# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'C:\Users\XXYYZZ'  # placeholder; normally set via RTT_EXEC_PATH
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
    PLATFORM = 'iar'
    EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'

# RTT_EXEC_PATH, when set, wins over the per-toolchain default above.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

# 'debug' enables -O0/-g style flags below; any other value builds optimized.
BUILD = 'debug'

if PLATFORM == 'gcc':
    # toolchains
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    CXX = PREFIX + 'g++'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    # Cortex-M4F with single-precision hard-float ABI; per-section
    # code/data so --gc-sections can strip unused symbols at link time.
    DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE + ' -Dgcc'
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2 -g'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    CXXFLAGS = CFLAGS

    # After linking: produce a raw binary and print the section sizes.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'

elif PLATFORM == 'armcc':
    # toolchains
    CC = 'armcc'
    CXX = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'

    DEVICE = ' --cpu Cortex-M4.fp '
    CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
    AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rtthread.map --strict'
    CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
    LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'

    # Use Keil's MicroLIB to shrink the C library footprint.
    CFLAGS += ' -D__MICROLIB '
    AFLAGS += ' --pd "__MICROLIB SETA 1" '
    LFLAGS += ' --library_type=microlib '
    EXEC_PATH += '/ARM/ARMCC/bin/'

    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'

    # NOTE(review): CXXFLAGS is snapshotted before -std=c99 is appended,
    # so only the C compiler gets the C99 flag — presumably intentional
    # (armcc rejects -std=c99 for C++); confirm before reordering.
    CXXFLAGS = CFLAGS
    CFLAGS += ' -std=c99'

    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'

elif PLATFORM == 'iar':
    # toolchains
    CC = 'iccarm'
    CXX = 'iccarm'
    AS = 'iasmarm'
    AR = 'iarchive'
    LINK = 'ilinkarm'
    TARGET_EXT = 'out'

    DEVICE = '-Dewarm'

    CFLAGS = DEVICE
    CFLAGS += ' --diag_suppress Pa050'
    # Disable IAR optimizations that can reorder code; final optimization
    # level is chosen by the BUILD switch below.
    CFLAGS += ' --no_cse'
    CFLAGS += ' --no_unroll'
    CFLAGS += ' --no_inline'
    CFLAGS += ' --no_code_motion'
    CFLAGS += ' --no_tbaa'
    CFLAGS += ' --no_clustering'
    CFLAGS += ' --no_scheduling'
    CFLAGS += ' --endian=little'
    CFLAGS += ' --cpu=Cortex-M4'
    CFLAGS += ' -e'
    CFLAGS += ' --fpu=VFPv4_sp'
    CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
    CFLAGS += ' --silent'

    AFLAGS = DEVICE
    AFLAGS += ' -s+'
    AFLAGS += ' -w+'
    AFLAGS += ' -r'
    AFLAGS += ' --cpu Cortex-M4'
    AFLAGS += ' --fpu VFPv4_sp'
    AFLAGS += ' -S'

    if BUILD == 'debug':
        CFLAGS += ' --debug'
        CFLAGS += ' -On'
    else:
        CFLAGS += ' -Oh'

    LFLAGS = ' --config "board/linker_scripts/link.icf"'
    LFLAGS += ' --entry __iar_program_start'

    CXXFLAGS = CFLAGS

    EXEC_PATH = EXEC_PATH + '/arm/bin/'
    POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'


def dist_handle(BSP_ROOT):
    """Hook called by RT-Thread's `scons --dist`: delegate the packaging of
    this BSP to the shared sdk_dist helper shipped next to the BSP tree."""
    import sys
    # NOTE(review): cwd_path is captured but never used — kept as-is.
    cwd_path = os.getcwd()
    sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
    from sdk_dist import dist_do_building
    dist_do_building(BSP_ROOT)
unknown
codeparrot/codeparrot-clean
import { execSync } from 'node:child_process' export function isRepoDirty(cwd?: string) { try { let stdout = execSync('git status --porcelain', { encoding: 'utf-8', cwd }) return stdout.trim() !== '' } catch (error) { // If it's not a git repository we don't know if it's dirty or not. But we // also don't want to block the migration. Maybe we can still fail and // require a `--force` flag? if (error?.toString?.().includes('not a git repository')) { return false } return true } }
typescript
github
https://github.com/tailwindlabs/tailwindcss
packages/@tailwindcss-upgrade/src/utils/git.ts
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// See docs in ../ops/nn_ops.cc.
//
// Forward and gradient kernels for the ReLU family of activations
// (Relu, Relu6, LeakyRelu, Elu, Selu).  Each forward op is a thin
// UnaryElementWiseOp wrapper around the matching functor in
// relu_op_functor.h; each gradient op is the BinaryElementWiseOp
// counterpart taking (gradients, activations-or-inputs).

#ifndef TENSORFLOW_CORE_KERNELS_RELU_OP_H_
#define TENSORFLOW_CORE_KERNELS_RELU_OP_H_

#define EIGEN_USE_THREADS

#include "unsupported/Eigen/CXX11/Tensor"  // from @eigen_archive
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/relu_op_functor.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {

// Forward Relu: delegates the element-wise computation to functor::Relu.
template <typename Device, typename T>
class ReluOp : public UnaryElementWiseOp<T, ReluOp<Device, T>> {
 public:
  using UnaryElementWiseOp<T, ReluOp<Device, T>>::UnaryElementWiseOp;

  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {
    functor::Relu<Device, T> functor;
    functor(context->eigen_device<Device>(), input.flat<T>(),
            output->flat<T>());
  }
};

// Out of line check to save code space (we have this code once, rather
// than once for every NDIMS * NumTypes * Num_different_relu_variants
// functions.
struct ReluHelpers {
  // Flags an InvalidArgument on `context` when g and a differ in shape.
  static void ValidateSameSizeHelper(OpKernelContext* context, const Tensor& g,
                                     const Tensor& a) {
    OP_REQUIRES(context, a.IsSameSize(g),
                errors::InvalidArgument("g and a must be the same size"));
  }
  // Returns true iff g and a share a shape; on false the error status is
  // already recorded on `context`, so callers just return early.
  static bool ValidateSameSize(OpKernelContext* context, const Tensor& g,
                               const Tensor& a) {
    ValidateSameSizeHelper(context, g, a);
    return context->status().ok();
  }
};

template <typename Device, typename T>
class ReluGradOp : public BinaryElementWiseOp<T, ReluGradOp<Device, T>> {
 public:
  using BinaryElementWiseOp<T, ReluGradOp<Device, T>>::BinaryElementWiseOp;

  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
                         const Tensor& a, Tensor* output);

  // INPUTS:
  //   g (gradients): backpropagated gradients
  //   a (inputs): either the inputs that were passed to ReluOp(), or its
  //               outputs (using either one yields the same result here).
  // OUTPUT:
  //   gradients to backprop
  template <int NDIMS>
  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,
               Tensor* output) {
    // Non-template body shared across all NDIMS instantiations.
    OperateNoTemplate(context, g, a, output);
  }
};

template <typename Device, typename T>
void ReluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
                                              const Tensor& g, const Tensor& a,
                                              Tensor* output) {
  if (!ReluHelpers::ValidateSameSize(context, g, a)) return;
  functor::ReluGrad<Device, T> functor;
  functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(),
          output->flat<T>());
}

// Forward Relu6: min(max(x, 0), 6), via functor::Relu6.
template <typename Device, typename T>
class Relu6Op : public UnaryElementWiseOp<T, Relu6Op<Device, T>> {
 public:
  using UnaryElementWiseOp<T, Relu6Op<Device, T>>::UnaryElementWiseOp;

  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {
    functor::Relu6<Device, T> functor;
    functor(context->eigen_device<Device>(), input.flat<T>(),
            output->flat<T>());
  }
};

template <typename Device, typename T>
class Relu6GradOp : public BinaryElementWiseOp<T, Relu6GradOp<Device, T>> {
 public:
  using BinaryElementWiseOp<T, Relu6GradOp<Device, T>>::BinaryElementWiseOp;

  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
                         const Tensor& a, Tensor* output);

  // INPUTS:
  //   g (gradients): backpropagated gradients
  //   a (inputs): inputs that were passed to Relu6Op()
  // OUTPUT:
  //   gradients to backprop
  template <int NDIMS>
  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,
               Tensor* output) {
    OperateNoTemplate(context, g, a, output);
  }
};

template <typename Device, typename T>
void Relu6GradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
                                               const Tensor& g, const Tensor& a,
                                               Tensor* output) {
  if (!ReluHelpers::ValidateSameSize(context, g, a)) return;
  functor::Relu6Grad<Device, T> functor;
  functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(),
          output->flat<T>());
}

// Forward LeakyRelu: x if x > 0 else alpha * x, with alpha taken from the
// op's "alpha" attribute at construction time.
template <typename Device, typename T>
class LeakyReluOp : public UnaryElementWiseOp<T, LeakyReluOp<Device, T>> {
 public:
  explicit LeakyReluOp(OpKernelConstruction* context)
      : UnaryElementWiseOp<T, LeakyReluOp<Device, T>>(context) {
    // Attribute arrives as float; cast once to the kernel's compute type.
    float alpha_tmp;
    OP_REQUIRES_OK(context, context->GetAttr("alpha", &alpha_tmp));
    alpha_ = T(alpha_tmp);
  }

  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {
    functor::LeakyRelu<Device, T> functor;
    functor({context->eigen_device<Device>(), input.flat<T>(), alpha_,
             output->flat<T>()});
  }

 private:
  T alpha_;  // negative-slope coefficient from the "alpha" attr
};

template <typename Device, typename T>
class LeakyReluGradOp
    : public BinaryElementWiseOp<T, LeakyReluGradOp<Device, T>> {
 public:
  explicit LeakyReluGradOp(OpKernelConstruction* context)
      : BinaryElementWiseOp<T, LeakyReluGradOp<Device, T>>(context) {
    float alpha_tmp;
    OP_REQUIRES_OK(context, context->GetAttr("alpha", &alpha_tmp));
    alpha_ = T(alpha_tmp);
  }

  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
                         const Tensor& a, T alpha, Tensor* output);

  // INPUTS:
  //   g (gradients): backpropagated gradients
  //   a (inputs): either the inputs that were passed to LeakyReluOp(), or its
  //               outputs (using either one yields the same result here).
  // OUTPUT:
  //   gradients to backprop
  template <int NDIMS>
  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,
               Tensor* output) {
    OperateNoTemplate(context, g, a, alpha_, output);
  }

 private:
  T alpha_;  // negative-slope coefficient from the "alpha" attr
};

template <typename Device, typename T>
void LeakyReluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
                                                   const Tensor& g,
                                                   const Tensor& a, T alpha,
                                                   Tensor* output) {
  if (!ReluHelpers::ValidateSameSize(context, g, a)) return;
  functor::LeakyReluGrad<Device, T> functor;
  functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(), alpha,
          output->flat<T>());
};

// Forward Elu: x if x > 0 else exp(x) - 1, via functor::Elu.
template <typename Device, typename T>
class EluOp : public UnaryElementWiseOp<T, EluOp<Device, T>> {
 public:
  using UnaryElementWiseOp<T, EluOp<Device, T>>::UnaryElementWiseOp;

  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {
    functor::Elu<Device, T> functor;
    functor(context->eigen_device<Device>(), input.flat<T>(),
            output->flat<T>());
  }
};

template <typename Device, typename T>
class EluGradOp : public BinaryElementWiseOp<T, EluGradOp<Device, T>> {
 public:
  using BinaryElementWiseOp<T, EluGradOp<Device, T>>::BinaryElementWiseOp;

  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
                         const Tensor& a, Tensor* output);

  // INPUTS:
  //   g (gradients): backpropagated gradients
  //   a (outputs): outputs of the EluOp()
  // OUTPUT:
  //   gradients to backprop
  template <int NDIMS>
  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,
               Tensor* output) {
    OperateNoTemplate(context, g, a, output);
  }
};

template <typename Device, typename T>
void EluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
                                             const Tensor& g, const Tensor& a,
                                             Tensor* output) {
  if (!ReluHelpers::ValidateSameSize(context, g, a)) return;
  functor::EluGrad<Device, T> functor;
  functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(),
          output->flat<T>());
}

// Forward Selu: scaled Elu, via functor::Selu.
template <typename Device, typename T>
class SeluOp : public UnaryElementWiseOp<T, SeluOp<Device, T>> {
 public:
  using UnaryElementWiseOp<T, SeluOp<Device, T>>::UnaryElementWiseOp;

  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {
    functor::Selu<Device, T> functor;
    functor(context->eigen_device<Device>(), input.flat<T>(),
            output->flat<T>());
  }
};

template <typename Device, typename T>
class SeluGradOp : public BinaryElementWiseOp<T, SeluGradOp<Device, T>> {
 public:
  using BinaryElementWiseOp<T, SeluGradOp<Device, T>>::BinaryElementWiseOp;

  void OperateNoTemplate(OpKernelContext* context, const Tensor& g,
                         const Tensor& a, Tensor* output);

  // INPUTS:
  //   g (gradients): backpropagated gradients
  //   a (outputs): outputs of the SeluOp()
  // OUTPUT:
  //   gradients to backprop
  template <int NDIMS>
  void Operate(OpKernelContext* context, const Tensor& g, const Tensor& a,
               Tensor* output) {
    OperateNoTemplate(context, g, a, output);
  }
};

template <typename Device, typename T>
void SeluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
                                              const Tensor& g, const Tensor& a,
                                              Tensor* output) {
  if (!ReluHelpers::ValidateSameSize(context, g, a)) return;
  functor::SeluGrad<Device, T> functor;
  functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(),
          output->flat<T>());
}

}  // namespace tensorflow

#undef EIGEN_USE_THREADS

#endif  // TENSORFLOW_CORE_KERNELS_RELU_OP_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/kernels/relu_op.h
"""Unit tests for socket timeout feature.""" import functools import unittest from test import support from test.support import socket_helper import time import errno import socket @functools.lru_cache() def resolve_address(host, port): """Resolve an (host, port) to an address. We must perform name resolution before timeout tests, otherwise it will be performed by connect(). """ with socket_helper.transient_internet(host): return socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)[0][4] class CreationTestCase(unittest.TestCase): """Test case for socket.gettimeout() and socket.settimeout()""" def setUp(self): self.sock = self.enterContext( socket.socket(socket.AF_INET, socket.SOCK_STREAM)) def testObjectCreation(self): # Test Socket creation self.assertEqual(self.sock.gettimeout(), None, "timeout not disabled by default") def testFloatReturnValue(self): # Test return value of gettimeout() self.sock.settimeout(7.345) self.assertEqual(self.sock.gettimeout(), 7.345) self.sock.settimeout(3) self.assertEqual(self.sock.gettimeout(), 3) self.sock.settimeout(None) self.assertEqual(self.sock.gettimeout(), None) def testReturnType(self): # Test return type of gettimeout() self.sock.settimeout(1) self.assertIs(type(self.sock.gettimeout()), float) self.sock.settimeout(3.9) self.assertIs(type(self.sock.gettimeout()), float) def testTypeCheck(self): # Test type checking by settimeout() self.sock.settimeout(0) self.sock.settimeout(0) self.sock.settimeout(0.0) self.sock.settimeout(None) self.assertRaises(TypeError, self.sock.settimeout, "") self.assertRaises(TypeError, self.sock.settimeout, "") self.assertRaises(TypeError, self.sock.settimeout, ()) self.assertRaises(TypeError, self.sock.settimeout, []) self.assertRaises(TypeError, self.sock.settimeout, {}) self.assertRaises(TypeError, self.sock.settimeout, 0j) def testRangeCheck(self): # Test range checking by settimeout() self.assertRaises(ValueError, self.sock.settimeout, -1) self.assertRaises(ValueError, 
self.sock.settimeout, -1) self.assertRaises(ValueError, self.sock.settimeout, -1.0) def testTimeoutThenBlocking(self): # Test settimeout() followed by setblocking() self.sock.settimeout(10) self.sock.setblocking(True) self.assertEqual(self.sock.gettimeout(), None) self.sock.setblocking(False) self.assertEqual(self.sock.gettimeout(), 0.0) self.sock.settimeout(10) self.sock.setblocking(False) self.assertEqual(self.sock.gettimeout(), 0.0) self.sock.setblocking(True) self.assertEqual(self.sock.gettimeout(), None) def testBlockingThenTimeout(self): # Test setblocking() followed by settimeout() self.sock.setblocking(False) self.sock.settimeout(1) self.assertEqual(self.sock.gettimeout(), 1) self.sock.setblocking(True) self.sock.settimeout(1) self.assertEqual(self.sock.gettimeout(), 1) class TimeoutTestCase(unittest.TestCase): # There are a number of tests here trying to make sure that an operation # doesn't take too much longer than expected. But competing machine # activity makes it inevitable that such tests will fail at times. # When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K # and Win98SE. Boosting it to 2.0 helped a lot, but isn't a real # solution. fuzz = 2.0 localhost = socket_helper.HOST def setUp(self): raise NotImplementedError() def _sock_operation(self, count, timeout, method, *args): """ Test the specified socket method. The method is run at most `count` times and must raise a TimeoutError within `timeout` + self.fuzz seconds. 
""" self.sock.settimeout(timeout) method = getattr(self.sock, method) for i in range(count): t1 = time.monotonic() try: method(*args) except TimeoutError as e: delta = time.monotonic() - t1 break else: self.fail('TimeoutError was not raised') # These checks should account for timing unprecision self.assertLess(delta, timeout + self.fuzz) self.assertGreater(delta, timeout - 1.0) class TCPTimeoutTestCase(TimeoutTestCase): """TCP test case for socket.socket() timeout functions""" def setUp(self): self.sock = self.enterContext( socket.socket(socket.AF_INET, socket.SOCK_STREAM)) self.addr_remote = resolve_address('www.python.org.', 80) def testConnectTimeout(self): # Testing connect timeout is tricky: we need to have IP connectivity # to a host that silently drops our packets. We can't simulate this # from Python because it's a function of the underlying TCP/IP stack. # So, the following port on the pythontest.net host has been defined: blackhole = resolve_address('pythontest.net', 56666) # Blackhole has been configured to silently drop any incoming packets. # No RSTs (for TCP) or ICMP UNREACH (for UDP/ICMP) will be sent back # to hosts that attempt to connect to this address: which is exactly # what we need to confidently test connect timeout. # However, we want to prevent false positives. It's not unreasonable # to expect certain hosts may not be able to reach the blackhole, due # to firewalling or general network configuration. In order to improve # our confidence in testing the blackhole, a corresponding 'whitehole' # has also been set up using one port higher: whitehole = resolve_address('pythontest.net', 56667) # This address has been configured to immediately drop any incoming # packets as well, but it does it respectfully with regards to the # incoming protocol. RSTs are sent for TCP packets, and ICMP UNREACH # is sent for UDP/ICMP packets. This means our attempts to connect to # it should be met immediately with ECONNREFUSED. 
The test case has # been structured around this premise: if we get an ECONNREFUSED from # the whitehole, we proceed with testing connect timeout against the # blackhole. If we don't, we skip the test (with a message about not # getting the required RST from the whitehole within the required # timeframe). # For the records, the whitehole/blackhole configuration has been set # up using the 'iptables' firewall, using the following rules: # # -A INPUT -p tcp --destination-port 56666 -j DROP # -A INPUT -p udp --destination-port 56666 -j DROP # -A INPUT -p tcp --destination-port 56667 -j REJECT # -A INPUT -p udp --destination-port 56667 -j REJECT # # See https://github.com/python/psf-salt/blob/main/pillar/base/firewall/snakebite.sls # for the current configuration. skip = True with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: try: timeout = support.LOOPBACK_TIMEOUT sock.settimeout(timeout) sock.connect((whitehole)) except TimeoutError: pass except OSError as err: if err.errno == errno.ECONNREFUSED: skip = False if skip: self.skipTest( "We didn't receive a connection reset (RST) packet from " "{}:{} within {} seconds, so we're unable to test connect " "timeout against the corresponding {}:{} (which is " "configured to silently drop packets)." 
.format( whitehole[0], whitehole[1], timeout, blackhole[0], blackhole[1], ) ) # All that hard work just to test if connect times out in 0.001s ;-) self.addr_remote = blackhole with socket_helper.transient_internet(self.addr_remote[0]): self._sock_operation(1, 0.001, 'connect', self.addr_remote) def testRecvTimeout(self): # Test recv() timeout with socket_helper.transient_internet(self.addr_remote[0]): self.sock.connect(self.addr_remote) self._sock_operation(1, 1.5, 'recv', 1024) def testAcceptTimeout(self): # Test accept() timeout socket_helper.bind_port(self.sock, self.localhost) self.sock.listen() self._sock_operation(1, 1.5, 'accept') def testSend(self): # Test send() timeout with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv: socket_helper.bind_port(serv, self.localhost) serv.listen() self.sock.connect(serv.getsockname()) # Send a lot of data in order to bypass buffering in the TCP stack. self._sock_operation(100, 1.5, 'send', b"X" * 200000) def testSendto(self): # Test sendto() timeout with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv: socket_helper.bind_port(serv, self.localhost) serv.listen() self.sock.connect(serv.getsockname()) # The address argument is ignored since we already connected. self._sock_operation(100, 1.5, 'sendto', b"X" * 200000, serv.getsockname()) def testSendall(self): # Test sendall() timeout with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serv: socket_helper.bind_port(serv, self.localhost) serv.listen() self.sock.connect(serv.getsockname()) # Send a lot of data in order to bypass buffering in the TCP stack. 
self._sock_operation(100, 1.5, 'sendall', b"X" * 200000) class UDPTimeoutTestCase(TimeoutTestCase): """UDP test case for socket.socket() timeout functions""" def setUp(self): self.sock = self.enterContext( socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) def testRecvfromTimeout(self): # Test recvfrom() timeout # Prevent "Address already in use" socket exceptions socket_helper.bind_port(self.sock, self.localhost) self._sock_operation(1, 1.5, 'recvfrom', 1024) def setUpModule(): support.requires('network') support.requires_working_socket(module=True) if __name__ == "__main__": unittest.main()
python
github
https://github.com/python/cpython
Lib/test/test_timeout.py
#!/usr/bin/python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from crontabber.base import BaseCronApp
from crontabber.mixins import (
    with_postgres_transactions,
    with_single_postgres_transaction,
    with_transactional_resource
)

from socorro.external.postgresql.dbapi2_util import execute_query_iter

from socorro.lib.util import DotDict


# Atomically drains the reprocessing_jobs table: DELETE ... RETURNING
# removes each queued row and hands back its crash_id in one statement.
_reprocessing_sql = """
    DELETE FROM reprocessing_jobs
    RETURNING crash_id
"""


@with_postgres_transactions()
@with_single_postgres_transaction()
@with_transactional_resource(
    'socorro.external.rabbitmq.crashstorage.ReprocessingRabbitMQCrashStore',
    'queuing'
)
class ReprocessingJobsApp(BaseCronApp):
    """Crontabber app that moves queued crash ids from Postgres to the
    RabbitMQ reprocessing queue."""

    app_name = 'reprocessing-jobs'
    # BUG FIX: the original adjacent-string concatenation was missing a
    # space, producing "...submitsto the reprocessing queue".
    app_description = (
        "Retrieves crash_ids from reprocessing_jobs and submits "
        "to the reprocessing queue"
    )
    app_version = '0.1'

    def run(self, connection):
        """Drain reprocessing_jobs inside one transaction (see the class
        decorators) and submit each crash id for reprocessing.

        ``legacy_processing: 0`` marks the crash for the normal processing
        path; the empty list is the (unused) dump-names argument.
        """
        for crash_id, in execute_query_iter(connection, _reprocessing_sql):
            self.queuing_connection_factory.save_raw_crash(
                DotDict({'legacy_processing': 0}),
                [],
                crash_id
            )
unknown
codeparrot/codeparrot-clean
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import re
import json

from itertools import chain

from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class Cliconf(CliconfBase):
    """Cliconf plugin for MikroTik RouterOS devices.

    Implements the minimal CliconfBase contract: device facts via
    `/system ... print` commands and raw command passthrough.  RouterOS has
    no candidate-config workflow, so the config methods are no-ops.
    """

    def get_device_info(self):
        """Return a dict of device facts scraped from `/system` commands.

        Keys: network_os (always 'RouterOS'), and network_os_version /
        network_os_model / network_os_hostname when the corresponding
        regex matches the device output.
        """
        device_info = {}

        device_info['network_os'] = 'RouterOS'
        # Version from `/system resource print` ("version: x.y.z").
        resource = self.get(b'/system resource print')
        data = to_text(resource, errors='surrogate_or_strict').strip()
        match = re.search(r'version: (\S+)', data)
        if match:
            device_info['network_os_version'] = match.group(1)

        # Hardware model from `/system routerboard print` ("model: ...").
        routerboard = self.get(b'/system routerboard print')
        data = to_text(routerboard, errors='surrogate_or_strict').strip()
        match = re.search(r'model: (.+)$', data, re.M)
        if match:
            device_info['network_os_model'] = match.group(1)

        # Hostname from `/system identity print` ("name: ...").
        identity = self.get(b'/system identity print')
        data = to_text(identity, errors='surrogate_or_strict').strip()
        match = re.search(r'name: (.+)$', data, re.M)
        if match:
            device_info['network_os_hostname'] = match.group(1)

        return device_info

    def get_config(self, source='running', format='text', flags=None):
        # Intentionally a no-op: RouterOS exposes no retrievable config
        # through this plugin; always returns None.
        return

    def edit_config(self, command):
        # Intentionally a no-op: configuration editing is not supported
        # through this plugin; always returns None.
        return

    def get(self, command, prompt=None, answer=None, sendonly=False):
        """Send a raw command to the device and return its output."""
        return self.send_command(command, prompt=prompt, answer=answer, sendonly=sendonly)

    def get_capabilities(self):
        """Return the plugin capabilities as a JSON string (rpc list,
        network_api name, and the device facts)."""
        result = {}
        result['rpc'] = self.get_base_rpc()
        result['network_api'] = 'cliconf'
        result['device_info'] = self.get_device_info()
        return json.dumps(result)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import crossovered_budget_report import analytic_account_budget_report import budget_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"

/* MD5 is only compiled in when needed: NTLM without SSPI, or Digest auth. */
#if (defined(USE_CURL_NTLM_CORE) && !defined(USE_WINDOWS_SSPI)) || \
  !defined(CURL_DISABLE_DIGEST_AUTH)

#include "curl_md5.h"
#include "curl_hmac.h"

/* Decide which TLS/crypto backend (if any) supplies the MD5 primitives.
   Each backend that can provide MD5 gets a USE_<backend>_MD5 define; when
   none is set, the public-domain fallback implementation below is used. */
#ifdef USE_OPENSSL
#include <openssl/opensslconf.h>
#if !defined(OPENSSL_NO_MD5) && !defined(OPENSSL_NO_DEPRECATED_3_0)
#define USE_OPENSSL_MD5
#endif
#endif

#ifdef USE_WOLFSSL
#include <wolfssl/options.h>
#ifndef NO_MD5
#define USE_WOLFSSL_MD5
#endif
#endif

#ifdef USE_MBEDTLS
#include <mbedtls/version.h>
#if MBEDTLS_VERSION_NUMBER < 0x03020000
#error "mbedTLS 3.2.0 or later required"
#endif
#include <psa/crypto_config.h>
#if defined(PSA_WANT_ALG_MD5) && PSA_WANT_ALG_MD5 /* mbedTLS 4+ */
#define USE_MBEDTLS_MD5
#endif
#endif

#ifdef USE_GNUTLS
#include <nettle/md5.h>
#elif defined(USE_OPENSSL_MD5)
#include <openssl/md5.h>
#elif defined(USE_WOLFSSL_MD5)
#include <wolfssl/openssl/md5.h>
#elif defined(USE_MBEDTLS_MD5)
#include <psa/crypto.h>
#elif (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && \
       (__MAC_OS_X_VERSION_MAX_ALLOWED >= 1040) && \
       defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \
       (__MAC_OS_X_VERSION_MIN_REQUIRED < 101500)) || \
  (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
   (__IPHONE_OS_VERSION_MAX_ALLOWED >= 20000) && \
   defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \
   (__IPHONE_OS_VERSION_MIN_REQUIRED < 130000))
#define AN_APPLE_OS
#include <CommonCrypto/CommonDigest.h>
#elif defined(USE_WIN32_CRYPTO)
#include <wincrypt.h>
#endif

/* Every backend branch below implements the same three-function interface
   over an opaque my_md5_ctx:
     my_md5_init(ctx)                 - start a new hash; CURLE_OK on success
     my_md5_update(ctx, input, len)   - absorb len bytes of input
     my_md5_final(digest, ctx)        - write the 16-byte digest
   These are wired into Curl_HMAC_MD5 / Curl_DIGEST_MD5 at the bottom. */

#ifdef USE_GNUTLS
typedef struct md5_ctx my_md5_ctx;

static CURLcode my_md5_init(void *ctx)
{
  md5_init(ctx);
  return CURLE_OK;
}

static void my_md5_update(void *ctx,
                          const unsigned char *input,
                          unsigned int len)
{
  /* note: nettle's argument order is (ctx, length, data) */
  md5_update(ctx, len, input);
}

static void my_md5_final(unsigned char *digest, void *ctx)
{
  md5_digest(ctx, 16, digest);
}

#elif defined(USE_OPENSSL_MD5) || \
  (defined(USE_WOLFSSL_MD5) && !defined(OPENSSL_COEXIST))

typedef MD5_CTX my_md5_ctx;

static CURLcode my_md5_init(void *ctx)
{
  if(!MD5_Init(ctx))
    return CURLE_OUT_OF_MEMORY;

  return CURLE_OK;
}

static void my_md5_update(void *ctx,
                          const unsigned char *input,
                          unsigned int len)
{
  (void)MD5_Update(ctx, input, len);
}

static void my_md5_final(unsigned char *digest, void *ctx)
{
  (void)MD5_Final(digest, ctx);
}

#elif defined(USE_WOLFSSL_MD5)
/* wolfSSL built with OPENSSL_COEXIST: must use the prefixed names */
typedef WOLFSSL_MD5_CTX my_md5_ctx;

static CURLcode my_md5_init(void *ctx)
{
  if(!wolfSSL_MD5_Init(ctx))
    return CURLE_OUT_OF_MEMORY;

  return CURLE_OK;
}

static void my_md5_update(void *ctx,
                          const unsigned char *input,
                          unsigned int len)
{
  (void)wolfSSL_MD5_Update(ctx, input, len);
}

static void my_md5_final(unsigned char *digest, void *ctx)
{
  (void)wolfSSL_MD5_Final(digest, ctx);
}

#elif defined(USE_MBEDTLS_MD5)
/* mbedTLS via the PSA Crypto multipart-hash API */
typedef psa_hash_operation_t my_md5_ctx;

static CURLcode my_md5_init(void *ctx)
{
  /* a zeroed psa_hash_operation_t is the documented "fresh operation"
     state required before psa_hash_setup() */
  memset(ctx, 0, sizeof(my_md5_ctx));
  if(psa_hash_setup(ctx, PSA_ALG_MD5) != PSA_SUCCESS)
    return CURLE_OUT_OF_MEMORY;

  return CURLE_OK;
}

static void my_md5_update(void *ctx,
                          const unsigned char *input,
                          unsigned int len)
{
  (void)psa_hash_update(ctx, input, len);
}

static void my_md5_final(unsigned char *digest, void *ctx)
{
  size_t actual_length;
  (void)psa_hash_finish(ctx, digest, 16, &actual_length);
}

#elif defined(AN_APPLE_OS)

/* For Apple operating systems: CommonCrypto has the functions we need.
   These functions are available on Tiger and later, as well as iOS 2.0
   and later. If you are building for an older cat, well, sorry.

   Declaring the functions as static like this seems to be a bit more
   reliable than defining COMMON_DIGEST_FOR_OPENSSL on older cats. */
# define my_md5_ctx CC_MD5_CTX

static CURLcode my_md5_init(void *ctx)
{
  if(!CC_MD5_Init(ctx))
    return CURLE_OUT_OF_MEMORY;

  return CURLE_OK;
}

static void my_md5_update(void *ctx,
                          const unsigned char *input,
                          unsigned int len)
{
  CC_MD5_Update(ctx, input, len);
}

static void my_md5_final(unsigned char *digest, void *ctx)
{
  CC_MD5_Final(digest, ctx);
}

#elif defined(USE_WIN32_CRYPTO)

/* Windows CryptoAPI backend: holds a provider and a hash handle; both are
   released in my_md5_final. */
struct md5_ctx {
  HCRYPTPROV hCryptProv;
  HCRYPTHASH hHash;
};
typedef struct md5_ctx my_md5_ctx;

static CURLcode my_md5_init(void *in)
{
  my_md5_ctx *ctx = (my_md5_ctx *)in;
  if(!CryptAcquireContext(&ctx->hCryptProv, NULL, NULL, PROV_RSA_FULL,
                          CRYPT_VERIFYCONTEXT | CRYPT_SILENT))
    return CURLE_OUT_OF_MEMORY;

  if(!CryptCreateHash(ctx->hCryptProv, CALG_MD5, 0, 0, &ctx->hHash)) {
    /* do not leak the provider when hash creation fails */
    CryptReleaseContext(ctx->hCryptProv, 0);
    ctx->hCryptProv = 0;
    return CURLE_FAILED_INIT;
  }

  return CURLE_OK;
}

static void my_md5_update(void *in,
                          const unsigned char *input,
                          unsigned int len)
{
  my_md5_ctx *ctx = in;
  CryptHashData(ctx->hHash, (const BYTE *)input, len, 0);
}

static void my_md5_final(unsigned char *digest, void *in)
{
  my_md5_ctx *ctx = (my_md5_ctx *)in;
  unsigned long length = 0;
  /* first query the digest length, then fetch it only if it is the
     expected 16 bytes */
  CryptGetHashParam(ctx->hHash, HP_HASHVAL, NULL, &length, 0);
  if(length == 16)
    CryptGetHashParam(ctx->hHash, HP_HASHVAL, digest, &length, 0);
  if(ctx->hHash)
    CryptDestroyHash(ctx->hHash);
  if(ctx->hCryptProv)
    CryptReleaseContext(ctx->hCryptProv, 0);
}

#else
/* When no other crypto library is available we use this code segment */

/*
 * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
 * MD5 Message-Digest Algorithm (RFC 1321).
 *
 * Homepage:
 * https://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
 *
 * Author:
 * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
 *
 * This software was written by Alexander Peslyak in 2001. No copyright is
 * claimed, and the software is hereby placed in the public domain. In case
 * this attempt to disclaim copyright and place the software in the public
 * domain is deemed null and void, then the software is Copyright (c) 2001
 * Alexander Peslyak and it is hereby released to the general public under
 * the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * There is ABSOLUTELY NO WARRANTY, express or implied.
 *
 * (This is a heavily cut-down "BSD license".)
 */

/* lo/hi: 64-bit message bit-length counter split in two 32-bit words;
   a..d: the MD5 chaining state; buffer: staging area for a partial
   64-byte block; block: decode scratch used by MD5_SET/MD5_GET. */
struct md5_ctx {
  uint32_t lo, hi;
  uint32_t a, b, c, d;
  unsigned char buffer[64];
  uint32_t block[16];
};

typedef struct md5_ctx my_md5_ctx;

/*
 * The basic MD5 functions.
 *
 * F and G are optimized compared to their RFC 1321 definitions for
 * architectures that lack an AND-NOT instruction, just like in Colin Plumb's
 * implementation.
 */
#define MD5_F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x, y, z) (((x) ^ (y)) ^ (z))
#define MD5_H2(x, y, z) ((x) ^ ((y) ^ (z)))
#define MD5_I(x, y, z) ((y) ^ ((x) | ~(z)))

/*
 * The MD5 transformation for all four rounds.
 */
#define MD5_STEP(f, a, b, c, d, x, t, s) \
  (a) += f((b), (c), (d)) + (x) + (t); \
  (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
  (a) += (b);

/*
 * SET reads 4 input bytes in little-endian byte order and stores them
 * in a properly aligned word in host byte order.
 *
 * The check for little-endian architectures that tolerate unaligned
 * memory accesses is just an optimization. Nothing will break if it
 * does not work.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
#define MD5_SET(n) (*(const uint32_t *)(const void *)&ptr[(n) * 4])
#define MD5_GET(n) MD5_SET(n)
#else
#define MD5_SET(n) (ctx->block[(n)] = \
  (uint32_t)ptr[(n) * 4] | \
  ((uint32_t)ptr[(n) * 4 + 1] << 8) | \
  ((uint32_t)ptr[(n) * 4 + 2] << 16) | \
  ((uint32_t)ptr[(n) * 4 + 3] << 24))
#define MD5_GET(n) (ctx->block[(n)])
#endif

/*
 * This processes one or more 64-byte data blocks, but does NOT update
 * the bit counters. There are no alignment requirements.
 */
static const void *my_md5_body(my_md5_ctx *ctx,
                               const void *data, unsigned long size)
{
  const unsigned char *ptr;
  uint32_t a, b, c, d;

  ptr = (const unsigned char *)data;

  a = ctx->a;
  b = ctx->b;
  c = ctx->c;
  d = ctx->d;

  do {
    uint32_t saved_a, saved_b, saved_c, saved_d;

    saved_a = a;
    saved_b = b;
    saved_c = c;
    saved_d = d;

/* Round 1 */
    MD5_STEP(MD5_F, a, b, c, d, MD5_SET(0), 0xd76aa478, 7)
    MD5_STEP(MD5_F, d, a, b, c, MD5_SET(1), 0xe8c7b756, 12)
    MD5_STEP(MD5_F, c, d, a, b, MD5_SET(2), 0x242070db, 17)
    MD5_STEP(MD5_F, b, c, d, a, MD5_SET(3), 0xc1bdceee, 22)
    MD5_STEP(MD5_F, a, b, c, d, MD5_SET(4), 0xf57c0faf, 7)
    MD5_STEP(MD5_F, d, a, b, c, MD5_SET(5), 0x4787c62a, 12)
    MD5_STEP(MD5_F, c, d, a, b, MD5_SET(6), 0xa8304613, 17)
    MD5_STEP(MD5_F, b, c, d, a, MD5_SET(7), 0xfd469501, 22)
    MD5_STEP(MD5_F, a, b, c, d, MD5_SET(8), 0x698098d8, 7)
    MD5_STEP(MD5_F, d, a, b, c, MD5_SET(9), 0x8b44f7af, 12)
    MD5_STEP(MD5_F, c, d, a, b, MD5_SET(10), 0xffff5bb1, 17)
    MD5_STEP(MD5_F, b, c, d, a, MD5_SET(11), 0x895cd7be, 22)
    MD5_STEP(MD5_F, a, b, c, d, MD5_SET(12), 0x6b901122, 7)
    MD5_STEP(MD5_F, d, a, b, c, MD5_SET(13), 0xfd987193, 12)
    MD5_STEP(MD5_F, c, d, a, b, MD5_SET(14), 0xa679438e, 17)
    MD5_STEP(MD5_F, b, c, d, a, MD5_SET(15), 0x49b40821, 22)

/* Round 2 */
    MD5_STEP(MD5_G, a, b, c, d, MD5_GET(1), 0xf61e2562, 5)
    MD5_STEP(MD5_G, d, a, b, c, MD5_GET(6), 0xc040b340, 9)
    MD5_STEP(MD5_G, c, d, a, b, MD5_GET(11), 0x265e5a51, 14)
    MD5_STEP(MD5_G, b, c, d, a, MD5_GET(0), 0xe9b6c7aa, 20)
    MD5_STEP(MD5_G, a, b, c, d, MD5_GET(5), 0xd62f105d, 5)
    MD5_STEP(MD5_G, d, a, b, c, MD5_GET(10), 0x02441453, 9)
    MD5_STEP(MD5_G, c, d, a, b, MD5_GET(15), 0xd8a1e681, 14)
    MD5_STEP(MD5_G, b, c, d, a, MD5_GET(4), 0xe7d3fbc8, 20)
    MD5_STEP(MD5_G, a, b, c, d, MD5_GET(9), 0x21e1cde6, 5)
    MD5_STEP(MD5_G, d, a, b, c, MD5_GET(14), 0xc33707d6, 9)
    MD5_STEP(MD5_G, c, d, a, b, MD5_GET(3), 0xf4d50d87, 14)
    MD5_STEP(MD5_G, b, c, d, a, MD5_GET(8), 0x455a14ed, 20)
    MD5_STEP(MD5_G, a, b, c, d, MD5_GET(13), 0xa9e3e905, 5)
    MD5_STEP(MD5_G, d, a, b, c, MD5_GET(2), 0xfcefa3f8, 9)
    MD5_STEP(MD5_G, c, d, a, b, MD5_GET(7), 0x676f02d9, 14)
    MD5_STEP(MD5_G, b, c, d, a, MD5_GET(12), 0x8d2a4c8a, 20)

/* Round 3 */
    MD5_STEP(MD5_H, a, b, c, d, MD5_GET(5), 0xfffa3942, 4)
    MD5_STEP(MD5_H2, d, a, b, c, MD5_GET(8), 0x8771f681, 11)
    MD5_STEP(MD5_H, c, d, a, b, MD5_GET(11), 0x6d9d6122, 16)
    MD5_STEP(MD5_H2, b, c, d, a, MD5_GET(14), 0xfde5380c, 23)
    MD5_STEP(MD5_H, a, b, c, d, MD5_GET(1), 0xa4beea44, 4)
    MD5_STEP(MD5_H2, d, a, b, c, MD5_GET(4), 0x4bdecfa9, 11)
    MD5_STEP(MD5_H, c, d, a, b, MD5_GET(7), 0xf6bb4b60, 16)
    MD5_STEP(MD5_H2, b, c, d, a, MD5_GET(10), 0xbebfbc70, 23)
    MD5_STEP(MD5_H, a, b, c, d, MD5_GET(13), 0x289b7ec6, 4)
    MD5_STEP(MD5_H2, d, a, b, c, MD5_GET(0), 0xeaa127fa, 11)
    MD5_STEP(MD5_H, c, d, a, b, MD5_GET(3), 0xd4ef3085, 16)
    MD5_STEP(MD5_H2, b, c, d, a, MD5_GET(6), 0x04881d05, 23)
    MD5_STEP(MD5_H, a, b, c, d, MD5_GET(9), 0xd9d4d039, 4)
    MD5_STEP(MD5_H2, d, a, b, c, MD5_GET(12), 0xe6db99e5, 11)
    MD5_STEP(MD5_H, c, d, a, b, MD5_GET(15), 0x1fa27cf8, 16)
    MD5_STEP(MD5_H2, b, c, d, a, MD5_GET(2), 0xc4ac5665, 23)

/* Round 4 */
    MD5_STEP(MD5_I, a, b, c, d, MD5_GET(0), 0xf4292244, 6)
    MD5_STEP(MD5_I, d, a, b, c, MD5_GET(7), 0x432aff97, 10)
    MD5_STEP(MD5_I, c, d, a, b, MD5_GET(14), 0xab9423a7, 15)
    MD5_STEP(MD5_I, b, c, d, a, MD5_GET(5), 0xfc93a039, 21)
    MD5_STEP(MD5_I, a, b, c, d, MD5_GET(12), 0x655b59c3, 6)
    MD5_STEP(MD5_I, d, a, b, c, MD5_GET(3), 0x8f0ccc92, 10)
    MD5_STEP(MD5_I, c, d, a, b, MD5_GET(10), 0xffeff47d, 15)
    MD5_STEP(MD5_I, b, c, d, a, MD5_GET(1), 0x85845dd1, 21)
    MD5_STEP(MD5_I, a, b, c, d, MD5_GET(8), 0x6fa87e4f, 6)
    MD5_STEP(MD5_I, d, a, b, c, MD5_GET(15), 0xfe2ce6e0, 10)
    MD5_STEP(MD5_I, c, d, a, b, MD5_GET(6), 0xa3014314, 15)
    MD5_STEP(MD5_I, b, c, d, a, MD5_GET(13), 0x4e0811a1, 21)
    MD5_STEP(MD5_I, a, b, c, d, MD5_GET(4), 0xf7537e82, 6)
    MD5_STEP(MD5_I, d, a, b, c, MD5_GET(11), 0xbd3af235, 10)
    MD5_STEP(MD5_I, c, d, a, b, MD5_GET(2), 0x2ad7d2bb, 15)
    MD5_STEP(MD5_I, b, c, d, a, MD5_GET(9), 0xeb86d391, 21)

    a += saved_a;
    b += saved_b;
    c += saved_c;
    d += saved_d;

    ptr += 64;
  } while(size -= 64);

  ctx->a = a;
  ctx->b = b;
  ctx->c = c;
  ctx->d = d;

  return ptr;
}

static CURLcode my_md5_init(void *in)
{
  my_md5_ctx *ctx = (my_md5_ctx *)in;
  /* RFC 1321 initial chaining values */
  ctx->a = 0x67452301;
  ctx->b = 0xefcdab89;
  ctx->c = 0x98badcfe;
  ctx->d = 0x10325476;

  ctx->lo = 0;
  ctx->hi = 0;

  return CURLE_OK;
}

static void my_md5_update(void *in, const unsigned char *input,
                          unsigned int len)
{
  uint32_t saved_lo;
  unsigned int used;
  my_md5_ctx *ctx = (my_md5_ctx *)in;

  /* advance the 64-bit bit-length counter (lo overflows into hi) */
  saved_lo = ctx->lo;
  ctx->lo = (saved_lo + len) & 0x1fffffff;
  if(ctx->lo < saved_lo)
    ctx->hi++;
  ctx->hi += (uint32_t)len >> 29;

  used = saved_lo & 0x3f;

  /* top up a previously buffered partial block first */
  if(used) {
    unsigned int available = 64 - used;

    if(len < available) {
      memcpy(&ctx->buffer[used], input, len);
      return;
    }

    memcpy(&ctx->buffer[used], input, available);
    input = (const unsigned char *)input + available;
    len -= available;
    my_md5_body(ctx, ctx->buffer, 64);
  }

  /* hash whole 64-byte blocks straight from the input */
  if(len >= 64) {
    input = my_md5_body(ctx, input, len & ~(unsigned long)0x3f);
    len &= 0x3f;
  }

  /* stash the remaining tail for the next update/final call */
  memcpy(ctx->buffer, input, len);
}

static void my_md5_final(unsigned char *digest, void *in)
{
  unsigned int used, available;
  my_md5_ctx *ctx = (my_md5_ctx *)in;

  used = ctx->lo & 0x3f;

  /* append the mandatory 0x80 padding byte */
  ctx->buffer[used++] = 0x80;

  available = 64 - used;

  /* not enough room for the 8-byte length: pad out and hash one extra
     block */
  if(available < 8) {
    memset(&ctx->buffer[used], 0, available);
    my_md5_body(ctx, ctx->buffer, 64);
    used = 0;
    available = 64;
  }

  memset(&ctx->buffer[used], 0, available - 8);

  /* encode the message length in bits, little-endian, in the last 8 bytes */
  ctx->lo <<= 3;
  ctx->buffer[56] = curlx_ultouc((ctx->lo) & 0xff);
  ctx->buffer[57] = curlx_ultouc((ctx->lo >> 8) & 0xff);
  ctx->buffer[58] = curlx_ultouc((ctx->lo >> 16) & 0xff);
  ctx->buffer[59] = curlx_ultouc(ctx->lo >> 24);
  ctx->buffer[60] = curlx_ultouc((ctx->hi) & 0xff);
  ctx->buffer[61] = curlx_ultouc((ctx->hi >> 8) & 0xff);
  ctx->buffer[62] = curlx_ultouc((ctx->hi >> 16) & 0xff);
  ctx->buffer[63] = curlx_ultouc(ctx->hi >> 24);

  my_md5_body(ctx, ctx->buffer, 64);

  /* emit the final state a..d as the 16-byte little-endian digest */
  digest[0] = curlx_ultouc((ctx->a) & 0xff);
  digest[1] = curlx_ultouc((ctx->a >> 8) & 0xff);
  digest[2] = curlx_ultouc((ctx->a >> 16) & 0xff);
  digest[3] = curlx_ultouc(ctx->a >> 24);
  digest[4] = curlx_ultouc((ctx->b) & 0xff);
  digest[5] = curlx_ultouc((ctx->b >> 8) & 0xff);
  digest[6] = curlx_ultouc((ctx->b >> 16) & 0xff);
  digest[7] = curlx_ultouc(ctx->b >> 24);
  digest[8] = curlx_ultouc((ctx->c) & 0xff);
  digest[9] = curlx_ultouc((ctx->c >> 8) & 0xff);
  digest[10] = curlx_ultouc((ctx->c >> 16) & 0xff);
  digest[11] = curlx_ultouc(ctx->c >> 24);
  digest[12] = curlx_ultouc((ctx->d) & 0xff);
  digest[13] = curlx_ultouc((ctx->d >> 8) & 0xff);
  digest[14] = curlx_ultouc((ctx->d >> 16) & 0xff);
  digest[15] = curlx_ultouc(ctx->d >> 24);

  /* wipe the context so no message remnants stay in memory */
  memset(ctx, 0, sizeof(*ctx));
}

#endif /* CRYPTO LIBS */

/* Parameter tables plugging the selected backend's my_md5_* functions into
   curl's generic HMAC and Digest machinery. */
const struct HMAC_params Curl_HMAC_MD5 = {
  my_md5_init,        /* Hash initialization function. */
  my_md5_update,      /* Hash update function. */
  my_md5_final,       /* Hash computation end function. */
  sizeof(my_md5_ctx), /* Size of hash context structure. */
  64,                 /* Maximum key length. */
  16                  /* Result size. */
};

const struct MD5_params Curl_DIGEST_MD5 = {
  my_md5_init,        /* Digest initialization function */
  my_md5_update,      /* Digest update function */
  my_md5_final,       /* Digest computation end function */
  sizeof(my_md5_ctx), /* Size of digest context struct */
  16                  /* Result size */
};

/*
 * @unittest: 1601
 * Returns CURLE_OK on success.
*/ CURLcode Curl_md5it(unsigned char *output, const unsigned char *input, const size_t len) { CURLcode result; my_md5_ctx ctx; result = my_md5_init(&ctx); if(!result) { my_md5_update(&ctx, input, curlx_uztoui(len)); my_md5_final(output, &ctx); } return result; } struct MD5_context *Curl_MD5_init(const struct MD5_params *md5params) { struct MD5_context *ctxt; /* Create MD5 context */ ctxt = curlx_malloc(sizeof(*ctxt)); if(!ctxt) return ctxt; ctxt->md5_hashctx = curlx_malloc(md5params->md5_ctxtsize); if(!ctxt->md5_hashctx) { curlx_free(ctxt); return NULL; } ctxt->md5_hash = md5params; if((*md5params->md5_init_func)(ctxt->md5_hashctx)) { curlx_free(ctxt->md5_hashctx); curlx_free(ctxt); return NULL; } return ctxt; } CURLcode Curl_MD5_update(struct MD5_context *context, const unsigned char *input, unsigned int len) { (*context->md5_hash->md5_update_func)(context->md5_hashctx, input, len); return CURLE_OK; } CURLcode Curl_MD5_final(struct MD5_context *context, unsigned char *result) { (*context->md5_hash->md5_final_func)(result, context->md5_hashctx); curlx_free(context->md5_hashctx); curlx_free(context); return CURLE_OK; } #endif /* Using NTLM (without SSPI) || Digest */
c
github
https://github.com/curl/curl
lib/md5.c
# Copyright 2021 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test's global test environment behavior. A user can specify a global test environment via testing::AddGlobalTestEnvironment. Failures in the global environment should result in all unit tests being skipped. This script tests such functionality by invoking googletest-global-environment-unittest_ (a program written with Google Test). 
""" import re from googletest.test import gtest_test_utils def RunAndReturnOutput(args=None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess( [ gtest_test_utils.GetTestExecutablePath( 'googletest-global-environment-unittest_' ) ] + (args or []) ).output class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase): """Tests global test environment failures.""" def testEnvironmentSetUpFails(self): """Tests the behavior of not specifying the fail_fast.""" # Run the test. txt = RunAndReturnOutput() # We should see the text of the global environment setup error. self.assertIn('Canned environment setup error', txt) # Our test should have been skipped due to the error, and not treated as a # pass. self.assertIn('[ SKIPPED ] 1 test', txt) self.assertIn('[ PASSED ] 0 tests', txt) # The test case shouldn't have been run. self.assertNotIn('Unexpected call', txt) def testEnvironmentSetUpAndTornDownForEachRepeat(self): """Tests the behavior of test environments and gtest_repeat.""" # When --gtest_recreate_environments_when_repeating is true, the global test # environment should be set up and torn down for each iteration. txt = RunAndReturnOutput([ '--gtest_repeat=2', '--gtest_recreate_environments_when_repeating=true', ]) expected_pattern = ( '(.|\n)*' r'Repeating all tests \(iteration 1\)' '(.|\n)*' 'Global test environment set-up.' '(.|\n)*' 'SomeTest.DoesFoo' '(.|\n)*' 'Global test environment tear-down' '(.|\n)*' r'Repeating all tests \(iteration 2\)' '(.|\n)*' 'Global test environment set-up.' '(.|\n)*' 'SomeTest.DoesFoo' '(.|\n)*' 'Global test environment tear-down' '(.|\n)*' ) self.assertRegex(txt, expected_pattern) def testEnvironmentSetUpAndTornDownOnce(self): """Tests environment and --gtest_recreate_environments_when_repeating.""" # By default the environment should only be set up and torn down once, at # the start and end of the test respectively. 
txt = RunAndReturnOutput( [ '--gtest_repeat=2', ] ) expected_pattern = ( '(.|\n)*' r'Repeating all tests \(iteration 1\)' '(.|\n)*' 'Global test environment set-up.' '(.|\n)*' 'SomeTest.DoesFoo' '(.|\n)*' r'Repeating all tests \(iteration 2\)' '(.|\n)*' 'SomeTest.DoesFoo' '(.|\n)*' 'Global test environment tear-down' '(.|\n)*' ) self.assertRegex(txt, expected_pattern) self.assertEqual(len(re.findall('Global test environment set-up', txt)), 1) self.assertEqual( len(re.findall('Global test environment tear-down', txt)), 1 ) if __name__ == '__main__': gtest_test_utils.Main()
python
github
https://github.com/google/googletest
googletest/test/googletest-global-environment-unittest.py
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BertGeneration model configuration """

from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a
    :class:`~transformers.BertGenerationPreTrainedModel`. It is used to instantiate a BertGeneration model according
    to the specified arguments, defining the model architecture.

    Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
    outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.

    Args:
        vocab_size (:obj:`int`, `optional`, defaults to 50358):
            Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
            :obj:`inputs_ids` passed when calling :class:`~transformers.BertGeneration`.
        hidden_size (:obj:`int`, `optional`, defaults to 1024):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (:obj:`int`, `optional`, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (:obj:`int`, `optional`, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (:obj:`int`, `optional`, defaults to 4096):
            Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
        hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`,
            :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
        hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If :obj:`True`, use gradient checkpointing to save memory at the expense of slower backward pass.
        position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"absolute"`):
            Type of position embedding. Choose one of :obj:`"absolute"`, :obj:`"relative_key"`,
            :obj:`"relative_key_query"`. For positional embeddings use :obj:`"absolute"`. For more information on
            :obj:`"relative_key"`, please refer to `Self-Attention with Relative Position Representations (Shaw et al.)
            <https://arxiv.org/abs/1803.02155>`__. For more information on :obj:`"relative_key_query"`, please refer to
            `Method 4` in `Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)
            <https://arxiv.org/abs/2009.13658>`__.
        use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if ``config.is_decoder=True``.

    Examples::

        >>> from transformers import BertGenerationConfig, BertGenerationEncoder

        >>> # Initializing a BertGeneration config
        >>> configuration = BertGenerationConfig()

        >>> # Initializing a model from the config
        >>> model = BertGenerationEncoder(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config
    """
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        gradient_checkpointing=False,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs
    ):
        # Special-token ids are forwarded to PretrainedConfig, which stores
        # them alongside the rest of the shared configuration machinery.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.gradient_checkpointing = gradient_checkpointing
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
unknown
codeparrot/codeparrot-clean
#define _XOPEN_SOURCE 700 #include "redismodule.h" #include <stdio.h> #include <stdlib.h> #include <pthread.h> #include <time.h> #define UNUSED(x) (void)(x) typedef struct { /* Mutex for protecting RedisModule_BlockedClientMeasureTime*() API from race * conditions due to timeout callback triggered in the main thread. */ pthread_mutex_t measuretime_mutex; int measuretime_completed; /* Indicates that time measure has ended and will not continue further */ int myint; /* Used for replying */ } BlockPrivdata; void blockClientPrivdataInit(RedisModuleBlockedClient *bc) { BlockPrivdata *block_privdata = RedisModule_Calloc(1, sizeof(*block_privdata)); block_privdata->measuretime_mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER; RedisModule_BlockClientSetPrivateData(bc, block_privdata); } void blockClientMeasureTimeStart(RedisModuleBlockedClient *bc, BlockPrivdata *block_privdata) { pthread_mutex_lock(&block_privdata->measuretime_mutex); RedisModule_BlockedClientMeasureTimeStart(bc); pthread_mutex_unlock(&block_privdata->measuretime_mutex); } void blockClientMeasureTimeEnd(RedisModuleBlockedClient *bc, BlockPrivdata *block_privdata, int completed) { pthread_mutex_lock(&block_privdata->measuretime_mutex); if (!block_privdata->measuretime_completed) { RedisModule_BlockedClientMeasureTimeEnd(bc); if (completed) block_privdata->measuretime_completed = 1; } pthread_mutex_unlock(&block_privdata->measuretime_mutex); } /* Reply callback for blocking command BLOCK.DEBUG */ int HelloBlock_Reply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { UNUSED(argv); UNUSED(argc); BlockPrivdata *block_privdata = RedisModule_GetBlockedClientPrivateData(ctx); return RedisModule_ReplyWithLongLong(ctx,block_privdata->myint); } /* Timeout callback for blocking command BLOCK.DEBUG */ int HelloBlock_Timeout(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { UNUSED(argv); UNUSED(argc); RedisModuleBlockedClient *bc = RedisModule_GetBlockedClientHandle(ctx); BlockPrivdata 
*block_privdata = RedisModule_GetBlockedClientPrivateData(ctx); blockClientMeasureTimeEnd(bc, block_privdata, 1); return RedisModule_ReplyWithSimpleString(ctx,"Request timedout"); } /* Private data freeing callback for BLOCK.DEBUG command. */ void HelloBlock_FreeData(RedisModuleCtx *ctx, void *privdata) { UNUSED(ctx); BlockPrivdata *block_privdata = privdata; pthread_mutex_destroy(&block_privdata->measuretime_mutex); RedisModule_Free(privdata); } /* Private data freeing callback for BLOCK.BLOCK command. */ void HelloBlock_FreeStringData(RedisModuleCtx *ctx, void *privdata) { RedisModule_FreeString(ctx, (RedisModuleString*)privdata); } /* The thread entry point that actually executes the blocking part * of the command BLOCK.DEBUG. */ void *BlockDebug_ThreadMain(void *arg) { void **targ = arg; RedisModuleBlockedClient *bc = targ[0]; long long delay = (unsigned long)targ[1]; long long enable_time_track = (unsigned long)targ[2]; BlockPrivdata *block_privdata = RedisModule_BlockClientGetPrivateData(bc); if (enable_time_track) blockClientMeasureTimeStart(bc, block_privdata); RedisModule_Free(targ); struct timespec ts; ts.tv_sec = delay / 1000; ts.tv_nsec = (delay % 1000) * 1000000; nanosleep(&ts, NULL); if (enable_time_track) blockClientMeasureTimeEnd(bc, block_privdata, 0); block_privdata->myint = rand(); RedisModule_UnblockClient(bc,block_privdata); return NULL; } /* The thread entry point that actually executes the blocking part * of the command BLOCK.DOUBLE_DEBUG. 
*/ void *DoubleBlock_ThreadMain(void *arg) { void **targ = arg; RedisModuleBlockedClient *bc = targ[0]; long long delay = (unsigned long)targ[1]; BlockPrivdata *block_privdata = RedisModule_BlockClientGetPrivateData(bc); blockClientMeasureTimeStart(bc, block_privdata); RedisModule_Free(targ); struct timespec ts; ts.tv_sec = delay / 1000; ts.tv_nsec = (delay % 1000) * 1000000; nanosleep(&ts, NULL); blockClientMeasureTimeEnd(bc, block_privdata, 0); /* call again RedisModule_BlockedClientMeasureTimeStart() and * RedisModule_BlockedClientMeasureTimeEnd and ensure that the * total execution time is 2x the delay. */ blockClientMeasureTimeStart(bc, block_privdata); nanosleep(&ts, NULL); blockClientMeasureTimeEnd(bc, block_privdata, 0); block_privdata->myint = rand(); RedisModule_UnblockClient(bc,block_privdata); return NULL; } void HelloBlock_Disconnected(RedisModuleCtx *ctx, RedisModuleBlockedClient *bc) { RedisModule_Log(ctx,"warning","Blocked client %p disconnected!", (void*)bc); } /* BLOCK.DEBUG <delay_ms> <timeout_ms> -- Block for <count> milliseconds, then reply with * a random number. Timeout is the command timeout, so that you can test * what happens when the delay is greater than the timeout. 
*/ int HelloBlock_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { if (argc != 3) return RedisModule_WrongArity(ctx); long long delay; long long timeout; if (RedisModule_StringToLongLong(argv[1],&delay) != REDISMODULE_OK) { return RedisModule_ReplyWithError(ctx,"ERR invalid count"); } if (RedisModule_StringToLongLong(argv[2],&timeout) != REDISMODULE_OK) { return RedisModule_ReplyWithError(ctx,"ERR invalid count"); } pthread_t tid; RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout); blockClientPrivdataInit(bc); /* Here we set a disconnection handler, however since this module will * block in sleep() in a thread, there is not much we can do in the * callback, so this is just to show you the API. */ RedisModule_SetDisconnectCallback(bc,HelloBlock_Disconnected); /* Now that we setup a blocking client, we need to pass the control * to the thread. However we need to pass arguments to the thread: * the delay and a reference to the blocked client handle. */ void **targ = RedisModule_Alloc(sizeof(void*)*3); targ[0] = bc; targ[1] = (void*)(unsigned long) delay; // pass 1 as flag to enable time tracking targ[2] = (void*)(unsigned long) 1; if (pthread_create(&tid,NULL,BlockDebug_ThreadMain,targ) != 0) { RedisModule_AbortBlock(bc); return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread"); } pthread_detach(tid); return REDISMODULE_OK; } /* BLOCK.DEBUG_NOTRACKING <delay_ms> <timeout_ms> -- Block for <count> milliseconds, then reply with * a random number. Timeout is the command timeout, so that you can test * what happens when the delay is greater than the timeout. 
* this command does not track background time so the background time should no appear in stats*/ int HelloBlockNoTracking_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { if (argc != 3) return RedisModule_WrongArity(ctx); long long delay; long long timeout; if (RedisModule_StringToLongLong(argv[1],&delay) != REDISMODULE_OK) { return RedisModule_ReplyWithError(ctx,"ERR invalid count"); } if (RedisModule_StringToLongLong(argv[2],&timeout) != REDISMODULE_OK) { return RedisModule_ReplyWithError(ctx,"ERR invalid count"); } pthread_t tid; RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout); blockClientPrivdataInit(bc); /* Here we set a disconnection handler, however since this module will * block in sleep() in a thread, there is not much we can do in the * callback, so this is just to show you the API. */ RedisModule_SetDisconnectCallback(bc,HelloBlock_Disconnected); /* Now that we setup a blocking client, we need to pass the control * to the thread. However we need to pass arguments to the thread: * the delay and a reference to the blocked client handle. */ void **targ = RedisModule_Alloc(sizeof(void*)*3); targ[0] = bc; targ[1] = (void*)(unsigned long) delay; // pass 0 as flag to enable time tracking targ[2] = (void*)(unsigned long) 0; if (pthread_create(&tid,NULL,BlockDebug_ThreadMain,targ) != 0) { RedisModule_AbortBlock(bc); return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread"); } pthread_detach(tid); return REDISMODULE_OK; } /* BLOCK.DOUBLE_DEBUG <delay_ms> -- Block for 2 x <count> milliseconds, * then reply with a random number. * This command is used to test multiple calls to RedisModule_BlockedClientMeasureTimeStart() * and RedisModule_BlockedClientMeasureTimeEnd() within the same execution. 
*/ int HelloDoubleBlock_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { if (argc != 2) return RedisModule_WrongArity(ctx); long long delay; if (RedisModule_StringToLongLong(argv[1],&delay) != REDISMODULE_OK) { return RedisModule_ReplyWithError(ctx,"ERR invalid count"); } pthread_t tid; RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,0); blockClientPrivdataInit(bc); /* Now that we setup a blocking client, we need to pass the control * to the thread. However we need to pass arguments to the thread: * the delay and a reference to the blocked client handle. */ void **targ = RedisModule_Alloc(sizeof(void*)*2); targ[0] = bc; targ[1] = (void*)(unsigned long) delay; if (pthread_create(&tid,NULL,DoubleBlock_ThreadMain,targ) != 0) { RedisModule_AbortBlock(bc); return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread"); } pthread_detach(tid); return REDISMODULE_OK; } RedisModuleBlockedClient *blocked_client = NULL; /* BLOCK.BLOCK [TIMEOUT] -- Blocks the current client until released * or TIMEOUT seconds. If TIMEOUT is zero, no timeout function is * registered. */ int Block_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { if (RedisModule_IsBlockedReplyRequest(ctx)) { RedisModuleString *r = RedisModule_GetBlockedClientPrivateData(ctx); return RedisModule_ReplyWithString(ctx, r); } else if (RedisModule_IsBlockedTimeoutRequest(ctx)) { RedisModule_UnblockClient(blocked_client, NULL); /* Must be called to avoid leaks. */ blocked_client = NULL; return RedisModule_ReplyWithSimpleString(ctx, "Timed out"); } if (argc != 2) return RedisModule_WrongArity(ctx); long long timeout; if (RedisModule_StringToLongLong(argv[1], &timeout) != REDISMODULE_OK) { return RedisModule_ReplyWithError(ctx, "ERR invalid timeout"); } if (blocked_client) { return RedisModule_ReplyWithError(ctx, "ERR another client already blocked"); } /* Block client. 
We use this function as both a reply and optional timeout * callback and differentiate the different code flows above. */ blocked_client = RedisModule_BlockClient(ctx, Block_RedisCommand, timeout > 0 ? Block_RedisCommand : NULL, HelloBlock_FreeStringData, timeout); return REDISMODULE_OK; } /* BLOCK.IS_BLOCKED -- Returns 1 if we have a blocked client, or 0 otherwise. */ int IsBlocked_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { UNUSED(argv); UNUSED(argc); RedisModule_ReplyWithLongLong(ctx, blocked_client ? 1 : 0); return REDISMODULE_OK; } /* BLOCK.RELEASE [reply] -- Releases the blocked client and produce the specified reply. */ int Release_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { if (argc != 2) return RedisModule_WrongArity(ctx); if (!blocked_client) { return RedisModule_ReplyWithError(ctx, "ERR No blocked client"); } RedisModuleString *replystr = argv[1]; RedisModule_RetainString(ctx, replystr); RedisModule_UnblockClient(blocked_client, replystr); blocked_client = NULL; RedisModule_ReplyWithSimpleString(ctx, "OK"); return REDISMODULE_OK; } int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { UNUSED(argv); UNUSED(argc); if (RedisModule_Init(ctx,"block",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR; if (RedisModule_CreateCommand(ctx,"block.debug", HelloBlock_RedisCommand,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; if (RedisModule_CreateCommand(ctx,"block.double_debug", HelloDoubleBlock_RedisCommand,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; if (RedisModule_CreateCommand(ctx,"block.debug_no_track", HelloBlockNoTracking_RedisCommand,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; if (RedisModule_CreateCommand(ctx, "block.block", Block_RedisCommand, "", 0, 0, 0) == REDISMODULE_ERR) return REDISMODULE_ERR; if (RedisModule_CreateCommand(ctx,"block.is_blocked", IsBlocked_RedisCommand,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; if 
(RedisModule_CreateCommand(ctx,"block.release", Release_RedisCommand,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; return REDISMODULE_OK; }
c
github
https://github.com/redis/redis
tests/modules/blockonbackground.c
import datetime import decimal import unicodedata from importlib import import_module from django.conf import settings from django.utils import dateformat, datetime_safe, numberformat, six from django.utils.encoding import force_str from django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils.translation import ( check_for_language, get_language, to_locale, ) # format_cache is a mapping from (format_type, lang) to the format string. # By using the cache, it is possible to avoid running get_format_modules # repeatedly. _format_cache = {} _format_modules_cache = {} ISO_INPUT_FORMATS = { 'DATE_INPUT_FORMATS': ['%Y-%m-%d'], 'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'], 'DATETIME_INPUT_FORMATS': [ '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M', '%Y-%m-%d' ], } def reset_format_cache(): """Clear any cached formats. This method is provided primarily for testing purposes, so that the effects of cached formats can be removed. """ global _format_cache, _format_modules_cache _format_cache = {} _format_modules_cache = {} def iter_format_modules(lang, format_module_path=None): """ Does the heavy lifting of finding format modules. 
""" if not check_for_language(lang): return if format_module_path is None: format_module_path = settings.FORMAT_MODULE_PATH format_locations = [] if format_module_path: if isinstance(format_module_path, six.string_types): format_module_path = [format_module_path] for path in format_module_path: format_locations.append(path + '.%s') format_locations.append('django.conf.locale.%s') locale = to_locale(lang) locales = [locale] if '_' in locale: locales.append(locale.split('_')[0]) for location in format_locations: for loc in locales: try: yield import_module('%s.formats' % (location % loc)) except ImportError: pass def get_format_modules(lang=None, reverse=False): """ Returns a list of the format modules found """ if lang is None: lang = get_language() modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))) if reverse: return list(reversed(modules)) return modules def get_format(format_type, lang=None, use_l10n=None): """ For a specific format type, returns the format for the current language (locale), defaults to the format in the settings. format_type is the name of the format, e.g. 'DATE_FORMAT' If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. 
""" format_type = force_str(format_type) if use_l10n or (use_l10n is None and settings.USE_L10N): if lang is None: lang = get_language() cache_key = (format_type, lang) try: cached = _format_cache[cache_key] if cached is not None: return cached else: # Return the general setting by default return getattr(settings, format_type) except KeyError: for module in get_format_modules(lang): try: val = getattr(module, format_type) for iso_input in ISO_INPUT_FORMATS.get(format_type, ()): if iso_input not in val: if isinstance(val, tuple): val = list(val) val.append(iso_input) _format_cache[cache_key] = val return val except AttributeError: pass _format_cache[cache_key] = None return getattr(settings, format_type) get_format_lazy = lazy(get_format, six.text_type, list, tuple) def date_format(value, format=None, use_l10n=None): """ Formats a datetime.date or datetime.datetime object using a localizable format If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)) def time_format(value, format=None, use_l10n=None): """ Formats a datetime.time object using a localizable format If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)) def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False): """ Formats a numeric value using localization settings If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. 
""" if use_l10n or (use_l10n is None and settings.USE_L10N): lang = get_language() else: lang = None return numberformat.format( value, get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n), decimal_pos, get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n), get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n), force_grouping=force_grouping ) def localize(value, use_l10n=None): """ Checks if value is a localizable type (date, number...) and returns it formatted as a string using current locale format. If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ if isinstance(value, bool): return mark_safe(six.text_type(value)) elif isinstance(value, (decimal.Decimal, float) + six.integer_types): return number_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.datetime): return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n) elif isinstance(value, datetime.date): return date_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.time): return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n) else: return value def localize_input(value, default=None): """ Checks if an input value is a localizable type and returns it formatted with the appropriate formatting string of the current locale. 
""" if isinstance(value, (decimal.Decimal, float) + six.integer_types): return number_format(value) elif isinstance(value, datetime.datetime): value = datetime_safe.new_datetime(value) format = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0]) return value.strftime(format) elif isinstance(value, datetime.date): value = datetime_safe.new_date(value) format = force_str(default or get_format('DATE_INPUT_FORMATS')[0]) return value.strftime(format) elif isinstance(value, datetime.time): format = force_str(default or get_format('TIME_INPUT_FORMATS')[0]) return value.strftime(format) return value def sanitize_separators(value): """ Sanitizes a value according to the current decimal and thousand separator setting. Used with form field input. """ if settings.USE_L10N and isinstance(value, six.string_types): parts = [] decimal_separator = get_format('DECIMAL_SEPARATOR') if decimal_separator in value: value, decimals = value.split(decimal_separator, 1) parts.append(decimals) if settings.USE_THOUSAND_SEPARATOR: thousand_sep = get_format('THOUSAND_SEPARATOR') if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3: # Special case where we suspect a dot meant decimal separator (see #22171) pass else: for replacement in { thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}: value = value.replace(replacement, '') parts.append(value) value = '.'.join(reversed(parts)) return value
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.tosfs.commit.mapred; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.tosfs.commit.CommitUtils; import org.apache.hadoop.mapred.FileOutputCommitter; import org.apache.hadoop.mapred.FileOutputFormat; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobContext; import org.apache.hadoop.mapred.JobStatus; import org.apache.hadoop.mapred.TaskAttemptContext; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; public class Committer extends FileOutputCommitter { private static final Logger LOG = LoggerFactory.getLogger(Committer.class); private OutputCommitter wrapped = null; private static Path getOutputPath(JobContext context) { JobConf conf = context.getJobConf(); return FileOutputFormat.getOutputPath(conf); } private static Path getOutputPath(TaskAttemptContext context) { JobConf conf = context.getJobConf(); return FileOutputFormat.getOutputPath(conf); } private OutputCommitter getWrapped(JobContext context) throws IOException { if (wrapped == null) 
{ wrapped = CommitUtils.supportObjectStorageCommit(context.getConfiguration(), getOutputPath(context)) ? new org.apache.hadoop.fs.tosfs.commit.Committer(getOutputPath(context), context) : new FileOutputCommitter(); LOG.debug("Using OutputCommitter implementation {}", wrapped.getClass().getName()); } return wrapped; } @InterfaceAudience.Private @Override public Path getTaskAttemptPath(TaskAttemptContext context) throws IOException { Path out = getOutputPath(context); return out == null ? null : getTaskAttemptPath(context, out); } private OutputCommitter getWrapped(TaskAttemptContext context) throws IOException { if (wrapped == null) { wrapped = CommitUtils.supportObjectStorageCommit(context.getConfiguration(), getOutputPath(context)) ? new org.apache.hadoop.fs.tosfs.commit.Committer(getOutputPath(context), context) : new FileOutputCommitter(); } return wrapped; } @Override public Path getWorkPath(TaskAttemptContext context, Path outputPath) throws IOException { if (getWrapped(context) instanceof org.apache.hadoop.fs.tosfs.commit.Committer) { return ((org.apache.hadoop.fs.tosfs.commit.Committer) getWrapped(context)).getWorkPath(); } return super.getWorkPath(context, outputPath); } private Path getTaskAttemptPath(TaskAttemptContext context, Path out) throws IOException { Path workPath = FileOutputFormat.getWorkOutputPath(context.getJobConf()); if(workPath == null && out != null) { if (getWrapped(context) instanceof org.apache.hadoop.fs.tosfs.commit.Committer) { return CommitUtils.magicTaskAttemptPath(context, getOutputPath(context)); } else { return org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter .getTaskAttemptPath(context, out); } } return workPath; } @Override public void setupJob(JobContext context) throws IOException { getWrapped(context).setupJob(context); } @Override public void commitJob(JobContext context) throws IOException { getWrapped(context).commitJob(context); } @Override @Deprecated public void cleanupJob(JobContext context) throws 
IOException { getWrapped(context).cleanupJob(context); } @Override public void abortJob(JobContext context, int runState) throws IOException { JobStatus.State state; if(runState == JobStatus.State.RUNNING.getValue()) { state = JobStatus.State.RUNNING; } else if(runState == JobStatus.State.SUCCEEDED.getValue()) { state = JobStatus.State.SUCCEEDED; } else if(runState == JobStatus.State.FAILED.getValue()) { state = JobStatus.State.FAILED; } else if(runState == JobStatus.State.PREP.getValue()) { state = JobStatus.State.PREP; } else if(runState == JobStatus.State.KILLED.getValue()) { state = JobStatus.State.KILLED; } else { throw new IllegalArgumentException(runState+" is not a valid runState."); } getWrapped(context).abortJob(context, state); } @Override public void setupTask(TaskAttemptContext context) throws IOException { getWrapped(context).setupTask(context); } @Override public void commitTask(TaskAttemptContext context) throws IOException { getWrapped(context).commitTask(context); } @Override public void abortTask(TaskAttemptContext context) throws IOException { getWrapped(context).abortTask(context); } @Override public boolean needsTaskCommit(TaskAttemptContext context) throws IOException { return getWrapped(context).needsTaskCommit(context); } @Override @Deprecated public boolean isRecoverySupported() { return false; } @Override public boolean isCommitJobRepeatable(JobContext context) throws IOException { return getWrapped(context).isCommitJobRepeatable(context); } @Override public boolean isRecoverySupported(JobContext context) throws IOException { return getWrapped(context).isRecoverySupported(context); } @Override public void recoverTask(TaskAttemptContext context) throws IOException { getWrapped(context).recoverTask(context); } public String jobId() { Preconditions.checkNotNull(wrapped, "Encountered uninitialized job committer."); return wrapped instanceof org.apache.hadoop.fs.tosfs.commit.Committer ? 
((org.apache.hadoop.fs.tosfs.commit.Committer) wrapped).jobId() : null; } public Path getWorkPath() { Preconditions.checkNotNull(wrapped, "Encountered uninitialized job committer."); return wrapped instanceof org.apache.hadoop.fs.tosfs.commit.Committer ? ((org.apache.hadoop.fs.tosfs.commit.Committer) wrapped).getWorkPath() : null; } }
java
github
https://github.com/apache/hadoop
hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/commit/mapred/Committer.java
"""HMAC (Keyed-Hashing for Message Authentication) module. Implements the HMAC algorithm as described by RFC 2104. """ try: import _hashlib as _hashopenssl except ImportError: _hashopenssl = None _functype = None from _operator import _compare_digest as compare_digest else: compare_digest = _hashopenssl.compare_digest _functype = type(_hashopenssl.openssl_sha256) # builtin type try: import _hmac except ImportError: _hmac = None trans_5C = bytes((x ^ 0x5C) for x in range(256)) trans_36 = bytes((x ^ 0x36) for x in range(256)) # The size of the digests returned by HMAC depends on the underlying # hashing module used. Use digest_size from the instance of HMAC instead. digest_size = None def _is_shake_constructor(digest_like): if isinstance(digest_like, str): name = digest_like else: h = digest_like() if callable(digest_like) else digest_like.new() if not isinstance(name := getattr(h, "name", None), str): return False return name.startswith(("shake", "SHAKE")) def _get_digest_constructor(digest_like): if callable(digest_like): return digest_like if isinstance(digest_like, str): def digest_wrapper(d=b''): import hashlib return hashlib.new(digest_like, d) else: def digest_wrapper(d=b''): return digest_like.new(d) return digest_wrapper class HMAC: """RFC 2104 HMAC class. Also complies with RFC 4231. This supports the API for Cryptographic Hash Functions (PEP 247). """ # Note: self.blocksize is the default blocksize; self.block_size # is effective block size as well as the public API attribute. blocksize = 64 # 512-bit HMAC; can be changed in subclasses. __slots__ = ( "_hmac", "_inner", "_outer", "block_size", "digest_size" ) def __init__(self, key, msg=None, digestmod=''): """Create a new HMAC object. key: bytes or buffer, key for the keyed hash object. msg: bytes or buffer, Initial input for the hash or None. digestmod: A hash name suitable for hashlib.new(). *OR* A hashlib constructor returning a new hash object. *OR* A module supporting PEP 247. 
Required as of 3.8, despite its position after the optional msg argument. Passing it as a keyword argument is recommended, though not required for legacy API reasons. """ if not isinstance(key, (bytes, bytearray)): raise TypeError(f"key: expected bytes or bytearray, " f"but got {type(key).__name__!r}") if not digestmod: raise TypeError("Missing required argument 'digestmod'.") self.__init(key, msg, digestmod) def __init(self, key, msg, digestmod): if _hashopenssl and isinstance(digestmod, (str, _functype)): try: self._init_openssl_hmac(key, msg, digestmod) return except _hashopenssl.UnsupportedDigestmodError: # pragma: no cover pass if _hmac and isinstance(digestmod, str): try: self._init_builtin_hmac(key, msg, digestmod) return except _hmac.UnknownHashError: # pragma: no cover pass self._init_old(key, msg, digestmod) def _init_openssl_hmac(self, key, msg, digestmod): self._hmac = _hashopenssl.hmac_new(key, msg, digestmod=digestmod) self._inner = self._outer = None # because the slots are defined self.digest_size = self._hmac.digest_size self.block_size = self._hmac.block_size _init_hmac = _init_openssl_hmac # for backward compatibility (if any) def _init_builtin_hmac(self, key, msg, digestmod): self._hmac = _hmac.new(key, msg, digestmod=digestmod) self._inner = self._outer = None # because the slots are defined self.digest_size = self._hmac.digest_size self.block_size = self._hmac.block_size def _init_old(self, key, msg, digestmod): import warnings digest_cons = _get_digest_constructor(digestmod) if _is_shake_constructor(digest_cons): raise ValueError(f"unsupported hash algorithm {digestmod}") self._hmac = None self._outer = digest_cons() self._inner = digest_cons() self.digest_size = self._inner.digest_size if hasattr(self._inner, 'block_size'): blocksize = self._inner.block_size if blocksize < 16: warnings.warn(f"block_size of {blocksize} seems too small; " f"using our default of {self.blocksize}.", RuntimeWarning, 2) blocksize = self.blocksize # pragma: no 
cover else: warnings.warn("No block_size attribute on given digest object; " f"Assuming {self.blocksize}.", RuntimeWarning, 2) blocksize = self.blocksize # pragma: no cover if len(key) > blocksize: key = digest_cons(key).digest() self.block_size = blocksize key = key.ljust(blocksize, b'\0') self._outer.update(key.translate(trans_5C)) self._inner.update(key.translate(trans_36)) if msg is not None: self.update(msg) @property def name(self): if self._hmac: return self._hmac.name else: return f"hmac-{self._inner.name}" def update(self, msg): """Feed data from msg into this hashing object.""" inst = self._hmac or self._inner inst.update(msg) def copy(self): """Return a separate copy of this hashing object. An update to this copy won't affect the original object. """ # Call __new__ directly to avoid the expensive __init__. other = self.__class__.__new__(self.__class__) other.digest_size = self.digest_size other.block_size = self.block_size if self._hmac: other._hmac = self._hmac.copy() other._inner = other._outer = None else: other._hmac = None other._inner = self._inner.copy() other._outer = self._outer.copy() return other def _current(self): """Return a hash object for the current state. To be used only internally with digest() and hexdigest(). """ if self._hmac: return self._hmac else: h = self._outer.copy() h.update(self._inner.digest()) return h def digest(self): """Return the hash value of this hashing object. This returns the hmac value as bytes. The object is not altered in any way by this function; you can continue updating the object after calling this function. """ h = self._current() return h.digest() def hexdigest(self): """Like digest(), but returns a string of hexadecimal digits instead. """ h = self._current() return h.hexdigest() def new(key, msg=None, digestmod=''): """Create a new hashing object and return it. key: bytes or buffer, The starting key for the hash. msg: bytes or buffer, Initial input for the hash, or None. 
digestmod: A hash name suitable for hashlib.new(). *OR* A hashlib constructor returning a new hash object. *OR* A module supporting PEP 247. Required as of 3.8, despite its position after the optional msg argument. Passing it as a keyword argument is recommended, though not required for legacy API reasons. You can now feed arbitrary bytes into the object using its update() method, and can ask for the hash value at any time by calling its digest() or hexdigest() methods. """ return HMAC(key, msg, digestmod) def digest(key, msg, digest): """Fast inline implementation of HMAC. key: bytes or buffer, The key for the keyed hash object. msg: bytes or buffer, Input message. digest: A hash name suitable for hashlib.new() for best performance. *OR* A hashlib constructor returning a new hash object. *OR* A module supporting PEP 247. """ if _hashopenssl and isinstance(digest, (str, _functype)): try: return _hashopenssl.hmac_digest(key, msg, digest) except OverflowError: # OpenSSL's HMAC limits the size of the key to INT_MAX. # Instead of falling back to HACL* implementation which # may still not be supported due to a too large key, we # directly switch to the pure Python fallback instead # even if we could have used streaming HMAC for small keys # but large messages. return _compute_digest_fallback(key, msg, digest) except _hashopenssl.UnsupportedDigestmodError: pass if _hmac and isinstance(digest, str): try: return _hmac.compute_digest(key, msg, digest) except (OverflowError, _hmac.UnknownHashError): # HACL* HMAC limits the size of the key to UINT32_MAX # so we fallback to the pure Python implementation even # if streaming HMAC may have been used for small keys # and large messages. 
pass return _compute_digest_fallback(key, msg, digest) def _compute_digest_fallback(key, msg, digest): digest_cons = _get_digest_constructor(digest) if _is_shake_constructor(digest_cons): raise ValueError(f"unsupported hash algorithm {digest}") inner = digest_cons() outer = digest_cons() blocksize = getattr(inner, 'block_size', 64) if len(key) > blocksize: key = digest_cons(key).digest() key = key.ljust(blocksize, b'\0') inner.update(key.translate(trans_36)) outer.update(key.translate(trans_5C)) inner.update(msg) outer.update(inner.digest()) return outer.digest()
python
github
https://github.com/python/cpython
Lib/hmac.py
#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import os import re import random import shutil import socket import string import json import ipaddress import charms.leadership from shutil import move from shlex import split from subprocess import check_call from subprocess import check_output from subprocess import CalledProcessError from charms import layer from charms.layer import snap from charms.reactive import hook from charms.reactive import remove_state from charms.reactive import set_state from charms.reactive import is_state from charms.reactive import when, when_any, when_not, when_all from charms.reactive.helpers import data_changed, any_file_changed from charms.kubernetes.common import get_version from charms.kubernetes.common import retry from charms.layer import tls_client from charmhelpers.core import hookenv from charmhelpers.core import host from charmhelpers.core import unitdata from charmhelpers.core.host import service_stop from charmhelpers.core.templating import render from charmhelpers.fetch import apt_install from charmhelpers.contrib.charmsupport import nrpe # Override the default nagios shortname regex to allow periods, which we # need because our bin names contain them (e.g. 'snap.foo.daemon'). The # default regex in charmhelpers doesn't allow periods, but nagios itself does. 
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$' os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') def set_upgrade_needed(): set_state('kubernetes-master.upgrade-needed') config = hookenv.config() previous_channel = config.previous('channel') require_manual = config.get('require-manual-upgrade') hookenv.log('set upgrade needed') if previous_channel is None or not require_manual: hookenv.log('forcing upgrade') set_state('kubernetes-master.upgrade-specified') @when('config.changed.channel') def channel_changed(): set_upgrade_needed() def service_cidr(): ''' Return the charm's service-cidr config ''' db = unitdata.kv() frozen_cidr = db.get('kubernetes-master.service-cidr') return frozen_cidr or hookenv.config('service-cidr') def freeze_service_cidr(): ''' Freeze the service CIDR. Once the apiserver has started, we can no longer safely change this value. ''' db = unitdata.kv() db.set('kubernetes-master.service-cidr', service_cidr()) @hook('upgrade-charm') def check_for_upgrade_needed(): '''An upgrade charm event was triggered by Juju, react to that here.''' hookenv.status_set('maintenance', 'Checking resources') migrate_from_pre_snaps() add_rbac_roles() set_state('reconfigure.authentication.setup') remove_state('authentication.setup') resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager', 'kube-scheduler', 'cdk-addons'] paths = [hookenv.resource_get(resource) for resource in resources] if any_file_changed(paths): set_upgrade_needed() def add_rbac_roles(): '''Update the known_tokens file with proper groups.''' tokens_fname = '/root/cdk/known_tokens.csv' tokens_backup_fname = '/root/cdk/known_tokens.csv.backup' move(tokens_fname, tokens_backup_fname) with open(tokens_fname, 'w') as ftokens: with open(tokens_backup_fname, 'r') as stream: for line in stream: record = line.strip().split(',') # token, username, user, groups if record[2] == 'admin' and len(record) == 3: towrite = '{0},{1},{2},"{3}"\n'.format(record[0], record[1], record[2], 
'system:masters') ftokens.write(towrite) continue if record[2] == 'kube_proxy': towrite = '{0},{1},{2}\n'.format(record[0], 'system:kube-proxy', 'kube-proxy') ftokens.write(towrite) continue if record[2] == 'kubelet' and record[1] == 'kubelet': continue ftokens.write('{}'.format(line)) def rename_file_idempotent(source, destination): if os.path.isfile(source): os.rename(source, destination) def migrate_from_pre_snaps(): # remove old states remove_state('kubernetes.components.installed') remove_state('kubernetes.dashboard.available') remove_state('kube-dns.available') remove_state('kubernetes-master.app_version.set') # disable old services services = ['kube-apiserver', 'kube-controller-manager', 'kube-scheduler'] for service in services: hookenv.log('Stopping {0} service.'.format(service)) host.service_stop(service) # rename auth files os.makedirs('/root/cdk', exist_ok=True) rename_file_idempotent('/etc/kubernetes/serviceaccount.key', '/root/cdk/serviceaccount.key') rename_file_idempotent('/srv/kubernetes/basic_auth.csv', '/root/cdk/basic_auth.csv') rename_file_idempotent('/srv/kubernetes/known_tokens.csv', '/root/cdk/known_tokens.csv') # cleanup old files files = [ "/lib/systemd/system/kube-apiserver.service", "/lib/systemd/system/kube-controller-manager.service", "/lib/systemd/system/kube-scheduler.service", "/etc/default/kube-defaults", "/etc/default/kube-apiserver.defaults", "/etc/default/kube-controller-manager.defaults", "/etc/default/kube-scheduler.defaults", "/srv/kubernetes", "/home/ubuntu/kubectl", "/usr/local/bin/kubectl", "/usr/local/bin/kube-apiserver", "/usr/local/bin/kube-controller-manager", "/usr/local/bin/kube-scheduler", "/etc/kubernetes" ] for file in files: if os.path.isdir(file): hookenv.log("Removing directory: " + file) shutil.rmtree(file) elif os.path.isfile(file): hookenv.log("Removing file: " + file) os.remove(file) @when('kubernetes-master.upgrade-needed') @when_not('kubernetes-master.upgrade-specified') def upgrade_needed_status(): msg = 
'Needs manual upgrade, run the upgrade action' hookenv.status_set('blocked', msg) @when('kubernetes-master.upgrade-specified') def do_upgrade(): install_snaps() remove_state('kubernetes-master.upgrade-needed') remove_state('kubernetes-master.upgrade-specified') def install_snaps(): channel = hookenv.config('channel') hookenv.status_set('maintenance', 'Installing kubectl snap') snap.install('kubectl', channel=channel, classic=True) hookenv.status_set('maintenance', 'Installing kube-apiserver snap') snap.install('kube-apiserver', channel=channel) hookenv.status_set('maintenance', 'Installing kube-controller-manager snap') snap.install('kube-controller-manager', channel=channel) hookenv.status_set('maintenance', 'Installing kube-scheduler snap') snap.install('kube-scheduler', channel=channel) hookenv.status_set('maintenance', 'Installing cdk-addons snap') snap.install('cdk-addons', channel=channel) set_state('kubernetes-master.snaps.installed') remove_state('kubernetes-master.components.started') @when('config.changed.client_password', 'leadership.is_leader') def password_changed(): """Handle password change via the charms config.""" password = hookenv.config('client_password') if password == "" and is_state('client.password.initialised'): # password_changed is called during an upgrade. Nothing to do. return elif password == "": # Password not initialised password = token_generator() setup_basic_auth(password, "admin", "admin") set_state('reconfigure.authentication.setup') remove_state('authentication.setup') set_state('client.password.initialised') @when('cni.connected') @when_not('cni.configured') def configure_cni(cni): ''' Set master configuration on the CNI relation. This lets the CNI subordinate know that we're the master so it can respond accordingly. 
''' cni.set_config(is_master=True, kubeconfig_path='') @when('leadership.is_leader') @when_not('authentication.setup') def setup_leader_authentication(): '''Setup basic authentication and token access for the cluster.''' service_key = '/root/cdk/serviceaccount.key' basic_auth = '/root/cdk/basic_auth.csv' known_tokens = '/root/cdk/known_tokens.csv' hookenv.status_set('maintenance', 'Rendering authentication templates.') keys = [service_key, basic_auth, known_tokens] # Try first to fetch data from an old leadership broadcast. if not get_keys_from_leader(keys) \ or is_state('reconfigure.authentication.setup'): last_pass = get_password('basic_auth.csv', 'admin') setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters') if not os.path.isfile(known_tokens): touch(known_tokens) # Generate the default service account token key os.makedirs('/root/cdk', exist_ok=True) if not os.path.isfile(service_key): cmd = ['openssl', 'genrsa', '-out', service_key, '2048'] check_call(cmd) remove_state('reconfigure.authentication.setup') # read service account key for syndication leader_data = {} for f in [known_tokens, basic_auth, service_key]: with open(f, 'r') as fp: leader_data[f] = fp.read() # this is slightly opaque, but we are sending file contents under its file # path as a key. # eg: # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'} charms.leadership.leader_set(leader_data) remove_state('kubernetes-master.components.started') set_state('authentication.setup') @when_not('leadership.is_leader') def setup_non_leader_authentication(): service_key = '/root/cdk/serviceaccount.key' basic_auth = '/root/cdk/basic_auth.csv' known_tokens = '/root/cdk/known_tokens.csv' keys = [service_key, basic_auth, known_tokens] # The source of truth for non-leaders is the leader. # Therefore we overwrite_local with whatever the leader has. if not get_keys_from_leader(keys, overwrite_local=True): # the keys were not retrieved. Non-leaders have to retry. 
return if not any_file_changed(keys) and is_state('authentication.setup'): # No change detected and we have already setup the authentication return hookenv.status_set('maintenance', 'Rendering authentication templates.') remove_state('kubernetes-master.components.started') set_state('authentication.setup') def get_keys_from_leader(keys, overwrite_local=False): """ Gets the broadcasted keys from the leader and stores them in the corresponding files. Args: keys: list of keys. Keys are actually files on the FS. Returns: True if all key were fetched, False if not. """ # This races with other codepaths, and seems to require being created first # This block may be extracted later, but for now seems to work as intended os.makedirs('/root/cdk', exist_ok=True) for k in keys: # If the path does not exist, assume we need it if not os.path.exists(k) or overwrite_local: # Fetch data from leadership broadcast contents = charms.leadership.leader_get(k) # Default to logging the warning and wait for leader data to be set if contents is None: msg = "Waiting on leaders crypto keys." hookenv.status_set('waiting', msg) hookenv.log('Missing content for file {}'.format(k)) return False # Write out the file and move on to the next item with open(k, 'w+') as fp: fp.write(contents) fp.write('\n') return True @when('kubernetes-master.snaps.installed') def set_app_version(): ''' Declare the application version to juju ''' version = check_output(['kube-apiserver', '--version']) hookenv.application_version_set(version.split(b' v')[-1].rstrip()) @when('cdk-addons.configured', 'kube-api-endpoint.available', 'kube-control.connected') def idle_status(kube_api, kube_control): ''' Signal at the end of the run that we are running. 
''' if not all_kube_system_pods_running(): hookenv.status_set('waiting', 'Waiting for kube-system pods to start') elif hookenv.config('service-cidr') != service_cidr(): msg = 'WARN: cannot change service-cidr, still using ' + service_cidr() hookenv.status_set('active', msg) else: # All services should be up and running at this point. Double-check... failing_services = master_services_down() if len(failing_services) == 0: hookenv.status_set('active', 'Kubernetes master running.') else: msg = 'Stopped services: {}'.format(','.join(failing_services)) hookenv.status_set('blocked', msg) def master_services_down(): """Ensure master services are up and running. Return: list of failing services""" services = ['kube-apiserver', 'kube-controller-manager', 'kube-scheduler'] failing_services = [] for service in services: daemon = 'snap.{}.daemon'.format(service) if not host.service_running(daemon): failing_services.append(service) return failing_services @when('etcd.available', 'tls_client.server.certificate.saved', 'authentication.setup') @when_not('kubernetes-master.components.started') def start_master(etcd): '''Run the Kubernetes master components.''' hookenv.status_set('maintenance', 'Configuring the Kubernetes master services.') freeze_service_cidr() if not etcd.get_connection_string(): # etcd is not returning a connection string. This happens when # the master unit disconnects from etcd and is ready to terminate. # No point in trying to start master services and fail. Just return. return # TODO: Make sure below relation is handled on change # https://github.com/kubernetes/kubernetes/issues/43461 handle_etcd_relation(etcd) # Add CLI options to all components configure_apiserver(etcd) configure_controller_manager() configure_scheduler() hookenv.open_port(6443) @when('etcd.available') def etcd_data_change(etcd): ''' Etcd scale events block master reconfiguration due to the kubernetes-master.components.started state. 
We need a way to handle these events consistenly only when the number of etcd units has actually changed ''' # key off of the connection string connection_string = etcd.get_connection_string() # If the connection string changes, remove the started state to trigger # handling of the master components if data_changed('etcd-connect', connection_string): remove_state('kubernetes-master.components.started') @when('kube-control.connected') @when('cdk-addons.configured') def send_cluster_dns_detail(kube_control): ''' Send cluster DNS info ''' # Note that the DNS server doesn't necessarily exist at this point. We know # where we're going to put it, though, so let's send the info anyway. dns_ip = get_dns_ip() kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip) @when('kube-control.connected') @when('snap.installed.kubectl') @when('leadership.is_leader') def create_service_configs(kube_control): """Create the users for kubelet""" should_restart = False # generate the username/pass for the requesting unit proxy_token = get_token('system:kube-proxy') if not proxy_token: setup_tokens(None, 'system:kube-proxy', 'kube-proxy') proxy_token = get_token('system:kube-proxy') should_restart = True client_token = get_token('admin') if not client_token: setup_tokens(None, 'admin', 'admin', "system:masters") client_token = get_token('admin') should_restart = True requests = kube_control.auth_user() for request in requests: username = request[1]['user'] group = request[1]['group'] kubelet_token = get_token(username) if not kubelet_token and username and group: # Usernames have to be in the form of system:node:<nodeName> userid = "kubelet-{}".format(request[0].split('/')[1]) setup_tokens(None, username, userid, group) kubelet_token = get_token(username) kube_control.sign_auth_request(request[0], username, kubelet_token, proxy_token, client_token) should_restart = True if should_restart: host.service_restart('snap.kube-apiserver.daemon') remove_state('authentication.setup') 
@when_not('kube-control.connected') def missing_kube_control(): """Inform the operator master is waiting for a relation to workers. If deploying via bundle this won't happen, but if operator is upgrading a a charm in a deployment that pre-dates the kube-control relation, it'll be missing. """ hookenv.status_set('blocked', 'Waiting for workers.') @when('kube-api-endpoint.available') def push_service_data(kube_api): ''' Send configuration to the load balancer, and close access to the public interface ''' kube_api.configure(port=6443) @when('certificates.available') def send_data(tls): '''Send the data that is required to create a server certificate for this server.''' # Use the public ip of this unit as the Common Name for the certificate. common_name = hookenv.unit_public_ip() # Get the SDN gateway based on the cidr address. kubernetes_service_ip = get_kubernetes_service_ip() domain = hookenv.config('dns_domain') # Create SANs that the tls layer will add to the server cert. sans = [ hookenv.unit_public_ip(), hookenv.unit_private_ip(), socket.gethostname(), kubernetes_service_ip, 'kubernetes', 'kubernetes.{0}'.format(domain), 'kubernetes.default', 'kubernetes.default.svc', 'kubernetes.default.svc.{0}'.format(domain) ] # maybe they have extra names they want as SANs extra_sans = hookenv.config('extra_sans') if extra_sans and not extra_sans == "": sans.extend(extra_sans.split()) # Create a path safe name by removing path characters from the unit name. certificate_name = hookenv.local_unit().replace('/', '_') # Request a server cert with this information. tls.request_server_cert(common_name, sans, certificate_name) @when('config.changed.extra_sans', 'certificates.available') def update_certificate(tls): # Using the config.changed.extra_sans flag to catch changes. # IP changes will take ~5 minutes or so to propagate, but # it will update. 
send_data(tls) @when('certificates.server.cert.available', 'kubernetes-master.components.started', 'tls_client.server.certificate.written') def kick_api_server(tls): # need to be idempotent and don't want to kick the api server # without need if data_changed('cert', tls.get_server_cert()): # certificate changed, so restart the api server hookenv.log("Certificate information changed, restarting api server") set_state('kube-apiserver.do-restart') tls_client.reset_certificate_write_flag('server') @when('kubernetes-master.components.started') def configure_cdk_addons(): ''' Configure CDK addons ''' remove_state('cdk-addons.configured') dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower() args = [ 'arch=' + arch(), 'dns-ip=' + get_dns_ip(), 'dns-domain=' + hookenv.config('dns_domain'), 'enable-dashboard=' + dbEnabled ] check_call(['snap', 'set', 'cdk-addons'] + args) if not addons_ready(): hookenv.status_set('waiting', 'Waiting to retry addon deployment') remove_state('cdk-addons.configured') return set_state('cdk-addons.configured') @retry(times=3, delay_secs=20) def addons_ready(): """ Test if the add ons got installed Returns: True is the addons got applied """ try: check_call(['cdk-addons.apply']) return True except CalledProcessError: hookenv.log("Addons are not ready yet.") return False @when('loadbalancer.available', 'certificates.ca.available', 'certificates.client.cert.available', 'authentication.setup') def loadbalancer_kubeconfig(loadbalancer, ca, client): # Get the potential list of loadbalancers from the relation object. hosts = loadbalancer.get_addresses_ports() # Get the public address of loadbalancers so users can access the cluster. address = hosts[0].get('public-address') # Get the port of the loadbalancer so users can access the cluster. 
port = hosts[0].get('port') server = 'https://{0}:{1}'.format(address, port) build_kubeconfig(server) @when('certificates.ca.available', 'certificates.client.cert.available', 'authentication.setup') @when_not('loadbalancer.available') def create_self_config(ca, client): '''Create a kubernetes configuration for the master unit.''' server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443) build_kubeconfig(server) @when('ceph-storage.available') def ceph_state_control(ceph_admin): ''' Determine if we should remove the state that controls the re-render and execution of the ceph-relation-changed event because there are changes in the relationship data, and we should re-render any configs, keys, and/or service pre-reqs ''' ceph_relation_data = { 'mon_hosts': ceph_admin.mon_hosts(), 'fsid': ceph_admin.fsid(), 'auth_supported': ceph_admin.auth(), 'hostname': socket.gethostname(), 'key': ceph_admin.key() } # Re-execute the rendering if the data has changed. if data_changed('ceph-config', ceph_relation_data): remove_state('ceph-storage.configured') @when('ceph-storage.available') @when_not('ceph-storage.configured') def ceph_storage(ceph_admin): '''Ceph on kubernetes will require a few things - namely a ceph configuration, and the ceph secret key file used for authentication. This method will install the client package, and render the requisit files in order to consume the ceph-storage relation.''' ceph_context = { 'mon_hosts': ceph_admin.mon_hosts(), 'fsid': ceph_admin.fsid(), 'auth_supported': ceph_admin.auth(), 'use_syslog': "true", 'ceph_public_network': '', 'ceph_cluster_network': '', 'loglevel': 1, 'hostname': socket.gethostname(), } # Install the ceph common utilities. 
apt_install(['ceph-common'], fatal=True) etc_ceph_directory = '/etc/ceph' if not os.path.isdir(etc_ceph_directory): os.makedirs(etc_ceph_directory) charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf') # Render the ceph configuration from the ceph conf template render('ceph.conf', charm_ceph_conf, ceph_context) # The key can rotate independently of other ceph config, so validate it admin_key = os.path.join(etc_ceph_directory, 'ceph.client.admin.keyring') try: with open(admin_key, 'w') as key_file: key_file.write("[client.admin]\n\tkey = {}\n".format( ceph_admin.key())) except IOError as err: hookenv.log("IOError writing admin.keyring: {}".format(err)) # Enlist the ceph-admin key as a kubernetes secret if ceph_admin.key(): encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8')) else: # We didn't have a key, and cannot proceed. Do not set state and # allow this method to re-execute return context = {'secret': encoded_key.decode('ascii')} render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context) try: # At first glance this is deceptive. The apply stanza will create if # it doesn't exist, otherwise it will update the entry, ensuring our # ceph-secret is always reflective of what we have in /etc/ceph # assuming we have invoked this anytime that file would change. cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml'] check_call(cmd) os.remove('/tmp/ceph-secret.yaml') except: # NOQA # the enlistment in kubernetes failed, return and prepare for re-exec return # when complete, set a state relating to configuration of the storage # backend that will allow other modules to hook into this and verify we # have performed the necessary pre-req steps to interface with a ceph # deployment. 
set_state('ceph-storage.configured') @when('nrpe-external-master.available') @when_not('nrpe-external-master.initial-config') def initial_nrpe_config(nagios=None): set_state('nrpe-external-master.initial-config') update_nrpe_config(nagios) @when('config.changed.authorization-mode', 'kubernetes-master.components.started') def switch_auth_mode(): config = hookenv.config() mode = config.get('authorization-mode') if data_changed('auth-mode', mode): remove_state('kubernetes-master.components.started') @when('kubernetes-master.components.started') @when('nrpe-external-master.available') @when_any('config.changed.nagios_context', 'config.changed.nagios_servicegroups') def update_nrpe_config(unused=None): services = ( 'snap.kube-apiserver.daemon', 'snap.kube-controller-manager.daemon', 'snap.kube-scheduler.daemon' ) hostname = nrpe.get_nagios_hostname() current_unit = nrpe.get_nagios_unit_name() nrpe_setup = nrpe.NRPE(hostname=hostname) nrpe.add_init_service_checks(nrpe_setup, services, current_unit) nrpe_setup.write() @when_not('nrpe-external-master.available') @when('nrpe-external-master.initial-config') def remove_nrpe_config(nagios=None): remove_state('nrpe-external-master.initial-config') # List of systemd services for which the checks will be removed services = ( 'snap.kube-apiserver.daemon', 'snap.kube-controller-manager.daemon', 'snap.kube-scheduler.daemon' ) # The current nrpe-external-master interface doesn't handle a lot of logic, # use the charm-helpers code for now. hostname = nrpe.get_nagios_hostname() nrpe_setup = nrpe.NRPE(hostname=hostname) for service in services: nrpe_setup.remove_check(shortname=service) def is_privileged(): """Return boolean indicating whether or not to set allow-privileged=true. 
""" privileged = hookenv.config('allow-privileged') if privileged == 'auto': return is_state('kubernetes-master.gpu.enabled') else: return privileged == 'true' @when('config.changed.allow-privileged') @when('kubernetes-master.components.started') def on_config_allow_privileged_change(): """React to changed 'allow-privileged' config value. """ remove_state('kubernetes-master.components.started') remove_state('config.changed.allow-privileged') @when('config.changed.api-extra-args') @when('kubernetes-master.components.started') @when('etcd.available') def on_config_api_extra_args_change(etcd): configure_apiserver(etcd) @when('config.changed.controller-manager-extra-args') @when('kubernetes-master.components.started') def on_config_controller_manager_extra_args_change(): configure_controller_manager() @when('config.changed.scheduler-extra-args') @when('kubernetes-master.components.started') def on_config_scheduler_extra_args_change(): configure_scheduler() @when('kube-control.gpu.available') @when('kubernetes-master.components.started') @when_not('kubernetes-master.gpu.enabled') def on_gpu_available(kube_control): """The remote side (kubernetes-worker) is gpu-enabled. We need to run in privileged mode. """ config = hookenv.config() if config['allow-privileged'] == "false": hookenv.status_set( 'active', 'GPUs available. Set allow-privileged="auto" to enable.' ) return remove_state('kubernetes-master.components.started') set_state('kubernetes-master.gpu.enabled') @when('kubernetes-master.gpu.enabled') @when_not('kubernetes-master.privileged') def disable_gpu_mode(): """We were in gpu mode, but the operator has set allow-privileged="false", so we can't run in gpu mode anymore. 
""" remove_state('kubernetes-master.gpu.enabled') @hook('stop') def shutdown(): """ Stop the kubernetes master services """ service_stop('snap.kube-apiserver.daemon') service_stop('snap.kube-controller-manager.daemon') service_stop('snap.kube-scheduler.daemon') @when('kube-apiserver.do-restart') def restart_apiserver(): prev_state, prev_msg = hookenv.status_get() hookenv.status_set('maintenance', 'Restarting kube-apiserver') host.service_restart('snap.kube-apiserver.daemon') hookenv.status_set(prev_state, prev_msg) remove_state('kube-apiserver.do-restart') set_state('kube-apiserver.started') @when('kube-controller-manager.do-restart') def restart_controller_manager(): prev_state, prev_msg = hookenv.status_get() hookenv.status_set('maintenance', 'Restarting kube-controller-manager') host.service_restart('snap.kube-controller-manager.daemon') hookenv.status_set(prev_state, prev_msg) remove_state('kube-controller-manager.do-restart') set_state('kube-controller-manager.started') @when('kube-scheduler.do-restart') def restart_scheduler(): prev_state, prev_msg = hookenv.status_get() hookenv.status_set('maintenance', 'Restarting kube-scheduler') host.service_restart('snap.kube-scheduler.daemon') hookenv.status_set(prev_state, prev_msg) remove_state('kube-scheduler.do-restart') set_state('kube-scheduler.started') @when_all('kube-apiserver.started', 'kube-controller-manager.started', 'kube-scheduler.started') @when_not('kubernetes-master.components.started') def componenets_started(): set_state('kubernetes-master.components.started') def arch(): '''Return the package architecture as a string. Raise an exception if the architecture is not supported by kubernetes.''' # Get the package architecture for this system. architecture = check_output(['dpkg', '--print-architecture']).rstrip() # Convert the binary result into a string. 
architecture = architecture.decode('utf-8') return architecture def build_kubeconfig(server): '''Gather the relevant data for Kubernetes configuration objects and create a config object with that information.''' # Get the options from the tls-client layer. layer_options = layer.options('tls-client') # Get all the paths to the tls information required for kubeconfig. ca = layer_options.get('ca_certificate_path') ca_exists = ca and os.path.isfile(ca) client_pass = get_password('basic_auth.csv', 'admin') # Do we have everything we need? if ca_exists and client_pass: # Create an absolute path for the kubeconfig file. kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config') # Create the kubeconfig on this system so users can access the cluster. create_kubeconfig(kubeconfig_path, server, ca, user='admin', password=client_pass) # Make the config file readable by the ubuntu users so juju scp works. cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path] check_call(cmd) def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, user='ubuntu', context='juju-context', cluster='juju-cluster', password=None, token=None): '''Create a configuration for Kubernetes based on path using the supplied arguments for values of the Kubernetes server, CA, key, certificate, user context and cluster.''' if not key and not certificate and not password and not token: raise ValueError('Missing authentication mechanism.') # token and password are mutually exclusive. Error early if both are # present. The developer has requested an impossible situation. # see: kubectl config set-credentials --help if token and password: raise ValueError('Token and Password are mutually exclusive.') # Create the config file with the address of the master server. 
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \ '--server={2} --certificate-authority={3} --embed-certs=true' check_call(split(cmd.format(kubeconfig, cluster, server, ca))) # Delete old users cmd = 'kubectl config --kubeconfig={0} unset users' check_call(split(cmd.format(kubeconfig))) # Create the credentials using the client flags. cmd = 'kubectl config --kubeconfig={0} ' \ 'set-credentials {1} '.format(kubeconfig, user) if key and certificate: cmd = '{0} --client-key={1} --client-certificate={2} '\ '--embed-certs=true'.format(cmd, key, certificate) if password: cmd = "{0} --username={1} --password={2}".format(cmd, user, password) # This is mutually exclusive from password. They will not work together. if token: cmd = "{0} --token={1}".format(cmd, token) check_call(split(cmd)) # Create a default context with the cluster. cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \ '--cluster={2} --user={3}' check_call(split(cmd.format(kubeconfig, context, cluster, user))) # Make the config use this new context. cmd = 'kubectl config --kubeconfig={0} use-context {1}' check_call(split(cmd.format(kubeconfig, context))) def get_dns_ip(): '''Get an IP address for the DNS server on the provided cidr.''' interface = ipaddress.IPv4Interface(service_cidr()) # Add .10 at the end of the network ip = interface.network.network_address + 10 return ip.exploded def get_kubernetes_service_ip(): '''Get the IP address for the kubernetes service based on the cidr.''' interface = ipaddress.IPv4Interface(service_cidr()) # Add .1 at the end of the network ip = interface.network.network_address + 1 return ip.exploded def handle_etcd_relation(reldata): ''' Save the client credentials and set appropriate daemon flags when etcd declares itself as available''' # Define where the etcd tls files will be kept. etcd_dir = '/root/cdk/etcd' # Create paths to the etcd client ca, key, and cert file locations. 
ca = os.path.join(etcd_dir, 'client-ca.pem') key = os.path.join(etcd_dir, 'client-key.pem') cert = os.path.join(etcd_dir, 'client-cert.pem') # Save the client credentials (in relation data) to the paths provided. reldata.save_client_credentials(key, cert, ca) def parse_extra_args(config_key): elements = hookenv.config().get(config_key, '').split() args = {} for element in elements: if '=' in element: key, _, value = element.partition('=') args[key] = value else: args[element] = 'true' return args def configure_kubernetes_service(service, base_args, extra_args_key): db = unitdata.kv() prev_args_key = 'kubernetes-master.prev_args.' + service prev_args = db.get(prev_args_key) or {} extra_args = parse_extra_args(extra_args_key) args = {} for arg in prev_args: # remove previous args by setting to null args[arg] = 'null' for k, v in base_args.items(): args[k] = v for k, v in extra_args.items(): args[k] = v cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()] check_call(cmd) db.set(prev_args_key, args) def configure_apiserver(etcd): api_opts = {} # Get the tls paths from the layer data. 
layer_options = layer.options('tls-client') ca_cert_path = layer_options.get('ca_certificate_path') client_cert_path = layer_options.get('client_certificate_path') client_key_path = layer_options.get('client_key_path') server_cert_path = layer_options.get('server_certificate_path') server_key_path = layer_options.get('server_key_path') if is_privileged(): api_opts['allow-privileged'] = 'true' set_state('kubernetes-master.privileged') else: api_opts['allow-privileged'] = 'false' remove_state('kubernetes-master.privileged') # Handle static options for now api_opts['service-cluster-ip-range'] = service_cidr() api_opts['min-request-timeout'] = '300' api_opts['v'] = '4' api_opts['tls-cert-file'] = server_cert_path api_opts['tls-private-key-file'] = server_key_path api_opts['kubelet-certificate-authority'] = ca_cert_path api_opts['kubelet-client-certificate'] = client_cert_path api_opts['kubelet-client-key'] = client_key_path api_opts['logtostderr'] = 'true' api_opts['insecure-bind-address'] = '127.0.0.1' api_opts['insecure-port'] = '8080' api_opts['storage-backend'] = 'etcd2' # FIXME: add etcd3 support api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv' api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv' api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key' etcd_dir = '/root/cdk/etcd' etcd_ca = os.path.join(etcd_dir, 'client-ca.pem') etcd_key = os.path.join(etcd_dir, 'client-key.pem') etcd_cert = os.path.join(etcd_dir, 'client-cert.pem') api_opts['etcd-cafile'] = etcd_ca api_opts['etcd-keyfile'] = etcd_key api_opts['etcd-certfile'] = etcd_cert api_opts['etcd-servers'] = etcd.get_connection_string() admission_control = [ 'Initializers', 'NamespaceLifecycle', 'LimitRanger', 'ServiceAccount', 'ResourceQuota', 'DefaultTolerationSeconds' ] auth_mode = hookenv.config('authorization-mode') if 'Node' in auth_mode: admission_control.append('NodeRestriction') api_opts['authorization-mode'] = auth_mode if get_version('kube-apiserver') < (1, 6): 
hookenv.log('Removing DefaultTolerationSeconds from admission-control') admission_control.remove('DefaultTolerationSeconds') if get_version('kube-apiserver') < (1, 7): hookenv.log('Removing Initializers from admission-control') admission_control.remove('Initializers') api_opts['admission-control'] = ','.join(admission_control) configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args') set_state('kube-apiserver.do-restart') def configure_controller_manager(): controller_opts = {} # Get the tls paths from the layer data. layer_options = layer.options('tls-client') ca_cert_path = layer_options.get('ca_certificate_path') # Default to 3 minute resync. TODO: Make this configureable? controller_opts['min-resync-period'] = '3m' controller_opts['v'] = '2' controller_opts['root-ca-file'] = ca_cert_path controller_opts['logtostderr'] = 'true' controller_opts['master'] = 'http://127.0.0.1:8080' controller_opts['service-account-private-key-file'] = \ '/root/cdk/serviceaccount.key' configure_kubernetes_service('kube-controller-manager', controller_opts, 'controller-manager-extra-args') set_state('kube-controller-manager.do-restart') def configure_scheduler(): scheduler_opts = {} scheduler_opts['v'] = '2' scheduler_opts['logtostderr'] = 'true' scheduler_opts['master'] = 'http://127.0.0.1:8080' configure_kubernetes_service('kube-scheduler', scheduler_opts, 'scheduler-extra-args') set_state('kube-scheduler.do-restart') def setup_basic_auth(password=None, username='admin', uid='admin', groups=None): '''Create the htacces file and the tokens.''' root_cdk = '/root/cdk' if not os.path.isdir(root_cdk): os.makedirs(root_cdk) htaccess = os.path.join(root_cdk, 'basic_auth.csv') if not password: password = token_generator() with open(htaccess, 'w') as stream: if groups: stream.write('{0},{1},{2},"{3}"'.format(password, username, uid, groups)) else: stream.write('{0},{1},{2}'.format(password, username, uid)) def setup_tokens(token, username, user, groups=None): '''Create a 
token file for kubernetes authentication.''' root_cdk = '/root/cdk' if not os.path.isdir(root_cdk): os.makedirs(root_cdk) known_tokens = os.path.join(root_cdk, 'known_tokens.csv') if not token: token = token_generator() with open(known_tokens, 'a') as stream: if groups: stream.write('{0},{1},{2},"{3}"\n'.format(token, username, user, groups)) else: stream.write('{0},{1},{2}\n'.format(token, username, user)) def get_password(csv_fname, user): '''Get the password of user within the csv file provided.''' root_cdk = '/root/cdk' tokens_fname = os.path.join(root_cdk, csv_fname) if not os.path.isfile(tokens_fname): return None with open(tokens_fname, 'r') as stream: for line in stream: record = line.split(',') if record[1] == user: return record[0] return None def get_token(username): """Grab a token from the static file if present. """ return get_password('known_tokens.csv', username) def set_token(password, save_salt): ''' Store a token so it can be recalled later by token_generator. param: password - the password to be stored param: save_salt - the key to store the value of the token.''' db = unitdata.kv() db.set(save_salt, password) return db.get(save_salt) def token_generator(length=32): ''' Generate a random token for use in passwords and account tokens. param: length - the length of the token to generate''' alpha = string.ascii_letters + string.digits token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length)) return token @retry(times=3, delay_secs=10) def all_kube_system_pods_running(): ''' Check pod status in the kube-system namespace. Returns True if all pods are running, False otherwise. 
''' cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json'] try: output = check_output(cmd).decode('utf-8') except CalledProcessError: hookenv.log('failed to get kube-system pod status') return False result = json.loads(output) for pod in result['items']: status = pod['status']['phase'] if status != 'Running': return False return True def apiserverVersion(): cmd = 'kube-apiserver --version'.split() version_string = check_output(cmd).decode('utf-8') return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) def touch(fname): try: os.utime(fname, None) except OSError: open(fname, 'a').close()
unknown
codeparrot/codeparrot-clean
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2019-10-23 and added to Hugging Face Transformers on 2020-11-16.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # T5 [T5](https://huggingface.co/papers/1910.10683) is a encoder-decoder transformer available in a range of sizes from 60M to 11B parameters. It is designed to handle a wide range of NLP tasks by treating them all as text-to-text problems. This eliminates the need for task-specific architectures because T5 converts every NLP task into a text generation task. To formulate every task as text generation, each task is prepended with a task-specific prefix (e.g., translate English to German: ..., summarize: ...). This enables T5 to handle tasks like translation, summarization, question answering, and more. You can find all official T5 checkpoints under the [T5](https://huggingface.co/collections/google/t5-release-65005e7c520f8d7b4d037918) collection. > [!TIP] > Click on the T5 models in the right sidebar for more examples of how to apply T5 to different language tasks. 
The example below demonstrates how to generate text with [`Pipeline`], [`AutoModel`], and how to translate with T5 from the command line. <hfoptions id="usage"> <hfoption id="Pipeline"> ```py import torch from transformers import pipeline pipeline = pipeline( task="text2text-generation", model="google-t5/t5-base", dtype=torch.float16, device=0 ) pipeline("translate English to French: The weather is nice today.") ``` </hfoption> <hfoption id="AutoModel"> ```py import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( "google-t5/t5-base" ) model = AutoModelForSeq2SeqLM.from_pretrained( "google-t5/t5-base", dtype=torch.float16, device_map="auto" ) input_ids = tokenizer("translate English to French: The weather is nice today.", return_tensors="pt").to(model.device) output = model.generate(**input_ids, cache_implementation="static") print(tokenizer.decode(output[0], skip_special_tokens=True)) ``` </hfoption> <hfoption id="transformers CLI"> ```bash echo -e "translate English to French: The weather is nice today." | transformers run --task text2text-generation --model google-t5/t5-base --device 0 ``` </hfoption> </hfoptions> Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends. The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4. 
```py # pip install torchao import torch from transformers import TorchAoConfig, AutoModelForSeq2SeqLM, AutoTokenizer quantization_config = TorchAoConfig("int4_weight_only", group_size=128) model = AutoModelForSeq2SeqLM.from_pretrained( "google/t5-v1_1-xl", dtype=torch.bfloat16, device_map="auto", quantization_config=quantization_config ) tokenizer = AutoTokenizer.from_pretrained("google/t5-v1_1-xl") input_ids = tokenizer("translate English to French: The weather is nice today.", return_tensors="pt").to(model.device) output = model.generate(**input_ids, cache_implementation="static") print(tokenizer.decode(output[0], skip_special_tokens=True)) ``` ## Notes - You can pad the encoder inputs on the left or right because T5 uses relative scalar embeddings. - T5 models need a slightly higher learning rate than the default used in [`Trainer`]. Typically, values of `1e-4` and `3e-4` work well for most tasks. ## T5Config [[autodoc]] T5Config ## T5Tokenizer [[autodoc]] T5Tokenizer - get_special_tokens_mask - save_vocabulary ## T5TokenizerFast [[autodoc]] T5TokenizerFast ## T5Model [[autodoc]] T5Model - forward ## T5ForConditionalGeneration [[autodoc]] T5ForConditionalGeneration - forward ## T5EncoderModel [[autodoc]] T5EncoderModel - forward ## T5ForSequenceClassification [[autodoc]] T5ForSequenceClassification - forward ## T5ForTokenClassification [[autodoc]] T5ForTokenClassification - forward ## T5ForQuestionAnswering [[autodoc]] T5ForQuestionAnswering - forward
unknown
github
https://github.com/huggingface/transformers
docs/source/en/model_doc/t5.md
""" ====================================== Gradient Boosting Out-of-Bag estimates ====================================== Out-of-bag (OOB) estimates can be a useful heuristic to estimate the "optimal" number of boosting iterations. OOB estimates are almost identical to cross-validation estimates but they can be computed on-the-fly without the need for repeated model fitting. OOB estimates are only available for Stochastic Gradient Boosting (i.e. ``subsample < 1.0``), the estimates are derived from the improvement in loss based on the examples not included in the bootstrap sample (the so-called out-of-bag examples). The OOB estimator is a pessimistic estimator of the true test loss, but remains a fairly good approximation for a small number of trees. The figure shows the cumulative sum of the negative OOB improvements as a function of the boosting iteration. As you can see, it tracks the test loss for the first hundred iterations but then diverges in a pessimistic way. The figure also shows the performance of 3-fold cross validation which usually gives a better estimate of the test loss but is computationally more demanding. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from scipy.special import expit from sklearn import ensemble from sklearn.metrics import log_loss from sklearn.model_selection import KFold, train_test_split # Generate data (adapted from G. 
Ridgeway's gbm example) n_samples = 1000 random_state = np.random.RandomState(13) x1 = random_state.uniform(size=n_samples) x2 = random_state.uniform(size=n_samples) x3 = random_state.randint(0, 4, size=n_samples) p = expit(np.sin(3 * x1) - 4 * x2 + x3) y = random_state.binomial(1, p, size=n_samples) X = np.c_[x1, x2, x3] X = X.astype(np.float32) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=9) # Fit classifier with out-of-bag estimates params = { "n_estimators": 1200, "max_depth": 3, "subsample": 0.5, "learning_rate": 0.01, "min_samples_leaf": 1, "random_state": 3, } clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) acc = clf.score(X_test, y_test) print("Accuracy: {:.4f}".format(acc)) n_estimators = params["n_estimators"] x = np.arange(n_estimators) + 1 def heldout_score(clf, X_test, y_test): """compute deviance scores on ``X_test`` and ``y_test``.""" score = np.zeros((n_estimators,), dtype=np.float64) for i, y_proba in enumerate(clf.staged_predict_proba(X_test)): score[i] = 2 * log_loss(y_test, y_proba[:, 1]) return score def cv_estimate(n_splits=None): cv = KFold(n_splits=n_splits) cv_clf = ensemble.GradientBoostingClassifier(**params) val_scores = np.zeros((n_estimators,), dtype=np.float64) for train, test in cv.split(X_train, y_train): cv_clf.fit(X_train[train], y_train[train]) val_scores += heldout_score(cv_clf, X_train[test], y_train[test]) val_scores /= n_splits return val_scores # Estimate best n_estimator using cross-validation cv_score = cv_estimate(3) # Compute best n_estimator for test data test_score = heldout_score(clf, X_test, y_test) # negative cumulative sum of oob improvements cumsum = -np.cumsum(clf.oob_improvement_) # min loss according to OOB oob_best_iter = x[np.argmin(cumsum)] # min loss according to test (normalize such that first loss is 0) test_score -= test_score[0] test_best_iter = x[np.argmin(test_score)] # min loss according to cv (normalize such that first loss is 0) 
cv_score -= cv_score[0] cv_best_iter = x[np.argmin(cv_score)] # color brew for the three curves oob_color = list(map(lambda x: x / 256.0, (190, 174, 212))) test_color = list(map(lambda x: x / 256.0, (127, 201, 127))) cv_color = list(map(lambda x: x / 256.0, (253, 192, 134))) # line type for the three curves oob_line = "dashed" test_line = "solid" cv_line = "dashdot" # plot curves and vertical lines for best iterations plt.figure(figsize=(8, 4.8)) plt.plot(x, cumsum, label="OOB loss", color=oob_color, linestyle=oob_line) plt.plot(x, test_score, label="Test loss", color=test_color, linestyle=test_line) plt.plot(x, cv_score, label="CV loss", color=cv_color, linestyle=cv_line) plt.axvline(x=oob_best_iter, color=oob_color, linestyle=oob_line) plt.axvline(x=test_best_iter, color=test_color, linestyle=test_line) plt.axvline(x=cv_best_iter, color=cv_color, linestyle=cv_line) # add three vertical lines to xticks xticks = plt.xticks() xticks_pos = np.array( xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter] ) xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) + ["OOB", "CV", "Test"]) ind = np.argsort(xticks_pos) xticks_pos = xticks_pos[ind] xticks_label = xticks_label[ind] plt.xticks(xticks_pos, xticks_label, rotation=90) plt.legend(loc="upper center") plt.ylabel("normalized loss") plt.xlabel("number of iterations") plt.show()
python
github
https://github.com/scikit-learn/scikit-learn
examples/ensemble/plot_gradient_boosting_oob.py
name: pandas-dev channels: - conda-forge dependencies: - python=3.13 # build dependencies - versioneer - meson=1.10.0 - meson-python=0.18.0 - cython<4.0.0a0 # test dependencies - pytest>=8.3.4 - pytest-cov - pytest-xdist>=3.6.1 - hypothesis>=6.116.0 # pandas dependencies - python-dateutil - pip - pip: - "--extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" - "--pre" - "numpy" - "tzdata>=2023.3"
unknown
github
https://github.com/pandas-dev/pandas
ci/deps/actions-313-numpydev.yaml
#ifndef DATE_TIME_DATE_FORMATTING_LOCALES_HPP___ #define DATE_TIME_DATE_FORMATTING_LOCALES_HPP___ /* Copyright (c) 2002,2003 CrystalClear Software, Inc. * Use, modification and distribution is subject to the * Boost Software License, Version 1.0. (See accompanying * file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) * Author: Jeff Garland, Bart Garst * $Date$ */ #include "boost/date_time/locale_config.hpp" // set BOOST_DATE_TIME_NO_LOCALE #ifndef BOOST_DATE_TIME_NO_LOCALE #include "boost/date_time/iso_format.hpp" #include "boost/date_time/date_names_put.hpp" #include "boost/date_time/parse_format_base.hpp" #include <boost/io/ios_state.hpp> //#include <string> #include <sstream> #include <iomanip> namespace boost { namespace date_time { //! Formats a month as as string into an ostream template<class facet_type, class charT = char> class ostream_month_formatter { public: typedef typename facet_type::month_type month_type; typedef std::basic_ostream<charT> ostream_type; //! Formats a month as as string into an output iterator static void format_month(const month_type& month, ostream_type& os, const facet_type& f) { switch (f.month_format()) { case month_as_short_string: { std::ostreambuf_iterator<charT> oitr(os); f.put_month_short(oitr, month.as_enum()); break; } case month_as_long_string: { std::ostreambuf_iterator<charT> oitr(os); f.put_month_long(oitr, month.as_enum()); break; } case month_as_integer: { boost::io::basic_ios_fill_saver<charT> ifs(os); os << std::setw(2) << std::setfill(os.widen('0')) << month.as_number(); break; } } } // format_month }; //! Formats a weekday template<class weekday_type, class facet_type, class charT = char> class ostream_weekday_formatter { public: typedef typename facet_type::month_type month_type; typedef std::basic_ostream<charT> ostream_type; //! 
Formats a month as as string into an output iterator static void format_weekday(const weekday_type& wd, ostream_type& os, const facet_type& f, bool as_long_string) { std::ostreambuf_iterator<charT> oitr(os); if (as_long_string) { f.put_weekday_long(oitr, wd.as_enum()); } else { f.put_weekday_short(oitr, wd.as_enum()); } } // format_weekday }; //! Convert ymd to a standard string formatting policies template<class ymd_type, class facet_type, class charT = char> class ostream_ymd_formatter { public: typedef typename ymd_type::month_type month_type; typedef ostream_month_formatter<facet_type, charT> month_formatter_type; typedef std::basic_ostream<charT> ostream_type; typedef std::basic_string<charT> foo_type; //! Convert ymd to a standard string formatting policies /*! This is standard code for handling date formatting with * year-month-day based date information. This function * uses the format_type to control whether the string will * contain separator characters, and if so what the character * will be. 
In addtion, it can format the month as either * an integer or a string as controled by the formatting * policy */ // static string_type ymd_to_string(ymd_type ymd) // { // std::ostringstream ss; // facet_type dnp; // ymd_put(ymd, ss, dnp); // return ss.str(); // } // Put ymd to ostream -- part of ostream refactor static void ymd_put(ymd_type ymd, ostream_type& os, const facet_type& f) { boost::io::basic_ios_fill_saver<charT> ifs(os); std::ostreambuf_iterator<charT> oitr(os); switch (f.date_order()) { case ymd_order_iso: { os << ymd.year; if (f.has_date_sep_chars()) { f.month_sep_char(oitr); } month_formatter_type::format_month(ymd.month, os, f); if (f.has_date_sep_chars()) { f.day_sep_char(oitr); } os << std::setw(2) << std::setfill(os.widen('0')) << ymd.day; break; } case ymd_order_us: { month_formatter_type::format_month(ymd.month, os, f); if (f.has_date_sep_chars()) { f.day_sep_char(oitr); } os << std::setw(2) << std::setfill(os.widen('0')) << ymd.day; if (f.has_date_sep_chars()) { f.month_sep_char(oitr); } os << ymd.year; break; } case ymd_order_dmy: { os << std::setw(2) << std::setfill(os.widen('0')) << ymd.day; if (f.has_date_sep_chars()) { f.day_sep_char(oitr); } month_formatter_type::format_month(ymd.month, os, f); if (f.has_date_sep_chars()) { f.month_sep_char(oitr); } os << ymd.year; break; } } } }; //! Convert a date to string using format policies template<class date_type, class facet_type, class charT = char> class ostream_date_formatter { public: typedef std::basic_ostream<charT> ostream_type; typedef typename date_type::ymd_type ymd_type; //! Put date into an ostream static void date_put(const date_type& d, ostream_type& os, const facet_type& f) { special_values sv = d.as_special(); if (sv == not_special) { ymd_type ymd = d.year_month_day(); ostream_ymd_formatter<ymd_type, facet_type, charT>::ymd_put(ymd, os, f); } else { // output a special value std::ostreambuf_iterator<charT> coi(os); f.put_special_value(coi, sv); } } //! 
Put date into an ostream static void date_put(const date_type& d, ostream_type& os) { //retrieve the local from the ostream std::locale locale = os.getloc(); if (std::has_facet<facet_type>(locale)) { const facet_type& f = std::use_facet<facet_type>(locale); date_put(d, os, f); } else { //default to something sensible if no facet installed facet_type default_facet; date_put(d, os, default_facet); } } // date_to_ostream }; //class date_formatter } } //namespace date_time #endif #endif
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/date_time/date_formatting_locales.hpp
#! /usr/bin/env python # # Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Tests for google.protobuf.message_factory.""" __author__ = 'matthewtoia@google.com (Matt Toia)' try: import unittest2 as unittest except ImportError: import unittest from google.protobuf import descriptor_pb2 from google.protobuf.internal import factory_test1_pb2 from google.protobuf.internal import factory_test2_pb2 from google.protobuf import descriptor_database from google.protobuf import descriptor_pool from google.protobuf import message_factory class MessageFactoryTest(unittest.TestCase): def setUp(self): self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString( factory_test1_pb2.DESCRIPTOR.serialized_pb) self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString( factory_test2_pb2.DESCRIPTOR.serialized_pb) def _ExerciseDynamicClass(self, cls): msg = cls() msg.mandatory = 42 msg.nested_factory_2_enum = 0 msg.nested_factory_2_message.value = 'nested message value' msg.factory_1_message.factory_1_enum = 1 msg.factory_1_message.nested_factory_1_enum = 0 msg.factory_1_message.nested_factory_1_message.value = ( 'nested message value') msg.factory_1_message.scalar_value = 22 msg.factory_1_message.list_value.extend([u'one', u'two', u'three']) msg.factory_1_message.list_value.append(u'four') msg.factory_1_enum = 1 msg.nested_factory_1_enum = 0 msg.nested_factory_1_message.value = 'nested message value' msg.circular_message.mandatory = 1 msg.circular_message.circular_message.mandatory = 2 msg.circular_message.scalar_value = 'one deep' msg.scalar_value = 'zero deep' msg.list_value.extend([u'four', u'three', u'two']) msg.list_value.append(u'one') msg.grouped.add() msg.grouped[0].part_1 = 'hello' msg.grouped[0].part_2 = 'world' msg.grouped.add(part_1='testing', part_2='123') msg.loop.loop.mandatory = 2 msg.loop.loop.loop.loop.mandatory = 4 serialized = msg.SerializeToString() converted = factory_test2_pb2.Factory2Message.FromString(serialized) reserialized = converted.SerializeToString() self.assertEqual(serialized, reserialized) result = 
cls.FromString(reserialized) self.assertEqual(msg, result) def testGetPrototype(self): db = descriptor_database.DescriptorDatabase() pool = descriptor_pool.DescriptorPool(db) db.Add(self.factory_test1_fd) db.Add(self.factory_test2_fd) factory = message_factory.MessageFactory() cls = factory.GetPrototype(pool.FindMessageTypeByName( 'google.protobuf.python.internal.Factory2Message')) self.assertFalse(cls is factory_test2_pb2.Factory2Message) self._ExerciseDynamicClass(cls) cls2 = factory.GetPrototype(pool.FindMessageTypeByName( 'google.protobuf.python.internal.Factory2Message')) self.assertTrue(cls is cls2) def testGetMessages(self): # performed twice because multiple calls with the same input must be allowed for _ in range(2): messages = message_factory.GetMessages([self.factory_test1_fd, self.factory_test2_fd]) self.assertTrue( set(['google.protobuf.python.internal.Factory2Message', 'google.protobuf.python.internal.Factory1Message'], ).issubset(set(messages.keys()))) self._ExerciseDynamicClass( messages['google.protobuf.python.internal.Factory2Message']) self.assertTrue( set(['google.protobuf.python.internal.Factory2Message.one_more_field', 'google.protobuf.python.internal.another_field'], ).issubset( set(messages['google.protobuf.python.internal.Factory1Message'] ._extensions_by_name.keys()))) factory_msg1 = messages['google.protobuf.python.internal.Factory1Message'] msg1 = messages['google.protobuf.python.internal.Factory1Message']() ext1 = factory_msg1._extensions_by_name[ 'google.protobuf.python.internal.Factory2Message.one_more_field'] ext2 = factory_msg1._extensions_by_name[ 'google.protobuf.python.internal.another_field'] msg1.Extensions[ext1] = 'test1' msg1.Extensions[ext2] = 'test2' self.assertEqual('test1', msg1.Extensions[ext1]) self.assertEqual('test2', msg1.Extensions[ext2]) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
# cat.stack manipulations from cat.namespace import * ns = NameSpace() @define(ns, 'clear') def clear( cat ) : ''' clear : (A -> --) desc: Removes all stack entries A: the stack contents Example: 1 2 3 clear => -- tags: stack,clear ''' cat.stack.clear() @define(ns, 'pop') def pop( cat ) : ''' pop : (any:top_item -> --) desc: Removes the top item from the cat.stack top_item: the top of the stack [0] Example: 1 2 pop => 1 tags: stack,pop,top ''' cat.stack.pop() @define(ns, 'drop') def drop( cat ) : ''' drop : (any:items int:n -> --) desc: Removes the top n items from the cat.stack items: the items on the stack Example: 1 2 3 4 2 drop => 1 2 tags: stack,drop,top ''' n = cat.stack.pop() m = cat.stack.length() n = n if n < m else m cat.stack.pop_n( n ) @define(ns, 'popd,under') def popd( cat ) : ''' popd : (any:b any:a -> any:a) under : (any:b any:a -> any:a) desc: Removes the item at [-1] on the stack b: object at [-] a: object at [0] Example: 'a 'b popd => 'b tags: stack,pop,under ''' swap( cat ) cat.stack.pop() @define(ns, 'dup') def dup( cat ) : ''' dup : (any:a -> any:a any:a) desc: Duplicate the top item on the stack a: object on top of the stack [0] Example: 'x dup => 'x 'x tags: stack,duplicate,dup ''' cat.stack.push( cat.stack.peek() ) @define(ns, 'swap') def swap( cat ) : ''' swap : (any:a any:b -> any:b any:a) desc: Swap the top two items on the stack a: object originally on stack at position [-1] b: object originally on stack at position [0] Example: 1 2 swap => 2 1 tags: stack,swap ''' a, b = cat.stack.pop_2() cat.stack.push( (a, b), multi=True ) @define(ns, 'flip') def flip( cat ) : ''' flip : (any:a any:b any:c -> any:c any:b any:a) desc: Swaps the elements on the stack at positions [0] and [-2] of the stack a: object on stack at original position [-2] b: object on stack at position [-1] c: object on stack at original position [0] Example: 1 2 3 flip => 3 2 1 tags: stack,flip ''' t, m, b = cat.stack.pop_n( 3 ) cat.stack.push( (t, m, b), multi=True ) 
@define(ns, 'swapd') def swapd( cat ): ''' swapd : (any:c any:b any:a -> any:b any:c any:a) desc: Swap the items at [-1] and [-2] of the stack a: object at stack position [0] b: object originally at stack position [-1] c: object originally at stack position [-2] Note: same action as: [swap] dip Example: 1 2 3 swapd => 2 1 3 tags: stack,swap ''' a, b, c = cat.stack.pop_n( 3 ) cat.stack.push( (b, c, a), multi=True ) @define(ns, 'dupd') def dupd( cat ) : ''' dupd : (any:b any:a -> any:b any:b any:a) desc: Duplicates the item at [-1] leaving item at [0] on top of the stack a: object at stack position [0] b: object originally at stack position [-1] Note: same action as: [dup] dip Example: 1 2 dupd => 1 1 2 tags: stack,duplicate,dup ''' a, b = cat.stack.pop_2() cat.stack.push( (b, b, a), multi=True ) @define(ns, 'size') def size( cat ) : ''' size: (A -> A int:size) desc: Pushes the size of the stack (i.e. number of items in the stack) onto the top of the stack A: the full stack contents size: number of items in the stack Example: 1 2 3 size => 1 2 3 3 [1 2 3] list size => [1, 2, 3] 1 tags: lists,stack,size,length ''' cat.stack.push( cat.stack.length() ) @define(ns, '+rot,rot_up') def rot_up( cat ) : ''' +rot : (any:a any:b any:c -> any:c any:a any:b) +rot_up : (any:a any:b any:c -> any:c any:a any:b) desc: Rotates the top three elements upward one position circularly: [-2] [-1] [0] -> [0] [-1] [-2] a: item originally at stack position [-2] b: item originally at stack position [-1] c: item originally at stack position [0] Example: 1 2 3 +rot => 3 1 2 tags: stack,rotate ''' if cat.stack.length() < 3 : raise Exception, "+rot: Expect at least three elements on the cat.stack" t, m, b = cat.stack.pop_n( 3 ) cat.stack.push( (t, b, m), multi=True ) @define(ns, '-rot,rot_down') def rot_down( cat ) : ''' -rot : (any:a any:b any:c -> any:b any:c any:a) rot_down : (any:a any:b any:c -> any:b any:c any:a) desc: Rotates the top three elements downward one position circularly [-2] [-1] 
[0] -> [-1] [0] [-2] a: item originally at stack position [-2] b: item originally at stack position [-1] c: item originally at stack position [0] Example: 1 2 3 -rot => 2 3 1 tags: stack,rotate ''' if cat.stack.length() < 3 : raise Exception, "-rot: Expect at least three elements on the cat.stack" t, m, b = cat.stack.pop_n( 3 ) cat.stack.push( (m, t, b), multi=True ) @define(ns, 'eval,apply') def _eval( cat ) : ''' eval : ( function:exec_func -> any:ans ) apply : ( function:exec_func -> any:ans ) desc: Applies a function to the stack (i.e. executes a "program" or a string) exec_func: the function to evaluate ans: the result of the evaluation Example: 2 [inc dup] apply => 3 3 2 3 [add 2 **] eval => 25 "2 3 4 + *" eval => 14 tags: functions,eval,apply ''' cat.eval( cat.stack.pop() ) @define(ns, 'dip') def dip( cat ) : ''' dip: (any:arg any:saved function:exec_func -> any:result any:saved) desc: Evaluates a function, temporarily removing the item below the function 'exec_func'. This makes the item now on top of the stack the argument to the function. After evaluation of the function the removed item is restored to the top of the stack arg: the argument of the exec_func saved: the item to be saved (removed and then replaced after function execution) exec_func: the function to be executed result: the result, if any, of executing the function Example: 2 42 [3 * dec] dip => 5 42 tags: functions,dip ''' func, second = cat.stack.pop_2() cat.stack.push( func ) _eval( cat ) cat.stack.push( second ) @define(ns, 'quote') def quote( cat ) : ''' quote: (any:obj -> function:quoted) desc: Creates a constant generating function from the top value on the stack obj: object to be 'quoted' quoted: a pseudo-function that generates the value represented by obj Example: 3.14159 quote 'pi ! 
=> -- tags: functions,quote,generator ''' t = cat.stack.pop() cat.stack.push( lambda : cat.stack.push(t) ) @define(ns, 'compose') def compose( cat ) : ''' compose: (function:left function:right -> function:composite) desc: Creates a function by composing (concatenating) two existing functions left: the function that executes after 'right' using 'right's' results as an argument right: the function that executes first operating on the stack and producing output for the 'left' function composite: a function (lambda) object that is the composition of the 'left' and 'right' functions Example: [dup inc] [swap] compose tags: functions,compose ''' f1, f2 = cat.stack.pop_2() cat.stack.push( lambda : cat.eval2(f2, f1) ) @define(ns, 'papply') def papply( cat ) : ''' papply : (any:arg function:exec_func -> function:partial) desc: Partial application: binds the top argument to the top value in the stack arg: the argument to be bound with the 'exec_func' exec_func: function to apply after the arg partial: the resulting composition Example: 1 [<=] papply => [1 <=] tags: functions,papply ''' swap( cat ) quote( cat ) swap( cat ) compose( cat ) @define(ns, '!,save_var') def saveVar( cat ) : ''' ! : (any:obj string:userVarName -> ) save_var : (any:obj string:userVarName -> ) desc: Saves the value at [-1] to the user symbol table with the name provided by the string at [0]. Variable names may NOT duplicate any defined words (built-in or user-defined) obj: the object to be saved userVarName: the name under which the object will be found Example: 3.14159265 'pi ! 
tags: variables,user ''' varName, value = cat.stack.pop_2() cat.ns.addVar( varName, value ) @define(ns, '@,get_var') def fetchVar( cat ) : ''' @ : (string:userVarName -> any:val) get_var : (string:userVarName -> any:val) desc: Pushes the value of the named user-variable onto the stack Note: the userVarName by itself (no quotes or @) will push its value onto the stack userVarName: the name under which the object has been stored val: the object Example: pi ! => 3.14159265 pi => 3.14159265 tags: custom,variables,user ''' name = cat.stack.pop() defined, val = cat.ns.getVar( name ) if defined : cat.stack.push( val ) else : raise KeyError, "@: No variable called " + name @define(ns, '->aux') def push_to_aux( cat ) : ''' ->aux : (any:item -> --) desc: Pushes the item onto the auxiliary stack item: the object to be moved to the auxiliary stack Example: 42 ->aux tags: custom,auxiliary,stack,push ''' cat.stack.push_aux( cat.stack.pop() ) @define(ns, '<-aux') def pop_from_aux( cat ) : ''' <-aux : (-- -> any:item_from_aux_stack[0]) desc: Pushes the item on top of the auxiliary stack onto the regular stack item: the object moved from the auxiliary stack Example: <-aux => 42 tags: custom,auxiliary,stack,pop ''' cat.stack.push( cat.stack.pop_aux() ) @define(ns, 'n->aux') def push_n_to_aux( cat ) : ''' n->aux : (any:item any:... int:n -> --) desc: Pushes n items onto the auxiliary stack item: the objects to be moved to the auxiliary stack n: number of items below to move to the auxiliary stack Example 1 2 3 2 n->aux => 1 tags: custom,auxiliary,stack,push,multi ''' n = cat.stack.pop() items = cat.stack.pop_n( n ) cat.stack.push_aux( items, multi=True ) @define(ns, 'n<-aux') def pop_n_from_aux( cat ) : ''' n<-aux : (int:n -> any:item_from_aux_stack[0] any:item_from_aux_stack[-1] ...) 
desc: Pushes the top n items on the auxiliary stack onto the regular stack n: number of items to move from auxiliary stack to the regular stack item_from_aux_stack[0]: an object moved from the auxiliary stack[0] item_from_aux_stack[-1]: an object moved from the auxiliary stack[-1] etc Example 2 n<-aux => 2 3 tags: custom,auxiliary,stack,pop ''' n = cat.stack.pop() cat.stack.push( cat.stack.pop_aux(n), multi=True ) def _returnNS() : return ns
unknown
codeparrot/codeparrot-clean
import unittest from urllib3.packages.six.moves import xrange from urllib3.util.retry import Retry from urllib3.exceptions import ( ConnectTimeoutError, ReadTimeoutError, MaxRetryError ) class RetryTest(unittest.TestCase): def test_string(self): """ Retry string representation looks the way we expect """ retry = Retry() self.assertEqual(str(retry), 'Retry(total=10, connect=None, read=None, redirect=None)') for _ in range(3): retry = retry.increment() self.assertEqual(str(retry), 'Retry(total=7, connect=None, read=None, redirect=None)') def test_retry_both_specified(self): """Total can win if it's lower than the connect value""" error = ConnectTimeoutError() retry = Retry(connect=3, total=2) retry = retry.increment(error=error) retry = retry.increment(error=error) try: retry.increment(error=error) self.fail("Failed to raise error.") except MaxRetryError as e: self.assertEqual(e.reason, error) def test_retry_higher_total_loses(self): """ A lower connect timeout than the total is honored """ error = ConnectTimeoutError() retry = Retry(connect=2, total=3) retry = retry.increment(error=error) retry = retry.increment(error=error) self.assertRaises(MaxRetryError, retry.increment, error=error) def test_retry_higher_total_loses_vs_read(self): """ A lower read timeout than the total is honored """ error = ReadTimeoutError(None, "/", "read timed out") retry = Retry(read=2, total=3) retry = retry.increment(error=error) retry = retry.increment(error=error) self.assertRaises(MaxRetryError, retry.increment, error=error) def test_retry_total_none(self): """ if Total is none, connect error should take precedence """ error = ConnectTimeoutError() retry = Retry(connect=2, total=None) retry = retry.increment(error=error) retry = retry.increment(error=error) try: retry.increment(error=error) self.fail("Failed to raise error.") except MaxRetryError as e: self.assertEqual(e.reason, error) error = ReadTimeoutError(None, "/", "read timed out") retry = Retry(connect=2, total=None) retry = 
retry.increment(error=error) retry = retry.increment(error=error) retry = retry.increment(error=error) self.assertFalse(retry.is_exhausted()) def test_retry_default(self): """ If no value is specified, should retry connects 3 times """ retry = Retry() self.assertEqual(retry.total, 10) self.assertEqual(retry.connect, None) self.assertEqual(retry.read, None) self.assertEqual(retry.redirect, None) error = ConnectTimeoutError() retry = Retry(connect=1) retry = retry.increment(error=error) self.assertRaises(MaxRetryError, retry.increment, error=error) retry = Retry(connect=1) retry = retry.increment(error=error) self.assertFalse(retry.is_exhausted()) self.assertTrue(Retry(0).raise_on_redirect) self.assertFalse(Retry(False).raise_on_redirect) def test_retry_read_zero(self): """ No second chances on read timeouts, by default """ error = ReadTimeoutError(None, "/", "read timed out") retry = Retry(read=0) try: retry.increment(error=error) self.fail("Failed to raise error.") except MaxRetryError as e: self.assertEqual(e.reason, error) def test_backoff(self): """ Backoff is computed correctly """ max_backoff = Retry.BACKOFF_MAX retry = Retry(total=100, backoff_factor=0.2) self.assertEqual(retry.get_backoff_time(), 0) # First request retry = retry.increment() self.assertEqual(retry.get_backoff_time(), 0) # First retry retry = retry.increment() self.assertEqual(retry.backoff_factor, 0.2) self.assertEqual(retry.total, 98) self.assertEqual(retry.get_backoff_time(), 0.4) # Start backoff retry = retry.increment() self.assertEqual(retry.get_backoff_time(), 0.8) retry = retry.increment() self.assertEqual(retry.get_backoff_time(), 1.6) for i in xrange(10): retry = retry.increment() self.assertEqual(retry.get_backoff_time(), max_backoff) def test_zero_backoff(self): retry = Retry() self.assertEqual(retry.get_backoff_time(), 0) retry = retry.increment() retry = retry.increment() self.assertEqual(retry.get_backoff_time(), 0) def test_sleep(self): # sleep a very small amount of time so 
our code coverage is happy retry = Retry(backoff_factor=0.0001) retry = retry.increment() retry = retry.increment() retry.sleep() def test_status_forcelist(self): retry = Retry(status_forcelist=xrange(500,600)) self.assertFalse(retry.is_forced_retry('GET', status_code=200)) self.assertFalse(retry.is_forced_retry('GET', status_code=400)) self.assertTrue(retry.is_forced_retry('GET', status_code=500)) retry = Retry(total=1, status_forcelist=[418]) self.assertFalse(retry.is_forced_retry('GET', status_code=400)) self.assertTrue(retry.is_forced_retry('GET', status_code=418)) def test_exhausted(self): self.assertFalse(Retry(0).is_exhausted()) self.assertTrue(Retry(-1).is_exhausted()) self.assertEqual(Retry(1).increment().total, 0) def test_disabled(self): self.assertRaises(MaxRetryError, Retry(-1).increment) self.assertRaises(MaxRetryError, Retry(0).increment)
unknown
codeparrot/codeparrot-clean
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for V2 control flow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import function_def_to_graph from tensorflow.python.framework import ops from tensorflow.python.framework.func_graph import FuncGraph from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import control_flow_v2_func_graphs from tensorflow.python.ops import gradients_util from tensorflow.python.util import keras_deps from tensorflow.python.util import tf_contextlib _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = None _DISABLE_LOWER_USING_SWITCH_MERGE = False CondBranchFuncGraph = control_flow_v2_func_graphs.CondBranchFuncGraph WhileCondFuncGraph = control_flow_v2_func_graphs.WhileCondFuncGraph WhileBodyFuncGraph = control_flow_v2_func_graphs.WhileBodyFuncGraph def in_defun(): """Returns if the current graph is, or is nested in, a defun.""" if context.executing_eagerly(): return False graph = ops.get_default_graph() while (isinstance(graph, CondBranchFuncGraph) or isinstance(graph, 
WhileBodyFuncGraph) or isinstance(graph, WhileCondFuncGraph)): graph = graph.outer_graph return isinstance(graph, FuncGraph) def in_while_loop_defun(graph): """Returns if the graph is a while loop FuncGraph.""" if context.executing_eagerly(): return False return (isinstance(graph, WhileCondFuncGraph) or isinstance(graph, WhileBodyFuncGraph)) def create_new_tf_function(func_graph): """Converts func_graph to a TF_Function and adds it to the current graph. Args: func_graph: FuncGraph Returns: The name of the new TF_Function. """ func = function._EagerDefinedFunction( # pylint: disable=protected-access func_graph.name, func_graph, func_graph.inputs, func_graph.outputs, {}) func.add_to_graph(func_graph.outer_graph) return func_graph.name def unique_fn_name(scope, name): """Returns a unique name to use for a control flow function. Args: scope: A name scope string. name: An identifier for this function (e.g. "true", "body"). Returns: A string, the name to use for the function. """ return ("%s%s_%s" % (scope, name, ops.uid())).replace("/", "_") def unique_grad_fn_name(forward_name): return "%s_grad_%s" % (forward_name, ops.uid()) def maybe_set_lowering_attr(op, lower_using_switch_merge=None): """Sets the flag to enable lowering on `op` if necessary. Lowering allows cond_v2 and while_v2 to avoid some of the limitations of Functions, allowing users to specify devices & colocation inside of cond_v2 and while_v2 input functions, and enabling non-strict evaluation & partial pruning. This brings v2 control flow closer to feature parity with v1 control flow. However, we do not lower in the following cases: - When the `If` or `While` ops are in the XLA context. Because it is easier for XLA to apply its own optimizations when dealing with un-lowered control flow operators than with low-level control flow primitives. - When the eager execution context specifies the executor of functions to be the single threaded executor (see context.function_executor_type()). 
Because the single threaded executor does not support v1 control flow ops. - When 'lower_using_switch_merge' is explicitly set to False. Args: op: An `If` or `While` Operation. lower_using_switch_merge: Explicit value to lower or not (optional). """ if lower_using_switch_merge is not None: # pylint: disable=protected-access op._set_attr("_lower_using_switch_merge", attr_value_pb2.AttrValue(b=lower_using_switch_merge)) # pylint: enable=protected-access elif (not _DISABLE_LOWER_USING_SWITCH_MERGE and not control_flow_util.GraphOrParentsInXlaContext(op.graph) and context.context().function_call_options.executor_type != "SINGLE_THREADED_EXECUTOR"): # pylint: disable=protected-access op._set_attr("_lower_using_switch_merge", attr_value_pb2.AttrValue(b=True)) # pylint: enable=protected-access def maybe_propagate_compile_time_consts_in_xla(op): """Tells XLA whether to propagate compile-time consts in the loop body. This is needed to make compile time constants available to ops, for example `max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this would always be turned on, but that doesn't work with legacy functionalized while_loops. Args: op: A `While` Operation. """ if control_flow_util.GraphOrParentsInXlaContext(op.graph): # pylint: disable=protected-access op._set_attr("_xla_propagate_compile_time_consts", attr_value_pb2.AttrValue(b=True)) # pylint: enable=protected-access def resource_input_index(tensor_name, input_names, node_defs, functions): """Returns the index of the input corresponding to `tensor_name`. This method is used to find the corresponding index of an arbitrary resource tensor in a function (the function could be a loop body). We assume that resource handles are never created in functions, so that every resource tensor can be traced back to a function input. The awkward signature of this method is to make it work with both FuncGraphs and FunctionDefs. 
This is so we can recurse on function call ops without building the corresponding FuncGraph (note that even if a FuncGraph for a FunctionDef already exists, the input/output/node names may have been changed when the FuncGraph was serialized to the FunctionDef, which makes it unusable with this algorithm). Args: tensor_name: the name of the resource tensor to be resolved to an input. input_names: a list of the names of all inputs to the function. node_defs: a dict mapping op name -> NodeDef for every op in the function. functions: a dict mapping function name -> _EagerDefinedFunction. Returns: The index into input_names corresponding to `tensor_name`. """ while tensor_name not in input_names: # FunctionDefs and graphs use different tensor naming conventions. parts = tensor_name.split(":") if len(parts) == 3: op_name, _, output_idx = parts elif len(parts) == 2: op_name, output_idx = parts else: assert len(parts) == 1 op_name = parts[0] output_idx = 0 tensor_name = "%s:%d" % (tensor_name, output_idx) # Check again for cases where the tensor suffix (":0") is stripped out. if tensor_name in input_names: break output_idx = int(output_idx) node_def = node_defs[op_name] if node_def.op in ("Identity", "While"): # Captured resources occur at the same index in the lists of inputs and # outputs of a while or identity op. So we lookup the input of `tensor.op` # at the same index as the index of `tensor` in the `tensor.op.outputs`. tensor_name = node_def.input[output_idx] elif node_def.op in ("PartitionedCall", "StatefulPartitionedCall"): # Functions output any captured resource tensors used by their # gradients. `tensor_name` is one of these outputs from a nested # function call, so recursively find the corresponding input in the # nested FunctionDef. 
func_name = node_def.attr["f"].func.name fdef = functions[func_name].definition output_arg_name = fdef.signature.output_arg[output_idx].name output_tensor_name = fdef.ret[output_arg_name] input_index = resource_input_index( output_tensor_name, [arg.name for arg in fdef.signature.input_arg], {ndef.name: ndef for ndef in fdef.node_def}, functions) tensor_name = node_def.input[input_index] else: # We assume there are no other ops types that will "forward" resource # handles like this, so all other handles must have been created by the # op. (Note that cond_v2 wraps resource handle outputs in optionals, # which we'll end up accumulating). raise ValueError("Taking gradient of a while loop which creates " "a resource in its body is not supported: %s" % op_name) return input_names.index(tensor_name) @tf_contextlib.contextmanager def clear_control_inputs(): """Clears the control inputs but preserves the ControlFlowContext. This is needed to preserve the XLAControlFlowControl when clearing control inputs for the gradient accumulators in while_v2. `ops.control_dependencies` does not allow that. Yields: A context manager in which the ops created will not have any control inputs by default but the control flow context is the same. """ # pylint: disable=protected-access control_flow_context = ops.get_default_graph()._get_control_flow_context() with ops.control_dependencies(None): ops.get_default_graph()._set_control_flow_context(control_flow_context) yield # pylint: enable=protected-access def _is_tpu_strategy(strategy): return (strategy is not None and strategy.__class__.__name__.startswith("TPUStrategy")) def _is_building_keras_layer(): # TODO(srbs): Remove this function when we no long support session with Keras. 
keras_call_context_function = keras_deps.get_call_context_function() if keras_call_context_function: return keras_call_context_function().layer is not None else: return False def output_all_intermediates(): """Whether to output all intermediates of a functional control flow op. The default behavior is to output intermediates only when building a Keras Layer in graph mode and that too when certain other conditions are met: 1. We do not output intermediates if the functional control flow op is being built inside a FuncGraph which is not a If/While graph. This guards against outputting intermediates in eager mode since keras adds tensors to a FuncGraph named "keras_graph" in that case. Also because we do not output intermediates of tf.function (since this feature is only for backwards compatibility) outputting intermediates of functional control flow ops built inside tf.function is of no value. 2. We do not output intermediates when the compilation is using XLA or for a TPU. 3. We do not output intermediates when a single threaded executor is used since that does not perform inlining and pruning. Returns: A bool telling whether to output all intermediates. """ if _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE is not None: return _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE if in_defun(): return False if (control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()) or _is_tpu_strategy(distribution_strategy_context.get_strategy())): return False if (context.context().function_call_options.executor_type == "SINGLE_THREADED_EXECUTOR"): return False return _is_building_keras_layer() def get_func_graph(op, input_shapes, func_name): """Generates and returns a FuncGraph for the given op and input_shapes.""" fdef = None graph = op.graph # Recursively search the func in graphs. 
while graph is not None: func = graph._get_function(func_name) # pylint: disable=protected-access if func is not None: fdef = func.definition break if hasattr(graph, "outer_graph"): graph = graph.outer_graph else: break if fdef is None: raise KeyError("%s cannot be found in the graph" % func_name) # `op.graph` may not be the same as `ops.get_default_graph()` e.g. # in the case of nested if ops or when the gradient is being computed # from inside a Defun. We build the `func_graph` with `op.graph` as its # `outer_graph`. This resembles how the `FuncGraph` was built in the # forward pass. We need this so that we can resolve references to tensors # in `func_graph` from its gradient graph in `_resolve_grad_inputs`. with op.graph.as_default(): func_graph = function_def_to_graph.function_def_to_graph( fdef, input_shapes) return func_graph def get_op_and_outputs(op_or_outputs): if isinstance(op_or_outputs, ops.Operation): return op_or_outputs, [] elif not op_or_outputs: # Empty list. return None, [] else: return op_or_outputs[0].op, op_or_outputs def graph_wrapped_for_higher_order_tape_gradients(graph): """Check if `graph` is wrapped by `run_as_function_for_tape_gradients`.""" while graph is not None: if "cflow_gradient_wrapper" in getattr(graph, "name", ""): return True graph = getattr(graph, "outer_graph", None) return False def run_as_function_for_tape_gradients(make_op, inputs): """Fix higher-order tape gradients by wrapping `make_op` in a function. Args: make_op: A function that takes a list of inputs and returns a list of output tensors. This function should set any handle data relevant to its outputs before returning. inputs: A list of tensors to check for tape gradients and pass to `make_op`. These should include all tensors used in `make_op`. Returns: Tensors corresponding to `make_op`'s output. """ # GradientTapes created inside a function currently don't work well with # un-wrapped control flow ops in that same function. 
Wrapping in an extra # layer of intermediate function means we run extra logic in the function # gradient code to record the correct intermediates on the tape. # # The function attribute inputs to control flow ops are not hashable, so we # pass everything as a capture to bypass defun's caching. if (gradients_util.PossibleTapeGradientTypes(inputs) == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER # We only need one function between the tape and the op; if we've already # wrapped once, we stop wrapping to avoid infinite recursion. and not (ops.get_default_graph().building_function and "cflow_gradient_wrapper" in ops.get_default_graph().name)): results = function.defun_with_attributes( make_op, autograph=False, attributes=dict(func_name="cflow_gradient_wrapper"))(inputs) return results else: return make_op(inputs)
unknown
codeparrot/codeparrot-clean
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from datetime import datetime, timedelta import unittest from flask_babel import gettext as __ from mock import Mock, patch, PropertyMock from selenium.common.exceptions import WebDriverException from superset import app, db from superset.models.core import Dashboard, Slice from superset.models.schedules import ( DashboardEmailSchedule, EmailDeliveryType, SliceEmailReportFormat, SliceEmailSchedule, ) from superset.tasks.schedules import ( create_webdriver, deliver_dashboard, deliver_slice, next_schedules, ) from .utils import read_fixture class SchedulesTestCase(unittest.TestCase): RECIPIENTS = 'recipient1@superset.com, recipient2@superset.com' BCC = 'bcc@superset.com' CSV = read_fixture('trends.csv') @classmethod def setUpClass(cls): cls.common_data = dict( active=True, crontab='* * * * *', recipients=cls.RECIPIENTS, deliver_as_group=True, delivery_type=EmailDeliveryType.inline, ) # Pick up a random slice and dashboard slce = db.session.query(Slice).all()[0] dashboard = db.session.query(Dashboard).all()[0] dashboard_schedule = DashboardEmailSchedule(**cls.common_data) dashboard_schedule.dashboard_id = dashboard.id dashboard_schedule.user_id = 1 db.session.add(dashboard_schedule) slice_schedule = 
# NOTE(review): this chunk begins mid-statement -- the assignment target
# (presumably "slice_schedule = ") for the SliceEmailSchedule(...) call below
# lives in the previous chunk of the file, as does the enclosing TestCase
# class header and the start of setUpClass.  Indentation reconstructed.
        SliceEmailSchedule(**cls.common_data)
        slice_schedule.slice_id = slce.id
        slice_schedule.user_id = 1
        slice_schedule.email_format = SliceEmailReportFormat.data
        db.session.add(slice_schedule)
        db.session.commit()
        # Remember only the primary keys; rows are re-fetched per test.
        cls.slice_schedule = slice_schedule.id
        cls.dashboard_schedule = dashboard_schedule.id

    @classmethod
    def tearDownClass(cls):
        # Delete the schedule rows created in setUpClass so the suite is hermetic.
        db.session.query(SliceEmailSchedule).filter_by(id=cls.slice_schedule).delete()
        db.session.query(DashboardEmailSchedule).filter_by(
            id=cls.dashboard_schedule).delete()
        db.session.commit()

    def test_crontab_scheduler(self):
        """next_schedules() over an hour window honors the resolution argument."""
        crontab = '* * * * *'
        start_at = datetime.now().replace(microsecond=0, second=0, minute=0)
        stop_at = start_at + timedelta(seconds=3600)

        # Fire off the task every minute
        schedules = list(next_schedules(crontab, start_at, stop_at, resolution=0))
        self.assertEqual(schedules[0], start_at)
        self.assertEqual(schedules[-1], stop_at - timedelta(seconds=60))
        self.assertEqual(len(schedules), 60)

        # Fire off the task every 10 minutes, controlled via resolution
        schedules = list(next_schedules(crontab, start_at, stop_at, resolution=10 * 60))
        self.assertEqual(schedules[0], start_at)
        self.assertEqual(schedules[-1], stop_at - timedelta(seconds=10 * 60))
        self.assertEqual(len(schedules), 6)

        # Fire off the task every 12 minutes, controlled via resolution
        schedules = list(next_schedules(crontab, start_at, stop_at, resolution=12 * 60))
        self.assertEqual(schedules[0], start_at)
        self.assertEqual(schedules[-1], stop_at - timedelta(seconds=12 * 60))
        self.assertEqual(len(schedules), 5)

    def test_wider_schedules(self):
        """A '*/15 2,10' crontab fires only within the 2:00 and 10:00 hours."""
        crontab = '*/15 2,10 * * *'

        for hour in range(0, 24):
            start_at = datetime.now().replace(
                microsecond=0, second=0, minute=0, hour=hour)
            stop_at = start_at + timedelta(seconds=3600)
            schedules = list(next_schedules(crontab, start_at, stop_at, resolution=0))

            # Four quarter-hour firings inside the matching hours, none elsewhere.
            if hour in (2, 10):
                self.assertEqual(len(schedules), 4)
            else:
                self.assertEqual(len(schedules), 0)

    def test_complex_schedule(self):
        # Run the job on every Friday of March and May
        # On these days, run the job at
        # 5:10 pm
        # 5:11 pm
        # 5:12 pm
        # 5:13 pm
        # 5:14 pm
        # 5:15 pm
        # 5:25 pm
        # 5:28 pm
        # 5:31 pm
        # 5:34 pm
        # 5:37 pm
        # 5:40 pm
        crontab = '10-15,25-40/3 17 * 3,5 5'
        start_at = datetime.strptime('2018/01/01', '%Y/%m/%d')
        stop_at = datetime.strptime('2018/12/31', '%Y/%m/%d')
        schedules = list(next_schedules(crontab, start_at, stop_at, resolution=60))
        # 12 firings per matching Friday; 9 such Fridays in 2018 => 108 total.
        self.assertEqual(len(schedules), 108)
        fmt = '%Y-%m-%d %H:%M:%S'
        self.assertEqual(schedules[0], datetime.strptime('2018-03-02 17:10:00', fmt))
        self.assertEqual(schedules[-1], datetime.strptime('2018-05-25 17:40:00', fmt))
        # Boundary between the last March firing and the first May firing.
        self.assertEqual(schedules[59], datetime.strptime('2018-03-30 17:40:00', fmt))
        self.assertEqual(schedules[60], datetime.strptime('2018-05-04 17:10:00', fmt))

    @patch('superset.tasks.schedules.firefox.webdriver.WebDriver')
    def test_create_driver(self, mock_driver_class):
        """The login cookie must be set exactly once across driver creations."""
        mock_driver = Mock()
        mock_driver_class.return_value = mock_driver
        # First call: login element present (login happens); second: absent.
        mock_driver.find_elements_by_id.side_effect = [True, False]

        create_webdriver()
        create_webdriver()
        mock_driver.add_cookie.assert_called_once()

    @patch('superset.tasks.schedules.firefox.webdriver.WebDriver')
    @patch('superset.tasks.schedules.send_email_smtp')
    @patch('superset.tasks.schedules.time')
    def test_deliver_dashboard_inline(self, mtime, send_email_smtp, driver_class):
        element = Mock()
        driver = Mock()
        mtime.sleep.return_value = None
        driver_class.return_value = driver

        # Ensure that we are able to login with the driver
        driver.find_elements_by_id.side_effect = [True, False]
        driver.find_element_by_class_name.return_value = element
        element.screenshot_as_png = read_fixture('sample.png')

        schedule = db.session.query(DashboardEmailSchedule).filter_by(
            id=self.dashboard_schedule).all()[0]

        deliver_dashboard(schedule)
        mtime.sleep.assert_called_once()
        # Element-level screenshot is used; full-page fallback must not fire.
        driver.screenshot.assert_not_called()
        send_email_smtp.assert_called_once()

    @patch('superset.tasks.schedules.firefox.webdriver.WebDriver')
    @patch('superset.tasks.schedules.send_email_smtp')
    @patch('superset.tasks.schedules.time')
    def test_deliver_dashboard_as_attachment(self, mtime, send_email_smtp,
                                             driver_class):
        element = Mock()
        driver = Mock()
        mtime.sleep.return_value = None
        driver_class.return_value = driver

        # Ensure that we are able to login with the driver
        driver.find_elements_by_id.side_effect = [True, False]
        driver.find_element_by_id.return_value = element
        driver.find_element_by_class_name.return_value = element
        element.screenshot_as_png = read_fixture('sample.png')

        schedule = db.session.query(DashboardEmailSchedule).filter_by(
            id=self.dashboard_schedule).all()[0]
        schedule.delivery_type = EmailDeliveryType.attachment

        deliver_dashboard(schedule)
        mtime.sleep.assert_called_once()
        driver.screenshot.assert_not_called()
        send_email_smtp.assert_called_once()
        # Attachment delivery: no inline images, payload under 'data'.
        self.assertIsNone(send_email_smtp.call_args[1]['images'])
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(
            send_email_smtp.call_args[1]['data']['screenshot.png'],
            element.screenshot_as_png,
        )

    @patch('superset.tasks.schedules.firefox.webdriver.WebDriver')
    @patch('superset.tasks.schedules.send_email_smtp')
    @patch('superset.tasks.schedules.time')
    def test_dashboard_chrome_like(self, mtime, send_email_smtp, driver_class):
        # Test functionality for chrome driver which does not support
        # element snapshots
        element = Mock()
        driver = Mock()
        mtime.sleep.return_value = None
        # Element screenshot raises -> code must fall back to driver.screenshot().
        type(element).screenshot_as_png = PropertyMock(side_effect=WebDriverException)
        driver_class.return_value = driver

        # Ensure that we are able to login with the driver
        driver.find_elements_by_id.side_effect = [True, False]
        driver.find_element_by_id.return_value = element
        driver.find_element_by_class_name.return_value = element
        driver.screenshot.return_value = read_fixture('sample.png')

        schedule = db.session.query(DashboardEmailSchedule).filter_by(
            id=self.dashboard_schedule).all()[0]

        deliver_dashboard(schedule)
        mtime.sleep.assert_called_once()
        driver.screenshot.assert_called_once()
        send_email_smtp.assert_called_once()
        self.assertEquals(send_email_smtp.call_args[0][0], self.RECIPIENTS)
        self.assertEquals(
            list(send_email_smtp.call_args[1]['images'].values())[0],
            driver.screenshot.return_value,
        )

    @patch('superset.tasks.schedules.firefox.webdriver.WebDriver')
    @patch('superset.tasks.schedules.send_email_smtp')
    @patch('superset.tasks.schedules.time')
    def test_deliver_email_options(self, mtime, send_email_smtp, driver_class):
        element = Mock()
        driver = Mock()
        mtime.sleep.return_value = None
        driver_class.return_value = driver

        # Ensure that we are able to login with the driver
        driver.find_elements_by_id.side_effect = [True, False]
        driver.find_element_by_class_name.return_value = element
        element.screenshot_as_png = read_fixture('sample.png')

        schedule = db.session.query(DashboardEmailSchedule).filter_by(
            id=self.dashboard_schedule).all()[0]

        # Send individual mails to the group
        schedule.deliver_as_group = False

        # Set a bcc email address
        app.config['EMAIL_REPORT_BCC_ADDRESS'] = self.BCC

        deliver_dashboard(schedule)
        mtime.sleep.assert_called_once()
        driver.screenshot.assert_not_called()
        # One mail per recipient (two recipients expected), each with the bcc.
        self.assertEquals(send_email_smtp.call_count, 2)
        self.assertEquals(send_email_smtp.call_args[1]['bcc'], self.BCC)

    @patch('superset.tasks.schedules.firefox.webdriver.WebDriver')
    @patch('superset.tasks.schedules.send_email_smtp')
    @patch('superset.tasks.schedules.time')
    def test_deliver_slice_inline_image(self, mtime, send_email_smtp, driver_class):
        element = Mock()
        driver = Mock()
        mtime.sleep.return_value = None
        driver_class.return_value = driver

        # Ensure that we are able to login with the driver
        driver.find_elements_by_id.side_effect = [True, False]
        driver.find_element_by_class_name.return_value = element
        element.screenshot_as_png = read_fixture('sample.png')

        schedule = db.session.query(SliceEmailSchedule).filter_by(
            id=self.slice_schedule).all()[0]

        schedule.email_format = SliceEmailReportFormat.visualization
        # NOTE(review): other tests set `delivery_type`; `delivery_format`
        # looks like a typo (the model attribute is delivery_type) -- confirm.
        schedule.delivery_format = EmailDeliveryType.inline

        deliver_slice(schedule)
        mtime.sleep.assert_called_once()
        driver.screenshot.assert_not_called()
        send_email_smtp.assert_called_once()
        self.assertEquals(
            list(send_email_smtp.call_args[1]['images'].values())[0],
            element.screenshot_as_png,
        )

    @patch('superset.tasks.schedules.firefox.webdriver.WebDriver')
    @patch('superset.tasks.schedules.send_email_smtp')
    @patch('superset.tasks.schedules.time')
    def test_deliver_slice_attachment(self, mtime, send_email_smtp, driver_class):
        element = Mock()
        driver = Mock()
        mtime.sleep.return_value = None
        driver_class.return_value = driver

        # Ensure that we are able to login with the driver
        driver.find_elements_by_id.side_effect = [True, False]
        driver.find_element_by_class_name.return_value = element
        element.screenshot_as_png = read_fixture('sample.png')

        schedule = db.session.query(SliceEmailSchedule).filter_by(
            id=self.slice_schedule).all()[0]

        schedule.email_format = SliceEmailReportFormat.visualization
        schedule.delivery_type = EmailDeliveryType.attachment

        deliver_slice(schedule)
        mtime.sleep.assert_called_once()
        driver.screenshot.assert_not_called()
        send_email_smtp.assert_called_once()
        self.assertEquals(
            send_email_smtp.call_args[1]['data']['screenshot.png'],
            element.screenshot_as_png,
        )

    @patch('superset.tasks.schedules.requests.get')
    @patch('superset.tasks.schedules.send_email_smtp')
    def test_deliver_slice_csv_attachment(self, send_email_smtp, get):
        response = Mock()
        get.return_value = response
        response.raise_for_status.return_value = None
        response.content = self.CSV

        schedule = db.session.query(SliceEmailSchedule).filter_by(
            id=self.slice_schedule).all()[0]

        schedule.email_format = SliceEmailReportFormat.data
        schedule.delivery_type = EmailDeliveryType.attachment

        deliver_slice(schedule)
        send_email_smtp.assert_called_once()

        # Attachment key is the localized "<slice name>.csv" filename.
        file_name = __('%(name)s.csv', name=schedule.slice.slice_name)

        self.assertEquals(
            send_email_smtp.call_args[1]['data'][file_name],
            self.CSV,
        )

    @patch('superset.tasks.schedules.requests.get')
    @patch('superset.tasks.schedules.send_email_smtp')
    def test_deliver_slice_csv_inline(self, send_email_smtp, get):
        response = Mock()
        get.return_value = response
        response.raise_for_status.return_value = None
        response.content = self.CSV

        schedule = db.session.query(SliceEmailSchedule).filter_by(
            id=self.slice_schedule).all()[0]

        schedule.email_format = SliceEmailReportFormat.data
        schedule.delivery_type = EmailDeliveryType.inline

        deliver_slice(schedule)
        send_email_smtp.assert_called_once()

        # Inline CSV delivery: no attachment, data rendered as an HTML table
        # in the message body (positional arg 2 of send_email_smtp).
        self.assertIsNone(send_email_smtp.call_args[1]['data'])
        self.assertTrue('<table ' in send_email_smtp.call_args[0][2])
unknown
codeparrot/codeparrot-clean
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc.  All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.

NOTE(review): this module is Python-2 only (copy_reg, dict.iteritems/iterkeys,
operator.isSequenceType, cmp-based sort); it delegates storage and wire-format
work to the `_net_proto2___python` C extension.
"""

__author__ = 'petar@google.com (Petar Petrov)'

import copy_reg
import operator
from google.protobuf.internal import _net_proto2___python
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import message

# Mirror the C extension's field label / type constants locally.
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE


def GetDescriptorPool():
  """Creates a new DescriptorPool C++ object."""
  return _net_proto2___python.NewCDescriptorPool()

# Single module-level pool shared by all lookups below.
_pool = GetDescriptorPool()


def GetFieldDescriptor(full_field_name):
  """Searches for a field descriptor given a full field name."""
  return _pool.FindFieldByName(full_field_name)


def BuildFile(content):
  """Registers a new proto file in the underlying C++ descriptor pool."""
  _net_proto2___python.BuildFile(content)


def GetExtensionDescriptor(full_extension_name):
  """Searches for an extension descriptor given a full field name."""
  return _pool.FindExtensionByName(full_extension_name)


def NewCMessage(full_message_name):
  """Creates a new C++ protocol message by its name."""
  return _net_proto2___python.NewCMessage(full_message_name)


def ScalarProperty(cdescriptor):
  """Returns a scalar property for the given descriptor."""

  def Getter(self):
    return self._cmsg.GetScalar(cdescriptor)

  def Setter(self, value):
    self._cmsg.SetScalar(cdescriptor, value)

  return property(Getter, Setter)


def CompositeProperty(cdescriptor, message_type):
  """Returns a Python property for the given composite (sub-message) field."""

  def Getter(self):
    # Lazily create and cache the Python wrapper around the C++ sub-message.
    sub_message = self._composite_fields.get(cdescriptor.name, None)
    if sub_message is None:
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      sub_message = message_type._concrete_class(__cmessage=cmessage)
      self._composite_fields[cdescriptor.name] = sub_message
    return sub_message

  # Read-only: composite fields are mutated in place, never assigned.
  return property(Getter)


class RepeatedScalarContainer(object):
  """Container for repeated scalar fields.

  All element storage lives in the C++ message; this wrapper round-trips a
  full Python list via self[slice(...)] for any operation the C API does not
  support natively (insert/remove/__setitem__).
  """

  __slots__ = ['_message', '_cfield_descriptor', '_cmsg']

  def __init__(self, msg, cfield_descriptor):
    self._message = msg
    self._cmsg = msg._cmsg
    self._cfield_descriptor = cfield_descriptor

  def append(self, value):
    self._cmsg.AddRepeatedScalar(
        self._cfield_descriptor, value)

  def extend(self, sequence):
    for element in sequence:
      self.append(element)

  def insert(self, key, value):
    # Copy out, mutate the Python list, then write the whole field back.
    values = self[slice(None, None, None)]
    values.insert(key, value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def remove(self, value):
    values = self[slice(None, None, None)]
    values.remove(value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def __setitem__(self, key, value):
    values = self[slice(None, None, None)]
    values[key] = value
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def __getitem__(self, key):
    return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)

  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)

  def __len__(self):
    return len(self[slice(None, None, None)])

  def __eq__(self, other):
    if self is other:
      return True
    # operator.isSequenceType is Python-2 only (removed in Python 3).
    if not operator.isSequenceType(other):
      raise TypeError(
          'Can only compare repeated scalar fields against sequences.')
    # We are presumably comparing against some other sequence type.
    return other == self[slice(None, None, None)]

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    raise TypeError('unhashable object')

  def sort(self, *args, **kwargs):
    # Maintain compatibility with the previous interface.
    if 'sort_function' in kwargs:
      kwargs['cmp'] = kwargs.pop('sort_function')
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor,
                                    sorted(self, *args, **kwargs))


def RepeatedScalarProperty(cdescriptor):
  """Returns a Python property for the given repeated scalar field."""

  def Getter(self):
    # Cache the container so repeated attribute access reuses one wrapper.
    container = self._composite_fields.get(cdescriptor.name, None)
    if container is None:
      container = RepeatedScalarContainer(self, cdescriptor)
      self._composite_fields[cdescriptor.name] = container
    return container

  def Setter(self, new_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(Getter, Setter, doc=doc)


class RepeatedCompositeContainer(object):
  """Container for repeated composite (sub-message) fields."""

  __slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']

  def __init__(self, msg, cfield_descriptor, subclass):
    self._message = msg
    self._cmsg = msg._cmsg
    self._subclass = subclass
    self._cfield_descriptor = cfield_descriptor

  def add(self, **kwargs):
    cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
    return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)

  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    for message in elem_seq:
      self.add().MergeFrom(message)

  def remove(self, value):
    # TODO(protocol-devel): This is inefficient as it needs to generate a
    # message pointer for each message only to do index().  Move this to a C++
    # extension function.
    self.__delitem__(self[slice(None, None, None)].index(value))

  def MergeFrom(self, other):
    for message in other[:]:
      self.add().MergeFrom(message)

  def __getitem__(self, key):
    # An integer key returns one C message; a slice returns a list of them.
    cmessages = self._cmsg.GetRepeatedMessage(
        self._cfield_descriptor, key)
    subclass = self._subclass
    if not isinstance(cmessages, list):
      return subclass(__cmessage=cmessages, __owner=self._message)
    return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]

  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(
        self._cfield_descriptor, key)

  def __len__(self):
    return self._cmsg.FieldLength(self._cfield_descriptor)

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    messages = self[slice(None, None, None)]
    other_messages = other[slice(None, None, None)]
    return messages == other_messages

  def __hash__(self):
    raise TypeError('unhashable object')

  def sort(self, cmp=None, key=None, reverse=False, **kwargs):
    # Maintain compatibility with the old interface.
    if cmp is None and 'sort_function' in kwargs:
      cmp = kwargs.pop('sort_function')

    # The cmp function, if provided, is passed the results of the key function,
    # so we only need to wrap one of them.
    if key is None:
      index_key = self.__getitem__
    else:
      index_key = lambda i: key(self[i])

    # Sort the list of current indexes by the underlying object.
    indexes = range(len(self))
    indexes.sort(cmp=cmp, key=index_key, reverse=reverse)

    # Apply the transposition: swap elements in the C++ storage until each
    # lands at its sorted position.
    for dest, src in enumerate(indexes):
      if dest == src:
        continue
      self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src)
      # Don't swap the same value twice.
      indexes[src] = src


def RepeatedCompositeProperty(cdescriptor, message_type):
  """Returns a Python property for the given repeated composite field."""

  def Getter(self):
    container = self._composite_fields.get(cdescriptor.name, None)
    if container is None:
      container = RepeatedCompositeContainer(
          self, cdescriptor, message_type._concrete_class)
      self._composite_fields[cdescriptor.name] = container
    return container

  def Setter(self, new_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(Getter, Setter, doc=doc)


class ExtensionDict(object):
  """Extension dictionary added to each protocol message."""

  def __init__(self, msg):
    self._message = msg
    self._cmsg = msg._cmsg
    # Cache of Python wrappers keyed by extension FieldDescriptor.
    self._values = {}

  def __setitem__(self, extension, value):
    # Local import avoids a circular dependency with the descriptor module.
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    # Only optional scalar extensions may be assigned directly.
    if (cdescriptor.label != _LABEL_OPTIONAL or
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      raise TypeError('Extension %r is repeated and/or a composite type.' % (
          extension.full_name,))
    self._cmsg.SetScalar(cdescriptor, value)
    self._values[extension] = value

  def __getitem__(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
      return self._cmsg.GetScalar(cdescriptor)
    ext = self._values.get(extension, None)
    if ext is not None:
      return ext
    ext = self._CreateNewHandle(extension)
    self._values[extension] = ext
    return ext

  def ClearExtension(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
    if extension in self._values:
      del self._values[extension]

  def HasExtension(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)

  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor.
    """
    return self._message._extensions_by_name.get(name, None)

  def _CreateNewHandle(self, extension):
    # Builds the appropriate Python wrapper for a composite or repeated
    # extension; plain optional scalars never reach this method.
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      return extension.message_type._concrete_class(__cmessage=cmessage)

    if cdescriptor.label == _LABEL_REPEATED:
      if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        return RepeatedCompositeContainer(
            self._message, cdescriptor, extension.message_type._concrete_class)
      else:
        return RepeatedScalarContainer(self._message, cdescriptor)
    # This shouldn't happen!
    # NOTE(review): `assert` is stripped under -O; an explicit raise would be
    # safer here.
    assert False
    return None


def NewMessage(bases, message_descriptor, dictionary):
  """Creates a new protocol message *class* (metaclass hook)."""
  _AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
  _AddEnumValues(message_descriptor, dictionary)
  _AddDescriptors(message_descriptor, dictionary)
  return bases


def InitMessage(message_descriptor, cls):
  """Constructs a new message instance (called before instance's __init__)."""
  cls._extensions_by_name = {}
  _AddInitMethod(message_descriptor, cls)
  _AddMessageMethods(message_descriptor, cls)
  _AddPropertiesForExtensions(message_descriptor, cls)
  # Make instances picklable via their serialized state.
  copy_reg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))


def _AddDescriptors(message_descriptor, dictionary):
  """Sets up a new protocol message class dictionary.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  dictionary['__descriptors'] = {}
  for field in message_descriptor.fields:
    dictionary['__descriptors'][field.name] = GetFieldDescriptor(
        field.full_name)

  dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [
      '_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS']


def _AddEnumValues(message_descriptor, dictionary):
  """Sets class-level attributes for all enum fields defined in this message.

  Args:
    message_descriptor: Descriptor object for this message type.
    dictionary: Class dictionary that should be populated.
  """
  for enum_type in message_descriptor.enum_types:
    dictionary[enum_type.name] = enum_type_wrapper.EnumTypeWrapper(enum_type)
    for enum_value in enum_type.values:
      dictionary[enum_value.name] = enum_value.number


def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
  """Adds class attributes for the nested extensions."""
  extension_dict = message_descriptor.extensions_by_name
  for extension_name, extension_field in extension_dict.iteritems():
    assert extension_name not in dictionary
    dictionary[extension_name] = extension_field


def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls."""
  # Create and attach message field properties to the message class.
  # This can be done just once per message class, since property setters and
  # getters are passed the message instance.
  # This makes message instantiation extremely fast, and at the same time it
  # doesn't require the creation of property objects for each message instance,
  # which saves a lot of memory.
  for field in message_descriptor.fields:
    field_cdescriptor = cls.__descriptors[field.name]
    if field.label == _LABEL_REPEATED:
      if field.cpp_type == _CPPTYPE_MESSAGE:
        value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
      else:
        value = RepeatedScalarProperty(field_cdescriptor)
    elif field.cpp_type == _CPPTYPE_MESSAGE:
      value = CompositeProperty(field_cdescriptor, field.message_type)
    else:
      value = ScalarProperty(field_cdescriptor)
    setattr(cls, field.name, value)

    # Attach a constant with the field number.
    constant_name = field.name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, field.number)

  def Init(self, **kwargs):
    """Message constructor."""
    cmessage = kwargs.pop('__cmessage', None)
    if cmessage:
      self._cmsg = cmessage
    else:
      self._cmsg = NewCMessage(message_descriptor.full_name)

    # Keep a reference to the owner, as the owner keeps a reference to the
    # underlying protocol buffer message.
    owner = kwargs.pop('__owner', None)
    if owner:
      self._owner = owner

    if message_descriptor.is_extendable:
      self.Extensions = ExtensionDict(self)
    else:
      # Reference counting in the C++ code is broken and depends on
      # the Extensions reference to keep this object alive during unit
      # tests (see b/4856052).  Remove this once b/4945904 is fixed.
      self._HACK_REFCOUNTS = self
    self._composite_fields = {}

    # Remaining keyword arguments initialize fields by name.
    for field_name, field_value in kwargs.iteritems():
      field_cdescriptor = self.__descriptors.get(field_name, None)
      if not field_cdescriptor:
        raise ValueError('Protocol message has no "%s" field.' % field_name)
      if field_cdescriptor.label == _LABEL_REPEATED:
        if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
          field_name = getattr(self, field_name)
          for val in field_value:
            field_name.add().MergeFrom(val)
        else:
          getattr(self, field_name).extend(field_value)
      elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        getattr(self, field_name).MergeFrom(field_value)
      else:
        setattr(self, field_name, field_value)

  Init.__module__ = None
  Init.__doc__ = None
  cls.__init__ = Init


def _IsMessageSetExtension(field):
  """Checks if a field is a message set extension."""
  return (field.is_extension and
          field.containing_type.has_options and
          field.containing_type.GetOptions().message_set_wire_format and
          field.type == _TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _LABEL_OPTIONAL)


def _AddMessageMethods(message_descriptor, cls):
  """Adds the methods to a protocol message class.

  All functions defined locally in this body (except the loop variables and
  the static methods attached explicitly at the end) are copied onto `cls`
  via the locals() loop below.
  """
  if message_descriptor.is_extendable:

    def ClearExtension(self, extension):
      self.Extensions.ClearExtension(extension)

    def HasExtension(self, extension):
      return self.Extensions.HasExtension(extension)

  def HasField(self, field_name):
    return self._cmsg.HasField(field_name)

  def ClearField(self, field_name):
    child_cmessage = None
    if field_name in self._composite_fields:
      child_field = self._composite_fields[field_name]
      del self._composite_fields[field_name]

      child_cdescriptor = self.__descriptors[field_name]
      # TODO(anuraag): Support clearing repeated message fields as well.
      if (child_cdescriptor.label != _LABEL_REPEATED and
          child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
        child_field._owner = None
        child_cmessage = child_field._cmsg

    if child_cmessage is not None:
      self._cmsg.ClearField(field_name, child_cmessage)
    else:
      self._cmsg.ClearField(field_name)

  def Clear(self):
    cmessages_to_release = []
    for field_name, child_field in self._composite_fields.iteritems():
      child_cdescriptor = self.__descriptors[field_name]
      # TODO(anuraag): Support clearing repeated message fields as well.
      if (child_cdescriptor.label != _LABEL_REPEATED and
          child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
        child_field._owner = None
        cmessages_to_release.append((child_cdescriptor, child_field._cmsg))
    self._composite_fields.clear()
    self._cmsg.Clear(cmessages_to_release)

  def IsInitialized(self, errors=None):
    if self._cmsg.IsInitialized():
      return True
    if errors is not None:
      # NOTE(review): stray trailing semicolon below (harmless, un-Pythonic).
      errors.extend(self.FindInitializationErrors());
    return False

  def SerializeToString(self):
    if not self.IsInitialized():
      raise message.EncodeError(
          'Message %s is missing required fields: %s' % (
          self._cmsg.full_name, ','.join(self.FindInitializationErrors())))
    return self._cmsg.SerializeToString()

  def SerializePartialToString(self):
    return self._cmsg.SerializePartialToString()

  def ParseFromString(self, serialized):
    self.Clear()
    self.MergeFromString(serialized)

  def MergeFromString(self, serialized):
    byte_size = self._cmsg.MergeFromString(serialized)
    if byte_size < 0:
      raise message.DecodeError('Unable to merge from string.')
    return byte_size

  def MergeFrom(self, msg):
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class: "
          "expected %s got %s." % (cls.__name__, type(msg).__name__))
    self._cmsg.MergeFrom(msg._cmsg)

  def CopyFrom(self, msg):
    self._cmsg.CopyFrom(msg._cmsg)

  def ByteSize(self):
    return self._cmsg.ByteSize()

  def SetInParent(self):
    return self._cmsg.SetInParent()

  def ListFields(self):
    all_fields = []
    field_list = self._cmsg.ListFields()
    fields_by_name = cls.DESCRIPTOR.fields_by_name
    for is_extension, field_name in field_list:
      if is_extension:
        extension = cls._extensions_by_name[field_name]
        all_fields.append((extension, self.Extensions[extension]))
      else:
        field_descriptor = fields_by_name[field_name]
        all_fields.append(
            (field_descriptor, getattr(self, field_name)))
    all_fields.sort(key=lambda item: item[0].number)
    return all_fields

  def FindInitializationErrors(self):
    return self._cmsg.FindInitializationErrors()

  def __str__(self):
    return str(self._cmsg)

  def __eq__(self, other):
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      return False
    return self.ListFields() == other.ListFields()

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    raise TypeError('unhashable object')

  def __unicode__(self):
    # Lazy import to prevent circular import when text_format imports this
    # file.
    from google.protobuf import text_format
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')

  # Attach the local methods to the message class.
  for key, value in locals().copy().iteritems():
    if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
      setattr(cls, key, value)

  # Static methods:

  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    cls._extensions_by_name[extension_handle.full_name] = extension_handle

    if _IsMessageSetExtension(extension_handle):
      # MessageSet extension.  Also register under type name.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle
  cls.RegisterExtension = staticmethod(RegisterExtension)

  def FromString(string):
    msg = cls()
    msg.MergeFromString(string)
    return msg
  cls.FromString = staticmethod(FromString)


def _AddPropertiesForExtensions(message_descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  extension_dict = message_descriptor.extensions_by_name
  for extension_name, extension_field in extension_dict.iteritems():
    constant_name = extension_name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, extension_field.number)
unknown
codeparrot/codeparrot-clean
//===--- DeadAccessScopeElimination.swift ----------------------------------==//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2025 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

import SIL

/// Eliminates dead access scopes if they are not conflicting with other scopes.
///
/// Removes:
/// ```
/// %2 = begin_access [modify] [dynamic] %1
/// ...         // no uses of %2
/// end_access %2
/// ```
///
/// However, dead _conflicting_ access scopes are not removed.
/// If a conflicting scope becomes dead because an optimization e.g. removed a load, it is still
/// important to get an access violation at runtime.
/// Even a propagated value of a redundant load from a conflicting scope is undefined.
///
/// ```
/// %2 = begin_access [modify] [dynamic] %1
/// store %x to %2
/// %3 = begin_access [read] [dynamic] %1   // conflicting with %2!
/// %y = load %3
/// end_access %3
/// end_access %2
/// use(%y)
/// ```
/// After redundant-load-elimination:
/// ```
/// %2 = begin_access [modify] [dynamic] %1
/// store %x to %2
/// %3 = begin_access [read] [dynamic] %1   // now dead, but still conflicting with %2
/// end_access %3
/// end_access %2
/// use(%x)    // propagated from the store, but undefined here!
/// ```
/// In this case the scope `%3` is not removed because it's important to get an access violation
/// error at runtime before the undefined value `%x` is used.
///
/// This pass considers potential conflicting access scopes in called functions.
/// But it does not consider potential conflicting access in callers (because it can't!).
/// However, optimizations, like redundant-load-elimination, can only do such transformations if
/// the outer access scope is within the function, e.g.
///
/// ```
/// bb0(%0 : $*T):   // an inout from a conflicting scope in the caller
///   store %x to %0
///   %3 = begin_access [read] [dynamic] %1
///   %y = load %3   // cannot be propagated because it cannot be proved that %1 is the same address as %0
///   end_access %3
/// ```
///
/// All those checks are only done for dynamic access scopes, because they matter for runtime
/// exclusivity checking. Dead static scopes are removed unconditionally.
///
let deadAccessScopeElimination = FunctionPass(name: "dead-access-scope-elimination") {
  (function: Function, context: FunctionPassContext) in

  // Add all dead scopes here and then remove the ones which turn out to be conflicting.
  var removableScopes = SpecificIterableInstructionSet<BeginAccessInst>(context)
  defer { removableScopes.deinitialize() }

  var scopeTree = ScopeTree(context)

  // The payload is the recent access instruction at the block begin, e.g.
  // ```
  //   %1 = begin_access %0
  //   br bb2
  // bb2:   // recent access instruction at begin of bb2 is: `%1 = begin_access %0`
  // ```
  // It's nil if the block is not within an access scope.
  //
  var blockWorklist = BasicBlockWorklistWithPayload<Instruction?>(context)
  defer { blockWorklist.deinitialize() }

  blockWorklist.pushIfNotVisited(function.entryBlock, with: nil)

  // Walk through the control flow in depth-first order. Note that we don't need to do any kind
  // of state merging at merge-points, because access scopes must be consistent on all paths.
  while let (block, recentAccessInstAtBlockBegin) = blockWorklist.pop() {
    // The last seen `begin_access` (or `end_access` in case of not perfectly nested scopes;
    // see ScopeTree.backlinks)
    var recentAccessInst = recentAccessInstAtBlockBegin

    for inst in block.instructions {
      process(instruction: inst, updating: &recentAccessInst, &scopeTree, &removableScopes)
    }
    blockWorklist.pushIfNotVisited(contentsOf: block.successors, with: recentAccessInst)
  }

  // Everything still in the set is dead and non-conflicting: erase scope and its end_accesses.
  for deadBeginAccess in removableScopes {
    context.erase(instructionIncludingAllUsers: deadBeginAccess)
  }
}

/// Per-instruction transfer function of the scan: classifies dead scopes, tracks scope
/// nesting and demotes scopes from `removableScopes` as soon as a conflict is observed.
private func process(instruction: Instruction,
                     updating recentAccessInst: inout Instruction?,
                     _ scopeTree: inout ScopeTree,
                     _ removableScopes: inout SpecificIterableInstructionSet<BeginAccessInst>) {
  switch instruction {
  case let beginAccess as BeginAccessInst:
    if beginAccess.isDead {
      // Might be removed again later if it turns out to be in a conflicting scope.
      removableScopes.insert(beginAccess)
    }
    if beginAccess.enforcement != .dynamic {
      // We unconditionally remove dead _static_ scopes, because they don't have any impact at runtime.
      // Usually static scopes are already removed in the optimization pipeline. However optimizations
      // might turn dynamic into static scopes. So let's handle them.
      break
    }
    scopeTree.visitEnclosingScopes(of: recentAccessInst) { enclosingBeginAccess in
      if beginAccess.accessKind.conflicts(with: enclosingBeginAccess.accessKind),
         // Avoid computing alias info if both scopes are not removable anyway.
         removableScopes.contains(beginAccess) || removableScopes.contains(enclosingBeginAccess),
         scopeTree.context.aliasAnalysis.mayAlias(beginAccess.address, enclosingBeginAccess.address)
      {
        // Conflicting enclosing scopes are not removable.
        removableScopes.erase(enclosingBeginAccess)
        // ... as well as the inner scope (which conflicts with the enclosing scope).
        removableScopes.erase(beginAccess)
      }
    }
    scopeTree.update(recent: &recentAccessInst, with: beginAccess)
  case let endAccess as EndAccessInst where endAccess.beginAccess.enforcement == .dynamic:
    scopeTree.update(recent: &recentAccessInst, with: endAccess)
  default:
    if instruction.mayCallFunction {
      // Check for potential conflicting scopes in called functions.
      scopeTree.visitEnclosingScopes(of: recentAccessInst) { enclosingBeginAccess in
        if removableScopes.contains(enclosingBeginAccess),
           instruction.mayHaveAccessScopeWhichConflicts(with: enclosingBeginAccess, scopeTree.context) {
          removableScopes.erase(enclosingBeginAccess)
        }
      }
    }
  }
}

/// Represents the tree of access scopes in a function.
/// Note that if the scopes are not nested perfectly, it's strictly speaking not a tree.
private struct ScopeTree {
  // Links `begin_access` and `end_access` instructions in backward control flow direction.
  // This is used to visit all enclosing scopes of a `begin_access`.
  // As an optimization, `end_access`es are ignored for scopes which are perfectly nested - which is
  // by far the most common case. In this case the backlinks simply are the parent links in the scope tree.
  //
  // Example of not perfectly nested scopes:
  // ```
  //   %1 = begin_access <------------------+
  //   ...                                  |
  //   %2 = begin_access <--------------+  -+
  //   ...                              |
  //   end_access %1     <---------+   -+
  //   ...                         |
  //   %3 = begin_access <-----+  -+
  //   ...                     |
  //   end_access %2     <-+  -+
  //   ...                 |
  //   end_access %3      -+
  // ```
  //
  // Perfectly nested scopes:
  // ```
  //   %1 = begin_access <-+ <-+
  //   ...                 |   |
  //   %2 = begin_access  -+   |
  //   end_access %2              <- ignored
  //   ...                     |
  //   %3 = begin_access  -------+
  //   end_access %3              <- ignored
  //   ...
  //   end_access %1              <- ignored
  // ```
  private var backlinks = Dictionary<Instruction, Instruction>()

  let context: FunctionPassContext

  init(_ context: FunctionPassContext) {
    self.context = context
  }

  mutating func update(recent: inout Instruction?, with beginAccess: BeginAccessInst) {
    backlinks[beginAccess] = recent
    recent = beginAccess
  }

  mutating func update(recent: inout Instruction?, with endAccess: EndAccessInst) {
    if endAccess.beginAccess == recent {
      // The scope is perfectly nested. Ignore it and directly backlink to the parent of the `begin_access`
      recent = backlinks[endAccess.beginAccess]
    } else {
      backlinks[endAccess] = recent
      recent = endAccess
    }
  }

  // Walks the backlink chain starting at `accessInstruction`, invoking `closure` for every
  // scope that is still open at that point.
  func visitEnclosingScopes(of accessInstruction: Instruction?, closure: (BeginAccessInst) -> ()) {
    // Ignore scopes which are already closed
    var ignore = SpecificInstructionSet<BeginAccessInst>(context)
    defer { ignore.deinitialize() }

    var enclosingScope = accessInstruction
    while let parent = enclosingScope {
      switch parent {
      case let parentBeginAccess as BeginAccessInst where !ignore.contains(parentBeginAccess):
        closure(parentBeginAccess)
      case let parentEndAccess as EndAccessInst:
        // Seeing an `end_access` in the backlink chain means that this scope is already closed.
        ignore.insert(parentEndAccess.beginAccess)
      default:
        break
      }
      enclosingScope = backlinks[parent]
    }
  }
}

private extension Instruction {
  // Conservative check whether a call-like instruction could trigger an exclusivity
  // conflict with `beginAccess` inside a callee.
  func mayHaveAccessScopeWhichConflicts(with beginAccess: BeginAccessInst,
                                        _ context: FunctionPassContext) -> Bool {
    if beginAccess.accessKind == .read {
      return mayWrite(toAddress: beginAccess.address, context.aliasAnalysis)
    } else {
      return mayReadOrWrite(address: beginAccess.address, context.aliasAnalysis)
    }
  }
}

private extension BeginAccessInst {
  // A scope is dead if the access value is only used by incidental uses (e.g. end_access).
  var isDead: Bool { users.allSatisfy({ $0.isIncidentalUse }) }
}
swift
github
https://github.com/apple/swift
SwiftCompilerSources/Sources/Optimizer/FunctionPasses/DeadAccessScopeElimination.swift
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_BETAINC_OP_H_
#define TENSORFLOW_CORE_KERNELS_BETAINC_OP_H_

// Functor definition for BetaincOp, must be compilable by nvcc.

#include "unsupported/Eigen/CXX11/Tensor"  // from @eigen_archive

#include "tensorflow/core/framework/tensor_types.h"

namespace tensorflow {
namespace functor {

// Functor used by BetaincOp to do the computations.
template <typename Device, typename T, int NDIM>
struct Betainc {
  // Elementwise incomplete-beta evaluation via Eigen::betainc on device `d`.
  // All three inputs and `output` are rank-NDIM tensors of the same shape.
  void operator()(const Device& d, typename TTypes<T, NDIM>::ConstTensor a,
                  typename TTypes<T, NDIM>::ConstTensor b,
                  typename TTypes<T, NDIM>::ConstTensor x,
                  typename TTypes<T, NDIM>::Tensor output) {
    output.device(d) = Eigen::betainc(a, b, x);
  }

  // Same computation, but each input is first tiled by its `bcast_*`
  // replication factors so that a, b and x reach a common (output) shape
  // before Eigen::betainc is applied.
  void BCast(const Device& d, typename TTypes<T, NDIM>::ConstTensor a,
             const typename Eigen::array<Eigen::DenseIndex, NDIM>& bcast_a,
             typename TTypes<T, NDIM>::ConstTensor b,
             const typename Eigen::array<Eigen::DenseIndex, NDIM>& bcast_b,
             typename TTypes<T, NDIM>::ConstTensor x,
             const typename Eigen::array<Eigen::DenseIndex, NDIM>& bcast_x,
             typename TTypes<T, NDIM>::Tensor output) {
    output.device(d) = Eigen::betainc(
        a.broadcast(bcast_a), b.broadcast(bcast_b), x.broadcast(bcast_x));
  }
};

}  // namespace functor
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_KERNELS_BETAINC_OP_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/kernels/betainc_op.h
"""Flask blueprint serving Brython/Cango pages that draw chain-link letters and gears.

Every route returns a complete HTML page as a string, assembled from
``head_str`` (page header + canvas), ``chain_str`` (a Brython ``chain`` class
embedded as a string), a generated per-letter/per-shape Brython script, and
``tail_str``.  The triple-quoted strings below are *generated client-side
code*: their content (including the Chinese comments inside them) is runtime
data and is preserved verbatim.
"""
from flask import Blueprint, request
# NOTE(review): `request` appears unused in this module — confirm before removing.

bg10_40323222 = Blueprint('bg10_40323222', __name__, url_prefix='/bg10_40323222', template_folder='templates')

# Common HTML header: loads Brython and the Cango 2D-drawing libraries and
# declares the 800x800 "plotarea" canvas used by chain_str.
head_str = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
'''

# Common HTML footer closing the Brython <script> opened in chain_str.
tail_str = '''
</script>
</body>
</html>
'''

# Brython prologue: sets up the Cango canvas, draws axes, and defines the
# `chain` class whose basic()/basic_rot() methods render one chain link.
chain_str = '''
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
    "strokeColor":"#aaaaaa",
    "fillColor": "#aaaaaa",
    "xTickInterval": 20,
    "xLabelInterval": 20,
    "yTickInterval": 20,
    "yLabelInterval": 20})
deg = math.pi/180
# 將繪製鏈條輪廓的內容寫成 class 物件
class chain():
    # 輪廓的外型設為 class variable
    chamber = "M -6.8397, -1.4894 \\
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \\
A 40, 40, 0, 0, 1, 6.8397, -18.511 \\
A 7, 7, 0, 1, 0, -6.8397, -18.511 \\
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
    #chamber = "M 0, 0 L 0, -20 z"
    cgoChamber = window.svgToCgoSVG(chamber)
    def __init__(self, fillcolor="green", border=True, strokecolor= "tan", linewidth=2, scale=1):
        self.fillcolor = fillcolor
        self.border = border
        self.strokecolor = strokecolor
        self.linewidth = linewidth
        self.scale = scale
    # 利用鏈條起點與終點定義繪圖
    def basic(self, x1, y1, x2, y2):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        # 注意, cgo.Chamber 為成員變數
        cmbr = cobj(self.cgoChamber, "SHAPE", {
            "fillColor": self.fillcolor,
            "border": self.border,
            "strokeColor": self.strokecolor,
            "lineWidth": self.linewidth })
        # hole 為原點位置
        hole = cobj(shapedefs.circle(4*self.scale), "PATH")
        cmbr.appendPath(hole)
        # 複製 cmbr, 然後命名為 basic1
        basic1 = cmbr.dup()
        # 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
        basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
        # 放大 scale 倍
        cgo.render(basic1, x1, y1, self.scale, 0)
    # 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
    def basic_rot(self, x1, y1, rot, v=False):
        # 若 v 為 True 則為虛擬 chain, 不 render
        self.x1 = x1
        self.y1 = y1
        self.rot = rot
        self.v = v
        # 注意, cgoChamber 為成員變數
        cmbr = cobj(self.cgoChamber, "SHAPE", {
            "fillColor": self.fillcolor,
            "border": self.border,
            "strokeColor": self.strokecolor,
            "lineWidth": self.linewidth })
        # hole0 為原點位置
        hole = cobj(shapedefs.circle(4*self.scale), "PATH")
        cmbr.appendPath(hole)
        # 根據旋轉角度, 計算 x2 與 y2
        x2 = x1 + 20*math.cos(rot*deg)*self.scale
        y2 = y1 + 20*math.sin(rot*deg)*self.scale
        # 複製 cmbr, 然後命名為 basic1
        basic1 = cmbr.dup()
        # 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
        basic1.rotate(rot+90)
        # 放大 scale 倍
        if v == False:
            cgo.render(basic1, x1, y1, self.scale, 0)
        return x2, y2
'''

# Return the Brython script fragment that draws the letter "A" at (x, y).
def a(x, y, scale=1, color="green"):
    outstring = '''
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain(scale='''+str(scale)+''', fillcolor="'''+str(color)+'''")
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1)
'''
    return outstring

# Return the Brython script fragment that draws the letter "B" at (x, y).
def b(x, y):
    outstring = '''
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 B
# 左邊四個垂直單元
# 每一個字元間隔為 65 pixels
#x1, y1 = mychain.basic_rot(0+ 65, 0, 90)
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 右上垂直向下單元
x7, y7 = mychain.basic_rot(x6, y6, -90)
# 右斜 240 度
x8, y8 = mychain.basic_rot(x7, y7, 210)
# 中間水平
mychain.basic(x8, y8, x2, y2)
# 右下斜 -30 度
x10, y10 = mychain.basic_rot(x8, y8, -30)
# 右下垂直向下單元
x11, y11 = mychain.basic_rot(x10, y10, -90)
# 右下斜 240 度
x12, y12 = mychain.basic_rot(x11, y11, 210)
# 水平接回起點
mychain.basic(x12,y12, '''+str(x)+","+str(y)+''')
'''
    return outstring

# Return the Brython script fragment that draws the letter "C" at (x, y).
# Note: str(y) is concatenated directly with "-10+10+..." so the generated
# code contains an arithmetic expression for the y start coordinate.
def c(x, y):
    outstring = '''
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 上半部
# 左邊中間垂直起點, 圓心位於線段中央, y 方向再向上平移兩個鏈條圓心距單位
#x1, y1 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+'''-10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
# 上方轉 80 度
x2, y2 = mychain.basic_rot(x1, y1, 80)
# 上方轉 30 度
x3, y3 = mychain.basic_rot(x2, y2, 30)
# 上方水平
x4, y4 = mychain.basic_rot(x3, y3, 0)
# 下半部, 從起點開始 -80 度
#x5, y5 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
x5, y5 = mychain.basic_rot('''+str(x)+","+str(y)+'''-10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
# 下斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 下方水平單元
x7, y7 = mychain.basic_rot(x6, y6, -0)
'''
    return outstring

# Return the Brython script fragment that draws the letter "D" at (x, y).
def d(x, y):
    outstring = '''
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 左邊四個垂直單元
#x1, y1 = mychain.basic_rot(0+65*3, 0, 90)
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -40 度
x6, y6 = mychain.basic_rot(x5, y5, -40)
x7, y7 = mychain.basic_rot(x6, y6, -60)
# 右中垂直向下單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
# -120 度
x9, y9 = mychain.basic_rot(x8, y8, -120)
# -140
x10, y10 = mychain.basic_rot(x9, y9, -140)
# 水平接回原點
#mychain.basic(x10, y10, 0+65*3, 0, color="red")
mychain.basic(x10, y10, '''+str(x)+","+str(y)+''')
'''
    return outstring

# Generate a 9-link chain loop starting at (x, y), each link rotated 40 degrees
# further than the previous one.
def circle(x, y):
    outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+''', 50)
'''
    for i in range(2, 10):
        outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*40)+") \n"
    return outstring

# Generate a full chain circle; `degree` is the angular step between links.
# NOTE(review): raises ZeroDivisionError for degree == 0 — caller-supplied via URL.
def circle1(x, y, degree=10):
    # 20 is the distance between the two link centres; the enclosing circle
    # has radius 20/2/math.asin(degree*math.pi/180/2), i.e.
    # degree = math.asin(20/2/radius)*180/math.pi
    #degree = 10
    first_degree = 90 - degree
    repeat = 360 / degree
    outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
    return outstring

# NOTE(review): identical to circle1 — presumably kept as a separate endpoint
# for experimentation; confirm before deduplicating.
def circle2(x, y, degree=10):
    # Same radius/step relation as circle1 (see comment there).
    #degree = 10
    first_degree = 90 - degree
    repeat = 360 / degree
    outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
    return outstring

# Draw two chain circles joined by two straight chain runs (a closed belt).
# NOTE(review): the x/y parameters are immediately overwritten with fixed
# values (x=50, y=0), so the route arguments have no effect — confirm intent.
def twocircle(x, y):
    # 20 is the distance between link centres; circle radius is
    # 20/2/math.asin(degree*math.pi/180/2); degree = math.asin(20/2/radius)*180/math.pi
    x = 50
    y = 0
    degree = 12
    # steps at 78, 66, 54, 42, 30, 18, 6 degrees
    # some links are only computed (virtual), not rendered
    first_degree = 90 - degree
    repeat = 360 / degree
    # link 1 is also a virtual chain
    outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''', True)
#x1, y1 = mychain.basic_rot('''+str(x)+","+str(y)+", "+str(first_degree)+''')
'''
    # keep one extra virtual link above and below so the straight runs can be
    # joined at (x7, y7) and (x22, y22)
    for i in range(2, int(repeat)+1):
        #if i < 7 or i > 23:
        if i <= 7 or i >= 23:
            # virtual chain
            outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+", True) \n"
            #outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
        else:
            outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+", 90-"+str(i*degree)+") \n"
    p = -150
    k = 0
    degree = 20
    # steps at 70, 50, 30, 10; links from i=5 on are virtual
    first_degree = 90 - degree
    repeat = 360 / degree
    # link 1 is NOT a virtual chain
    outstring += '''
#mychain = chain()
p1, k1 = mychain.basic_rot('''+str(p)+","+str(k)+", "+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        if i >= 5 and i <= 13:
            # virtual chain
            outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+", True) \n"
            #outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+") \n"
        else:
            outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+", 90-"+str(i*degree)+") \n"
    # upper connecting straight run, starting from (p5, k5)
    first_degree = 10
    repeat = 11
    outstring += '''
m1, n1 = mychain.basic_rot(p4, k4, '''+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "m"+str(i)+", n"+str(i)+"=mychain.basic_rot(m"+str(i-1)+", n"+str(i-1)+", "+str(first_degree)+")\n"
    # lower connecting straight run, starting from (p12, k12)
    first_degree = -10
    repeat = 11
    outstring += '''
r1, s1 = mychain.basic_rot(p13, k13, '''+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "r"+str(i)+", s"+str(i)+"=mychain.basic_rot(r"+str(i-1)+", s"+str(i-1)+", "+str(first_degree)+")\n"
    # upper run joins (x7, y7) on the right to (m11, n11) on the left
    outstring += "mychain.basic(x7, y7, m11, n11)\n"
    # lower run joins (x22, y22) on the right to (r11, s11) on the left
    outstring += "mychain.basic(x22, y22, r11, s11)\n"
    return outstring

# Draw an 18/30-link belt: two chain circles plus their two external tangent
# straight runs.  NOTE(review): as in twocircle, x and y are overwritten with
# fixed values — the route arguments have no effect.
def eighteenthirty(x, y):
    '''
    Tangent points of the two external tangent lines, obtained graphically and
    symbolically:
    (-203.592946177111, 0.0), (0.0, 0.0), (-214.364148466539, 56.5714145924675), (-17.8936874260919, 93.9794075692901)
    (-203.592946177111, 0.0), (0.0, 0.0), (-214.364148466539, -56.5714145924675), (-17.8936874260919, -93.9794075692901)
    Left key link start (-233.06, 49.48), angle 20.78, centre (-203.593, 0.0)
    Right key link start (-17.89, 93.9), angle 4.78, centre (0, 0)
    '''
    # 20 is the distance between link centres; circle radius is
    # 20/2/math.asin(degree*math.pi/180/2); degree = math.asin(20/2/radius)*180/math.pi
    x = 50
    y = 0
    degree = 20
    first_degree = 110.78 #20.78
    startx = -233.06+109+x
    starty = 49.48-175+y
    repeat = 360 / degree
    # first draw the left key link
    outstring = '''
mychain = chain()
x1, y1 = mychain.basic_rot('''+str(startx)+","+str(starty)+", "+str(first_degree)+''')
'''
    # then emit the left circle's links (i in [2, 11] are virtual)
    for i in range(2, int(repeat)+1):
        if i >=2 and i <=11:
            # virtual chain
            #outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
            outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+", True) \n"
        else:
            outstring += "x"+str(i)+", y"+str(i)+"=mychain.basic_rot(x"+str(i-1)+", y"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
    # now the right circle: draw its key link first
    p = -17.89-150+x
    k = 93.98
    degree = 12
    first_degree = 94.78
    repeat = 360 / degree
    # link 1 is NOT a virtual chain
    outstring += '''
#mychain = chain()
p1, k1 = mychain.basic_rot('''+str(p)+","+str(k)+", "+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        if i >=18:
            # virtual chain
            outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+", True) \n"
            #outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
        else:
            outstring += "p"+str(i)+", k"+str(i)+"=mychain.basic_rot(p"+str(i-1)+", k"+str(i-1)+","+str(first_degree+degree-i*degree)+") \n"
    # upper connecting straight run, starting from (x1, y1)
    first_degree = 100.78
    repeat = 10
    outstring += '''
m1, n1 = mychain.basic_rot(x1, y1, '''+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "m"+str(i)+", n"+str(i)+"=mychain.basic_rot(m"+str(i-1)+", n"+str(i-1)+", "+str(first_degree)+")\n"
    # lower connecting straight run, starting from (x11, y11)
    first_degree = 79.22
    repeat = 10
    outstring += '''
r1, s1 = mychain.basic_rot(x11, y11, '''+str(first_degree)+''')
'''
    for i in range(2, int(repeat)+1):
        outstring += "r"+str(i)+", s"+str(i)+"=mychain.basic_rot(r"+str(i-1)+", s"+str(i-1)+", "+str(first_degree)+")\n"
    return outstring

@bg10_40323222.route('/a')
def draw_a():
    return head_str + chain_str + a(0, 0) + tail_str

@bg10_40323222.route('/b')
def draw_b():
    # horizontal spacing between letters is 65 px; vertical spacing is 110 px
    return head_str + chain_str + b(0+65, 0) + tail_str

@bg10_40323222.route('/c')
def draw_c():
    # horizontal spacing between letters is 65 px
    return head_str + chain_str + c(0+65*2, 0) + tail_str

@bg10_40323222.route('/d')
def draw_d():
    return head_str + chain_str + d(0+65*3, 0) + tail_str

@bg10_40323222.route('/ab')
def draw_ab():
    #return head_str + chain_str + a(0, 0) + b(0+65, 0) + tail_str
    return head_str + chain_str + a(0, 0) + b(0, 0-110) + tail_str

@bg10_40323222.route('/ac')
def draw_ac():
    return head_str + chain_str + a(0, 0) + c(0+65, 0) + tail_str

@bg10_40323222.route('/bc')
def draw_bc():
    return head_str + chain_str + b(0, 0) + c(0+65, 0) + tail_str

@bg10_40323222.route('/abc')
def draw_abc():
    return head_str + chain_str + a(0, 0) + b(0+65, 0) + c(0+65*2, 0) + tail_str

@bg10_40323222.route('/aaaa')
def draw_aaaa():
    # draw 20 shrinking "A"s, each 10 px to the right, scale decaying by 0.9
    outstring = head_str + chain_str
    scale = 2
    for i in range(20):
        scale = scale*0.9
        outstring += a(0+10*i, 0, scale=scale)
    return outstring + tail_str
    #return head_str + chain_str + a(0, 0, scale=1) + a(0+65, 0, scale=0.8, color="red") + a(0+65*2, 0, scale=0.6) + a(0+65*3, 0, scale=0.4, color="red") + tail_str

@bg10_40323222.route('/badc')
def draw_badc():
    return head_str + chain_str + b(0, 0) + a(0+65, 0) + d(0+65*2, 0) + c(0+65*3, 0) + tail_str

@bg10_40323222.route('/abcd')
def draw_abcd():
    #return head_str + chain_str + a(0, 0) + b(0+65, 0) + c(0+65*2, 0) + d(0+65*3, 0) + tail_str
    return head_str + chain_str + a(0, 110) + b(0, 110-110) + c(0, 110-110*2) + d(0, 110-110*3) + tail_str

@bg10_40323222.route('/circle')
def drawcircle():
    return head_str + chain_str + circle(0, 0) + tail_str

@bg10_40323222.route('/circle1/<degree>', defaults={'x': 0, 'y': 0})
@bg10_40323222.route('/circle1/<x>/<degree>', defaults={'y': 0})
@bg10_40323222.route('/circle1/<x>/<y>/<degree>')
#@bg10.route('/circle1/<int:x>/<int:y>/<int:degree>')
def drawcircle1(x,y,degree):
    # URL parameters arrive as strings; converted with int() here
    return head_str + chain_str + circle1(int(x), int(y), int(degree)) + tail_str

@bg10_40323222.route('/circle2/<degree>', defaults={'x': 0, 'y': 0})
@bg10_40323222.route('/circle2/<x>/<degree>', defaults={'y': 0})
@bg10_40323222.route('/circle2/<x>/<y>/<degree>')
#@abg10.route('/circle2/<int:x>/<int:y>/<int:degree>')
def drawcircle2(x,y,degree):
    return head_str + chain_str + circle2(int(x), int(y), int(degree)) + tail_str

@bg10_40323222.route('/twocircle/<x>/<y>')
@bg10_40323222.route('/twocircle', defaults={'x':0, 'y':0})
def drawtwocircle(x,y):
    return head_str + chain_str + twocircle(int(x), int(y)) + tail_str

@bg10_40323222.route('/eighteenthirty/<x>/<y>')
@bg10_40323222.route('/eighteenthirty', defaults={'x':0, 'y':0})
# NOTE(review): function name is misspelled ("eithteenthirdy") but is only
# used as a Flask endpoint name — renaming would change the endpoint id.
def draweithteenthirdy(x,y):
    return head_str + chain_str + eighteenthirty(int(x), int(y)) + tail_str

@bg10_40323222.route('/snap')
# http://svg.dabbles.info/snaptut-base
def snap():
    # Self-contained Snap.svg + Brython demo page (rect/circle with click and
    # hover bindings, external SVG loaded into a group).
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 snap 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# 透過 window 與 JSConstructor 從 Brython 物件 snap 擷取 Snap 物件的內容
snap = JSConstructor(window.Snap)
s = snap("#svgout")
# 建立物件時, 同時設定 id 名稱
r = s.rect(10,10,100,100).attr({'id': 'rect'})
c = s.circle(100,100,50).attr({'id': 'circle'})
r.attr('fill', 'red')
c.attr({ 'fill': 'blue', 'stroke': 'black', 'strokeWidth': 10 })
r.attr({ 'stroke': '#123456', 'strokeWidth': 20 })
s.text(180,100, '點按一下圖形').attr({'fill' : 'blue', 'stroke': 'blue', 'stroke-width': 0.2 })
g = s.group().attr({'id': 'tux'})
def hoverover(ev):
    g.animate({'transform': 's1.5r45,t180,20'}, 1000, window.mina.bounce)
def hoverout(ev):
    g.animate({'transform': 's1r0,t180,20'}, 1000, window.mina.bounce)
# callback 函式
def onSVGLoaded(data):
    #s.append(data)
    g.append(data)
    #g.hover(hoverover, hoverout )
    g.text(300,100, '拿滑鼠指向我')
# 利用 window.Snap.load 載入 svg 檔案
tux = window.Snap.load("/static/Dreaming_tux.svg", onSVGLoaded)
g.transform('t180,20')
# 與視窗事件對應的函式
def rtoyellow(ev):
    r.attr('fill', 'yellow')
def ctogreen(ev):
    c.attr('fill', 'green')
# 根據物件 id 綁定滑鼠事件執行對應函式
document['rect'].bind('click', rtoyellow)
document['circle'].bind('click', ctogreen)
document['tux'].bind('mouseover', hoverover)
document['tux'].bind('mouseleave', hoverout)
</script>
</body>
</html>
'''
    return outstring

@bg10_40323222.route('/snap_link')
# http://svg.dabbles.info/
def snap_link():
    # Snap.svg demo: five nested rotating groups, outermost draggable.
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 snap 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# 透過 window 與 JSConstructor 從 Brython 物件 snap 擷取 Snap 物件的內容
snap = JSConstructor(window.Snap)
# 使用 id 為 "svgout" 的 svg 標註進行繪圖
s = snap("#svgout")
offsetY = 50
# 是否標訂出繪圖範圍
#borderRect = s.rect(0,0,800,640,10,10).attr({ 'stroke': "silver", 'fill': "silver", 'strokeWidth': "3" })
g = s.group().transform('t250,120')
r0 = s.rect(150,150,100,100,20,20).attr({ 'fill': "orange", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c0 = s.circle(225,225,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c0' })
g0 = s.group( r0,c0 ).attr({ 'id': 'g0' })
#g0.animate({ 'transform' : 't250,120r360,225,225' },4000)
g0.appendTo( g )
g0.animate({ 'transform' : 'r360,225,225' },4000)
# 讓 g0 可以拖動
g0.drag()
r1 = s.rect(100,100,100,100,20,20).attr({ 'fill': "red", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c1 = s.circle(175,175,10).attr({ 'fill': "silver", 'stroke': "black" , 'strokeWidth': "4"}).attr({ 'id': 'c1' })
g1 = s.group( r1,c1 ).attr({ 'id': 'g1' })
g1.appendTo( g0 ).attr({ 'id': 'g1' })
g1.animate({ 'transform' : 'r360,175,175' },4000)
r2 = s.rect(50,50,100,100,20,20).attr({ 'fill': "blue", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c2 = s.circle(125,125,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c2' })
g2 = s.group(r2,c2).attr({ 'id': 'g2' })
g2.appendTo( g1 );
g2.animate( { 'transform' : 'r360,125,125' },4000);
r3 = s.rect(0,0,100,100,20,20).attr({ 'fill': "yellow", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c3 = s.circle(75,75,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c3' })
g3 = s.group(r3,c3).attr({ 'id': 'g3' })
g3.appendTo( g2 )
g3.animate( { 'transform' : 'r360,75,75' },4000)
r4 = s.rect(-50,-50,100,100,20,20).attr({ 'fill': "green", 'opacity': "0.8", 'stroke': "black", 'strokeWidth': "2" })
c4 = s.circle(25,25,10).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "4" }).attr({ 'id': 'c4' })
g4 = s.group(r4,c4).attr({ 'id': 'g4' });
g4.appendTo( g3 )
g4.animate( { 'transform' : 'r360,25,25' },4000)
</script>
</body>
</html>
'''
    return outstring

@bg10_40323222.route('/snap_gear')
def snap_gear():
    # Minimal Snap.svg demo: draws a single line.
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 snap 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="/static/snap.svg-min.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
</head>
<body>
<svg width="800" height="800" viewBox="0 0 800 800" id="svgout"></svg>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window, document
# 透過 window 與 JSConstructor 從 Brython 物件 snap 擷取 Snap 物件的內容
snap = JSConstructor(window.Snap)
s = snap("#svgout")
# 畫直線
s.line(0, 0, 100, 100).attr({ 'fill': "silver", 'stroke': "black", 'strokeWidth': "1" }).attr({ 'id': 'line1' })
</script>
</body>
</html>
'''
    return outstring

@bg10_40323222.route('/threegears', defaults={'n1':17,'n2':29,'n3':15})
@bg10_40323222.route('/threegears/<n1>/<n2>/<n3>')
def draw_threegears(n1, n2, n3):
    # Cango gear demo: three meshing spur gears with n1/n2/n3 teeth.
    # NOTE(review): the generated page ends at </script> with no closing
    # </body></html>, and the <canvas> sits before <body> — browsers tolerate
    # this, but confirm it is intentional before "fixing" the template.
    outstring='''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 snap 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/Cango2D-7v01-min.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/gearUtils-05.js"></script>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id='gear1' width='800' height='750'></canvas>
<script type="text/python">
# 將 導入的 document 設為 doc 主要原因在於與舊程式碼相容
from browser import document as doc
# 由於 Python3 與 Javascript 程式碼已經不再混用, 因此來自 Javascript 的變數, 必須居中透過 window 物件轉換
from browser import window
# 針對 Javascript 既有的物件, 則必須透過 JSConstructor 轉換
from javascript import JSConstructor
import math
# 主要用來取得畫布大小
canvas = doc["gear1"]
# 此程式採用 Cango Javascript 程式庫繪圖, 因此無需 ctx
#ctx = canvas.getContext("2d")
# 針對類別的轉換, 將 Cango.js 中的 Cango 物件轉為 Python cango 物件
cango = JSConstructor(window.Cango)
# 針對變數的轉換, shapeDefs 在 Cango 中資料型別為變數, 可以透過 window 轉換
shapedefs = window.shapeDefs
# 目前 Cango 結合 Animation 在 Brython 尚無法運作, 此刻只能繪製靜態圖形
# in CangoAnimation.js
#interpolate1 = window.interpolate
# Cobi 與 createGearTooth 都是 Cango Javascript 程式庫中的物件
cobj = JSConstructor(window.Cobj)
creategeartooth = JSConstructor(window.createGearTooth)
# 經由 Cango 轉換成 Brython 的 cango, 指定將圖畫在 id="plotarea" 的 canvas 上
cgo = cango("gear1")
######################################
# 畫正齒輪輪廓
#####################################
def spur(cx, cy, m, n, pa, theta):
    # n 為齒數
    #n = 17
    # pa 為壓力角
    #pa = 25
    # m 為模數, 根據畫布的寬度, 計算適合的模數大小
    # Module = mm of pitch diameter per tooth
    #m = 0.8*canvas.width/n
    # pr 為節圓半徑
    pr = n*m/2 # gear Pitch radius
    # generate gear
    data = creategeartooth(m, n, pa)
    # Brython 程式中的 print 會將資料印在 Browser 的 console 區
    #print(data)
    gearTooth = cobj(data, "SHAPE", {
        "fillColor":"#ddd0dd",
        "border": True,
        "strokeColor": "#606060" })
    #gearTooth.rotate(180/n) # rotate gear 1/2 tooth to mesh, 請注意 rotate 角度為 degree
    # theta 為角度
    gearTooth.rotate(theta)
    # 單齒的齒形資料經過旋轉後, 將資料複製到 gear 物件中
    gear = gearTooth.dup()
    # gear 為單一齒的輪廓資料
    #cgo.render(gearTooth)
    # 利用單齒輪廓旋轉, 產生整個正齒輪外形
    for i in range(1, n):
        # 將 gearTooth 中的資料複製到 newTooth
        newTooth = gearTooth.dup()
        # 配合迴圈, newTooth 的齒形資料進行旋轉, 然後利用 appendPath 方法, 將資料併入 gear
        newTooth.rotate(360*i/n)
        # appendPath 為 Cango 程式庫中的方法, 第二個變數為 True, 表示要刪除最前頭的 Move to SVG Path 標註符號
        gear.appendPath(newTooth, True) # trim move command = True
    # 建立軸孔
    # add axle hole, hr 為 hole radius
    hr = 0.6*pr # diameter of gear shaft
    shaft = cobj(shapedefs.circle(hr), "PATH")
    shaft.revWinding()
    gear.appendPath(shaft) # retain the 'moveTo' command for shaft sub path
    gear.translate(cx, cy)
    # render 繪出靜態正齒輪輪廓
    cgo.render(gear)
    # 接著繪製齒輪的基準線
    deg = math.pi/180
    Line = cobj(['M', cx, cy, 'L', cx+pr*math.cos(theta*deg), cy+pr*math.sin(theta*deg)], "PATH", {
        'strokeColor':'blue',
        'lineWidth': 1})
    cgo.render(Line)
# 3個齒輪的齒數
n1 = '''+str(n1)+'''
n2 = '''+str(n2)+'''
n3 = '''+str(n3)+'''
# m 為模數, 根據畫布的寬度, 計算適合的模數大小
# Module = mm of pitch diameter per tooth
# 利用 80% 的畫布寬度進行繪圖
# 計算模數的對應尺寸
m = canvas.width*0.8/(n1+n2+n3)
# 根據齒數與模組計算各齒輪的節圓半徑
pr1 = n1*m/2
pr2 = n2*m/2
pr3 = n3*m/2
# 畫布左右兩側都保留畫布寬度的 10%
# 依此計算對應的最左邊齒輪的軸心座標
cx = canvas.width*0.1+pr1
cy = canvas.height/2
# pa 為壓力角
pa = 25
# 畫最左邊齒輪, 定位線旋轉角為 0, 軸心座標 (cx, cy)
spur(cx, cy, m, n1, pa, 0)
# 第2個齒輪將原始的定位線逆時鐘轉 180 度後, 與第1個齒輪正好齒頂與齒頂對齊
# 只要第2個齒輪再逆時鐘或順時鐘轉動半齒的角度, 即可完成囓合
# 每一個齒分別包括從齒根到齒頂的範圍, 涵蓋角度為 360/n, 因此所謂的半齒角度為 180/n
spur(cx+pr1+pr2, cy, m, n2, pa, 180-180/n2)
# 第2齒與第3齒的囓合, 首先假定第2齒的定位線在 theta 角為 0 的原始位置
# 如此, 第3齒只要逆時鐘旋轉 180 度後, 再逆時鐘或順時鐘轉動半齒的角度, 即可與第2齒囓合
# 但是第2齒為了與第一齒囓合時, 已經從原始定位線轉了 180-180/n2 度
# 而當第2齒從與第3齒囓合的定位線, 逆時鐘旋轉 180-180/n2 角度後, 原先囓合的第3齒必須要再配合旋轉 (180-180/n2 )*n2/n3
spur(cx+pr1+pr2+pr2+pr3, cy, m, n3, pa, 180-180/n3+(180-180/n2)*n2/n3)
</script>'''
    return outstring
unknown
codeparrot/codeparrot-clean
---
name: 📝 Documentation Report
description: Ask us about docs
body:
- type: markdown
  attributes:
    value: >
      **Thank you for wanting to report a problem with ansible-core
      documentation!**

      Please fill out your suggestions below.

      If the problem seems straightforward, feel free to go ahead and
      [submit a pull request] instead!

      ⚠ Verify first that your issue is not [already reported on
      GitHub][issue search].

      Also test if the latest release and devel branch are affected too.

      **Tip:** If you are seeking community support, please see
      [Communicating with the Ansible community][communication] to
      get in touch and ask questions.

      [communication]: https://docs.ansible.com/ansible/devel/community/communication.html

      [issue search]: ../search?q=is%3Aissue&type=issues

      [submit a pull request]: https://docs.ansible.com/ansible-core/devel/community/documentation_contributions.html

- type: markdown
  attributes:
    value: >
      **Check the repository for your issue.**

      Source files for Ansible community documentation are hosted in
      different repositories. Please make sure to file an issue in the
      correct project.

      Documentation for modules/plugins/etc that are officially supported
      by the Ansible Core Engineering team is available in this
      (`ansible/ansible`) repository.

      The Installation Guide, Playbook Guide, Developer Guide, and other
      documentation is available in the [ansible/ansible-documentation]
      repository.

      Documentation for other modules/plugins/etc is likely to be
      available in one of the [Ansible Collections][collections index].
      If available in the collection documentation, select the
      **Issue Tracker** button to go directly to the GitHub issues.

      [ansible/ansible-documentation]: /ansible/ansible-documentation

      [collections index]: https://docs.ansible.com/ansible/latest/collections/index.html

- type: textarea
  attributes:
    label: Summary
    description: >
      Explain the problem briefly below, and add suggestions for wording
      or structure.
**HINT:** Did you know the documentation has a `View on GitHub` link on some pages? Feel free to use it to start a pull request right from the GitHub UI! placeholder: >- I was reading the ansible-core documentation of version X and I'm having problems understanding Y. It would be very helpful if that got rephrased as Z. validations: required: true - type: dropdown attributes: label: Issue Type description: This is a marker for our automatic bot. Do not change it. options: - Documentation Report validations: required: true - type: input attributes: label: Component Name description: > Write the short name of the rst file, module, plugin, task or feature below, *use your best guess if unsure*. placeholder: lib/ansible/modules/copy.py validations: required: true - type: textarea attributes: label: Ansible Version description: >- Paste verbatim output from `ansible --version` below, under the prompt line. Please don't wrap it with triple backticks — your whole input will be turned into a code snippet automatically. render: console value: | $ ansible --version placeholder: | $ ansible --version ansible [core 2.11.0b4.post0] (detached HEAD ref: refs/) last updated 2021/04/02 00:33:35 (GMT +200) config file = None configured module search path = ['~/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = ~/src/github/ansible/ansible/lib/ansible ansible collection location = ~/.ansible/collections:/usr/share/ansible/collections executable location = bin/ansible python version = 3.9.0 (default, Oct 26 2020, 13:08:59) [GCC 10.2.0] jinja version = 2.11.3 libyaml = True validations: required: true - type: textarea attributes: label: Configuration description: >- Paste verbatim output from `ansible-config dump --only-changed -t all` below, under the prompt line. Remember to redact secret values. 
You can easily filter Galaxy server secrets using grep, for example `ansible-config dump --only-changed -t all | grep -Ev 'token|password|client_secret'`. (if using a version older than ansible-core 2.12 you should omit the '-t all') Please don't wrap it with triple backticks — your whole input will be turned into a code snippet automatically. render: console value: | # if using a version older than ansible-core 2.12 you should omit the '-t all' $ ansible-config dump --only-changed -t all placeholder: | # if using a version older than ansible-core 2.12 you should omit the '-t all' $ ansible-config dump --only-changed -t all DEFAULT_GATHERING(~/src/github/ansible/ansible/ansible.cfg) = smart DEFAULT_HOST_LIST(~/src/github/ansible/ansible/ansible.cfg) = ['~/src/github/ansible/ansible/hosts'] DEFAULT_VAULT_PASSWORD_FILE(~/src/github/ansible/ansible/ansible.cfg) = ~/src/github/ansible/ansible/vault/print-password.sh validations: required: true - type: textarea attributes: label: OS / Environment description: >- Provide all relevant information below, e.g. OS version, browser, etc. placeholder: Fedora 33, Firefox etc. validations: required: true - type: textarea attributes: label: Additional Information description: | Describe how this improves the documentation, e.g. before/after situation or screenshots. **HINT:** You can paste https://gist.github.com links for larger files. placeholder: >- When the improvement is applied, it makes it more straightforward to understand X. validations: required: true - type: markdown attributes: value: > *One last thing...* *Please, complete **all** sections as described, this form is [processed automatically by a robot][ansibot help].* Thank you for your collaboration! [ansibot help]: /ansible/ansibotmini#ansibotmini - type: checkboxes attributes: label: Code of Conduct description: | Read the [Ansible Code of Conduct][CoC] first. 
[CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--documentation_report.yml options: - label: I agree to follow the Ansible Code of Conduct required: true ...
unknown
github
https://github.com/ansible/ansible
.github/ISSUE_TEMPLATE/documentation_report.yml
vaulted_utf8_value: !vault | $ANSIBLE_VAULT;1.1;AES256 39313961356631343234656136636231663539363963386364653436346133366366633031366364 3332376636333837333036633662316135383365343335380a393331663434663238666537343163 62363561336431623666633735313766613663333736653064373632666131356434336537383336 3333343436613232330a643461363831633166333237653530353131316361643465353132616362 3461
unknown
github
https://github.com/ansible/ansible
test/integration/targets/ansible-vault/host_vars/testhost.yml
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package views import ( "fmt" "strings" "github.com/hashicorp/terraform/internal/command/arguments" "github.com/hashicorp/terraform/internal/command/format" "github.com/hashicorp/terraform/internal/plans" "github.com/hashicorp/terraform/internal/states/statefile" "github.com/hashicorp/terraform/internal/terraform" "github.com/hashicorp/terraform/internal/tfdiags" ) func NewQueryOperation(vt arguments.ViewType, inAutomation bool, view *View) Operation { switch vt { case arguments.ViewHuman: return &QueryOperationHuman{view: view, inAutomation: inAutomation} default: panic(fmt.Sprintf("unknown view type %v", vt)) } } type QueryOperationHuman struct { view *View // inAutomation indicates that commands are being run by an // automated system rather than directly at a command prompt. // // This is a hint not to produce messages that expect that a user can // run a follow-up command, perhaps because Terraform is running in // some sort of workflow automation tool that abstracts away the // exact commands that are being run. inAutomation bool } var _ Operation = (*QueryOperationHuman)(nil) func (v *QueryOperationHuman) Interrupted() { v.view.streams.Println(format.WordWrap(interrupted, v.view.outputColumns())) } func (v *QueryOperationHuman) FatalInterrupt() { v.view.streams.Eprintln(format.WordWrap(fatalInterrupt, v.view.errorColumns())) } func (v *QueryOperationHuman) Stopping() { v.view.streams.Println("Stopping operation...") } func (v *QueryOperationHuman) Cancelled(planMode plans.Mode) { v.view.streams.Println("Query cancelled.") } func (v *QueryOperationHuman) EmergencyDumpState(stateFile *statefile.File) error { return nil } func (v *QueryOperationHuman) Plan(plan *plans.Plan, schemas *terraform.Schemas) { // The hook for individual query blocks do not display any output when the results are empty, // so we will display a grouped warning message here for the empty queries. 
emptyBlocks := []string{} for _, query := range plan.Changes.Queries { pSchema := schemas.ProviderSchema(query.ProviderAddr.Provider) addr := query.Addr schema := pSchema.ListResourceTypes[addr.Resource.Resource.Type] results, err := query.Decode(schema) if err != nil { v.view.streams.Eprintln(err) continue } data := results.Results.Value.GetAttr("data") if data.LengthInt() == 0 { emptyBlocks = append(emptyBlocks, addr.String()) } } if len(emptyBlocks) > 0 { msg := fmt.Sprintf(v.view.colorize.Color("[bold][yellow]Warning:[reset][bold] list block(s) [%s] returned 0 results.\n"), strings.Join(emptyBlocks, ", ")) v.view.streams.Println(format.WordWrap(msg, v.view.outputColumns())) } } func (v *QueryOperationHuman) PlannedChange(change *plans.ResourceInstanceChangeSrc) { } func (v *QueryOperationHuman) PlanNextStep(planPath string, genConfigPath string) { } func (v *QueryOperationHuman) Diagnostics(diags tfdiags.Diagnostics) { v.view.Diagnostics(diags) } type QueryOperationJSON struct { view *JSONView } var _ Operation = (*QueryOperationJSON)(nil) func (v *QueryOperationJSON) Interrupted() { v.view.Log(interrupted) } func (v *QueryOperationJSON) FatalInterrupt() { v.view.Log(fatalInterrupt) } func (v *QueryOperationJSON) Stopping() { v.view.Log("Stopping operation...") } func (v *QueryOperationJSON) Cancelled(planMode plans.Mode) { v.view.Log("Query cancelled") } func (v *QueryOperationJSON) EmergencyDumpState(stateFile *statefile.File) error { return nil } func (v *QueryOperationJSON) Plan(plan *plans.Plan, schemas *terraform.Schemas) { } func (v *QueryOperationJSON) PlannedChange(change *plans.ResourceInstanceChangeSrc) { } func (v *QueryOperationJSON) PlanNextStep(planPath string, genConfigPath string) { } func (v *QueryOperationJSON) Diagnostics(diags tfdiags.Diagnostics) { v.view.Diagnostics(diags) }
go
github
https://github.com/hashicorp/terraform
internal/command/views/query_operation.go
apiVersion: v1 kind: List items: - kind: Foo apiVersion: company.com/v1 metadata: name: test-list labels: pruneGroup: "true" someField: modifiedField - kind: Bar apiVersion: company.com/v1 metadata: name: test-list labels: pruneGroup: "true" someField: modifiedField
unknown
github
https://github.com/kubernetes/kubernetes
hack/testdata/CRD/multi-crd-list-deleted-field.yaml
""" Tests for contentstore/views/user.py. """ import json from contentstore.tests.utils import CourseTestCase from django.contrib.auth.models import User from student.models import CourseEnrollment from xmodule.modulestore.django import loc_mapper from student.roles import CourseStaffRole, CourseInstructorRole from student import auth class UsersTestCase(CourseTestCase): def setUp(self): super(UsersTestCase, self).setUp() self.ext_user = User.objects.create_user( "joe", "joe@comedycentral.com", "haha") self.ext_user.is_active = True self.ext_user.is_staff = False self.ext_user.save() self.inactive_user = User.objects.create_user( "carl", "carl@comedycentral.com", "haha") self.inactive_user.is_active = False self.inactive_user.is_staff = False self.inactive_user.save() self.location = loc_mapper().translate_location(self.course.location.course_id, self.course.location, False, True) self.index_url = self.location.url_reverse('course_team', '') self.detail_url = self.location.url_reverse('course_team', self.ext_user.email) self.inactive_detail_url = self.location.url_reverse('course_team', self.inactive_user.email) self.invalid_detail_url = self.location.url_reverse('course_team', "nonexistent@user.com") def test_index(self): resp = self.client.get(self.index_url, HTTP_ACCEPT='text/html') # ext_user is not currently a member of the course team, and so should # not show up on the page. 
self.assertNotContains(resp, self.ext_user.email) def test_index_member(self): auth.add_users(self.user, CourseStaffRole(self.course_locator), self.ext_user) resp = self.client.get(self.index_url, HTTP_ACCEPT='text/html') self.assertContains(resp, self.ext_user.email) def test_detail(self): resp = self.client.get(self.detail_url) self.assertEqual(resp.status_code, 200) result = json.loads(resp.content) self.assertEqual(result["role"], None) self.assertTrue(result["active"]) def test_detail_inactive(self): resp = self.client.get(self.inactive_detail_url) self.assertEqual(resp.status_code, 200) result = json.loads(resp.content) self.assertFalse(result["active"]) def test_detail_invalid(self): resp = self.client.get(self.invalid_detail_url) self.assertEqual(resp.status_code, 404) result = json.loads(resp.content) self.assertIn("error", result) def test_detail_post(self): resp = self.client.post( self.detail_url, data={"role": None}, ) self.assertEqual(resp.status_code, 204) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) # no content: should not be in any roles self.assertFalse(auth.has_access(ext_user, CourseStaffRole(self.course_locator))) self.assertFalse(auth.has_access(ext_user, CourseInstructorRole(self.course_locator))) self.assert_not_enrolled() def test_detail_post_staff(self): resp = self.client.post( self.detail_url, data=json.dumps({"role": "staff"}), content_type="application/json", HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 204) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertTrue(auth.has_access(ext_user, CourseStaffRole(self.course_locator))) self.assertFalse(auth.has_access(ext_user, CourseInstructorRole(self.course_locator))) self.assert_enrolled() def test_detail_post_staff_other_inst(self): auth.add_users(self.user, CourseInstructorRole(self.course_locator), self.user) resp = self.client.post( self.detail_url, data=json.dumps({"role": "staff"}), 
content_type="application/json", HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 204) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertTrue(auth.has_access(ext_user, CourseStaffRole(self.course_locator))) self.assertFalse(auth.has_access(ext_user, CourseInstructorRole(self.course_locator))) self.assert_enrolled() # check that other user is unchanged user = User.objects.get(email=self.user.email) self.assertTrue(auth.has_access(user, CourseInstructorRole(self.course_locator))) self.assertFalse(CourseStaffRole(self.course_locator).has_user(user)) def test_detail_post_instructor(self): resp = self.client.post( self.detail_url, data=json.dumps({"role": "instructor"}), content_type="application/json", HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 204) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertTrue(auth.has_access(ext_user, CourseInstructorRole(self.course_locator))) self.assertFalse(CourseStaffRole(self.course_locator).has_user(ext_user)) self.assert_enrolled() def test_detail_post_missing_role(self): resp = self.client.post( self.detail_url, data=json.dumps({"toys": "fun"}), content_type="application/json", HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 400) result = json.loads(resp.content) self.assertIn("error", result) self.assert_not_enrolled() def test_detail_post_no_json(self): resp = self.client.post( self.detail_url, data={"role": "staff"}, HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 204) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertTrue(auth.has_access(ext_user, CourseStaffRole(self.course_locator))) self.assertFalse(auth.has_access(ext_user, CourseInstructorRole(self.course_locator))) self.assert_enrolled() def test_detail_delete_staff(self): auth.add_users(self.user, CourseStaffRole(self.course_locator), self.ext_user) resp = 
self.client.delete( self.detail_url, HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 204) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertFalse(auth.has_access(ext_user, CourseStaffRole(self.course_locator))) def test_detail_delete_instructor(self): auth.add_users(self.user, CourseInstructorRole(self.course_locator), self.ext_user, self.user) resp = self.client.delete( self.detail_url, HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 204) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertFalse(auth.has_access(ext_user, CourseInstructorRole(self.course_locator))) def test_delete_last_instructor(self): auth.add_users(self.user, CourseInstructorRole(self.course_locator), self.ext_user) resp = self.client.delete( self.detail_url, HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 400) result = json.loads(resp.content) self.assertIn("error", result) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertTrue(auth.has_access(ext_user, CourseInstructorRole(self.course_locator))) def test_post_last_instructor(self): auth.add_users(self.user, CourseInstructorRole(self.course_locator), self.ext_user) resp = self.client.post( self.detail_url, data={"role": "staff"}, HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 400) result = json.loads(resp.content) self.assertIn("error", result) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertTrue(auth.has_access(ext_user, CourseInstructorRole(self.course_locator))) def test_permission_denied_self(self): auth.add_users(self.user, CourseStaffRole(self.course_locator), self.user) self.user.is_staff = False self.user.save() self_url = self.location.url_reverse('course_team', self.user.email) resp = self.client.post( self_url, data={"role": "instructor"}, HTTP_ACCEPT="application/json", ) 
self.assertEqual(resp.status_code, 400) result = json.loads(resp.content) self.assertIn("error", result) def test_permission_denied_other(self): auth.add_users(self.user, CourseStaffRole(self.course_locator), self.user) self.user.is_staff = False self.user.save() resp = self.client.post( self.detail_url, data={"role": "instructor"}, HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 400) result = json.loads(resp.content) self.assertIn("error", result) def test_staff_can_delete_self(self): auth.add_users(self.user, CourseStaffRole(self.course_locator), self.user) self.user.is_staff = False self.user.save() self_url = self.location.url_reverse('course_team', self.user.email) resp = self.client.delete(self_url) self.assertEqual(resp.status_code, 204) # reload user from DB user = User.objects.get(email=self.user.email) self.assertFalse(auth.has_access(user, CourseStaffRole(self.course_locator))) def test_staff_cannot_delete_other(self): auth.add_users(self.user, CourseStaffRole(self.course_locator), self.user, self.ext_user) self.user.is_staff = False self.user.save() resp = self.client.delete(self.detail_url) self.assertEqual(resp.status_code, 400) result = json.loads(resp.content) self.assertIn("error", result) # reload user from DB ext_user = User.objects.get(email=self.ext_user.email) self.assertTrue(auth.has_access(ext_user, CourseStaffRole(self.course_locator))) def test_user_not_initially_enrolled(self): # Verify that ext_user is not enrolled in the new course before being added as a staff member. self.assert_not_enrolled() def test_remove_staff_does_not_unenroll(self): # Add user with staff permissions. self.client.post( self.detail_url, data=json.dumps({"role": "staff"}), content_type="application/json", HTTP_ACCEPT="application/json", ) self.assert_enrolled() # Remove user from staff on course. Will not un-enroll them from the course. 
resp = self.client.delete( self.detail_url, HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 204) self.assert_enrolled() def test_staff_to_instructor_still_enrolled(self): # Add user with staff permission. self.client.post( self.detail_url, data=json.dumps({"role": "staff"}), content_type="application/json", HTTP_ACCEPT="application/json", ) self.assert_enrolled() # Now add with instructor permission. Verify still enrolled. resp = self.client.post( self.detail_url, data=json.dumps({"role": "instructor"}), content_type="application/json", HTTP_ACCEPT="application/json", ) self.assertEqual(resp.status_code, 204) self.assert_enrolled() def assert_not_enrolled(self): """ Asserts that self.ext_user is not enrolled in self.course. """ self.assertFalse( CourseEnrollment.is_enrolled(self.ext_user, self.course.location.course_id), 'Did not expect ext_user to be enrolled in course' ) def assert_enrolled(self): """ Asserts that self.ext_user is enrolled in self.course. """ self.assertTrue( CourseEnrollment.is_enrolled(self.ext_user, self.course.location.course_id), 'User ext_user should have been enrolled in the course' )
unknown
codeparrot/codeparrot-clean
# mssql/information_schema.py # Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # TODO: should be using the sys. catalog with SQL Server, not information schema from sqlalchemy import Table, MetaData, Column from sqlalchemy.types import String, Unicode, Integer, TypeDecorator ischema = MetaData() class CoerceUnicode(TypeDecorator): impl = Unicode def process_bind_param(self, value, dialect): if isinstance(value, str): value = value.decode(dialect.encoding) return value schemata = Table("SCHEMATA", ischema, Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"), Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"), Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"), schema="INFORMATION_SCHEMA") tables = Table("TABLES", ischema, Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("TABLE_TYPE", String(convert_unicode=True), key="table_type"), schema="INFORMATION_SCHEMA") columns = Table("COLUMNS", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("COLUMN_NAME", CoerceUnicode, key="column_name"), Column("IS_NULLABLE", Integer, key="is_nullable"), Column("DATA_TYPE", String, key="data_type"), Column("ORDINAL_POSITION", Integer, key="ordinal_position"), Column("CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"), Column("NUMERIC_PRECISION", Integer, key="numeric_precision"), Column("NUMERIC_SCALE", Integer, key="numeric_scale"), Column("COLUMN_DEFAULT", Integer, key="column_default"), Column("COLLATION_NAME", String, key="collation_name"), schema="INFORMATION_SCHEMA") constraints = Table("TABLE_CONSTRAINTS", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), 
Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), Column("CONSTRAINT_TYPE", String(convert_unicode=True), key="constraint_type"), schema="INFORMATION_SCHEMA") column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("COLUMN_NAME", CoerceUnicode, key="column_name"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), schema="INFORMATION_SCHEMA") key_constraints = Table("KEY_COLUMN_USAGE", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("COLUMN_NAME", CoerceUnicode, key="column_name"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), Column("ORDINAL_POSITION", Integer, key="ordinal_position"), schema="INFORMATION_SCHEMA") ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema, Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"), Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), # TODO: is CATLOG misspelled ? 
Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode, key="unique_constraint_catalog"), Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode, key="unique_constraint_schema"), Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode, key="unique_constraint_name"), Column("MATCH_OPTION", String, key="match_option"), Column("UPDATE_RULE", String, key="update_rule"), Column("DELETE_RULE", String, key="delete_rule"), schema="INFORMATION_SCHEMA") views = Table("VIEWS", ischema, Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"), Column("CHECK_OPTION", String, key="check_option"), Column("IS_UPDATABLE", String, key="is_updatable"), schema="INFORMATION_SCHEMA")
unknown
codeparrot/codeparrot-clean
import sys from test import test_support, list_tests class ListTest(list_tests.CommonTest): type2test = list def test_basic(self): self.assertEqual(list([]), []) l0_3 = [0, 1, 2, 3] l0_3_bis = list(l0_3) self.assertEqual(l0_3, l0_3_bis) self.assert_(l0_3 is not l0_3_bis) self.assertEqual(list(()), []) self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3]) self.assertEqual(list(''), []) self.assertEqual(list('spam'), ['s', 'p', 'a', 'm']) if sys.maxsize == 0x7fffffff: # This test can currently only work on 32-bit machines. # XXX If/when PySequence_Length() returns a ssize_t, it should be # XXX re-enabled. # Verify clearing of bug #556025. # This assumes that the max data size (sys.maxint) == max # address size this also assumes that the address size is at # least 4 bytes with 8 byte addresses, the bug is not well # tested # # Note: This test is expected to SEGV under Cygwin 1.3.12 or # earlier due to a newlib bug. See the following mailing list # thread for the details: # http://sources.redhat.com/ml/newlib/2002/msg00369.html self.assertRaises(MemoryError, list, xrange(sys.maxint // 2)) # This code used to segfault in Py2.4a3 x = [] x.extend(-y for y in x) self.assertEqual(x, []) def test_truth(self): super(ListTest, self).test_truth() self.assert_(not []) self.assert_([42]) def test_identity(self): self.assert_([] is not []) def test_len(self): super(ListTest, self).test_len() self.assertEqual(len([]), 0) self.assertEqual(len([0]), 1) self.assertEqual(len([0, 1, 2]), 3) def test_overflow(self): lst = [4, 5, 6, 7] n = int((sys.maxint*2+2) // len(lst)) def mul(a, b): return a * b def imul(a, b): a *= b self.assertRaises((MemoryError, OverflowError), mul, lst, n) self.assertRaises((MemoryError, OverflowError), imul, lst, n) def test_main(verbose=None): test_support.run_unittest(ListTest) # verify reference counting import sys if verbose and hasattr(sys, "gettotalrefcount"): import gc counts = [None] * 5 for i in xrange(len(counts)): test_support.run_unittest(ListTest) 
gc.collect() counts[i] = sys.gettotalrefcount() print counts if __name__ == "__main__": test_main(verbose=True)
unknown
codeparrot/codeparrot-clean
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Model subclassing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import six from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.platform import test from tensorflow.python.training.checkpointable import data_structures from tensorflow.python.training.rmsprop import RMSPropOptimizer try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None # pylint: disable=not-callable class SimpleTestModel(keras.Model): def __init__(self, use_bn=False, use_dp=False, num_classes=10): super(SimpleTestModel, self).__init__(name='test_model') self.use_bn = use_bn self.use_dp = use_dp self.num_classes = num_classes self.dense1 = keras.layers.Dense(32, activation='relu') self.dense2 = keras.layers.Dense(num_classes, activation='softmax') if self.use_dp: self.dp = keras.layers.Dropout(0.5) if 
self.use_bn: self.bn = keras.layers.BatchNormalization(axis=-1) def call(self, x): x = self.dense1(x) if self.use_dp: x = self.dp(x) if self.use_bn: x = self.bn(x) return self.dense2(x) class SimpleConvTestModel(keras.Model): def __init__(self, num_classes=10): super(SimpleConvTestModel, self).__init__(name='test_model') self.num_classes = num_classes self.conv1 = keras.layers.Conv2D(32, (3, 3), activation='relu') self.flatten = keras.layers.Flatten() self.dense1 = keras.layers.Dense(num_classes, activation='softmax') def call(self, x): x = self.conv1(x) x = self.flatten(x) return self.dense1(x) class MultiIOTestModel(keras.Model): def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)): super(MultiIOTestModel, self).__init__(name='test_model') self.use_bn = use_bn self.use_dp = use_dp self.num_classes = num_classes self.dense1 = keras.layers.Dense(32, activation='relu') self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax') self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax') if use_dp: self.dp = keras.layers.Dropout(0.5) if use_bn: self.bn = keras.layers.BatchNormalization() def call(self, inputs): x1, x2 = inputs x1 = self.dense1(x1) x2 = self.dense1(x2) if self.use_dp: x1 = self.dp(x1) if self.use_bn: x2 = self.bn(x2) return [self.dense2(x1), self.dense3(x2)] class NestedTestModel1(keras.Model): """A model subclass nested inside a model subclass. """ def __init__(self, num_classes=2): super(NestedTestModel1, self).__init__(name='nested_model_1') self.num_classes = num_classes self.dense1 = keras.layers.Dense(32, activation='relu') self.dense2 = keras.layers.Dense(num_classes, activation='relu') self.bn = keras.layers.BatchNormalization() self.test_net = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True) def call(self, inputs): x = self.dense1(inputs) x = self.bn(x) x = self.test_net(x) return self.dense2(x) def get_functional_graph_model(input_dim, num_classes): # A simple functional-API model (a.k.a. 
graph network) inputs = keras.Input(shape=(input_dim,)) x = keras.layers.Dense(32, activation='relu')(inputs) x = keras.layers.BatchNormalization()(x) outputs = keras.layers.Dense(num_classes)(x) return keras.Model(inputs, outputs) class NestedTestModel2(keras.Model): """A model subclass with a functional-API graph network inside. """ def __init__(self, num_classes=2): super(NestedTestModel2, self).__init__(name='nested_model_2') self.num_classes = num_classes self.dense1 = keras.layers.Dense(32, activation='relu') self.dense2 = keras.layers.Dense(num_classes, activation='relu') self.bn = self.bn = keras.layers.BatchNormalization() self.test_net = get_functional_graph_model(32, 4) def call(self, inputs): x = self.dense1(inputs) x = self.bn(x) x = self.test_net(x) return self.dense2(x) def get_nested_model_3(input_dim, num_classes): # A functional-API model with a subclassed model inside. # NOTE: this requires the inner subclass to implement `compute_output_shape`. inputs = keras.Input(shape=(input_dim,)) x = keras.layers.Dense(32, activation='relu')(inputs) x = keras.layers.BatchNormalization()(x) class Inner(keras.Model): def __init__(self): super(Inner, self).__init__() self.dense1 = keras.layers.Dense(32, activation='relu') self.dense2 = keras.layers.Dense(5, activation='relu') self.bn = keras.layers.BatchNormalization() def call(self, inputs): x = self.dense1(inputs) x = self.dense2(x) return self.bn(x) test_model = Inner() x = test_model(x) outputs = keras.layers.Dense(num_classes)(x) return keras.Model(inputs, outputs, name='nested_model_3') class ModelSubclassingTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_custom_build(self): class DummyModel(keras.Model): def __init__(self): super(DummyModel, self).__init__() self.dense1 = keras.layers.Dense(32, activation='relu') self.uses_custom_build = False def call(self, inputs): return self.dense1(inputs) def build(self, input_shape): self.uses_custom_build = True test_model = DummyModel() 
dummy_data = array_ops.ones((32, 50)) test_model(dummy_data) self.assertTrue(test_model.uses_custom_build, 'Model should use user ' 'defined build when called.') @test_util.run_in_graph_and_eager_modes def test_invalid_input_shape_build(self): num_classes = 2 input_dim = 50 model = SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True) self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) with self.assertRaisesRegexp( ValueError, 'input shape is not one of the valid types'): model.build(input_shape=tensor_shape.Dimension(input_dim)) @test_util.run_in_graph_and_eager_modes def test_embed_dtype_with_subclass_build(self): class Embedding(keras.layers.Layer): """An Embedding layer.""" def __init__(self, vocab_size, embedding_dim, **kwargs): super(Embedding, self).__init__(**kwargs) self.vocab_size = vocab_size self.embedding_dim = embedding_dim def build(self, _): self.embedding = self.add_variable( 'embedding_kernel', shape=[self.vocab_size, self.embedding_dim], dtype=np.float32, initializer=init_ops.random_uniform_initializer(-0.1, 0.1), trainable=True) def call(self, x): return embedding_ops.embedding_lookup(self.embedding, x) class EmbedModel(keras.Model): def __init__(self, vocab_size, embed_size): super(EmbedModel, self).__init__() self.embed1 = Embedding(vocab_size, embed_size) def call(self, inputs): return self.embed1(inputs) model = EmbedModel(100, 20) self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) with self.assertRaisesRegexp( ValueError, 'if your layers do not support float type inputs'): model.build(input_shape=(35, 20)) @test_util.run_in_graph_and_eager_modes def test_single_time_step_rnn_build(self): dim = 4 timesteps = 1 batch_input_shape = (None, timesteps, dim) units = 3 class SimpleRNNModel(keras.Model): def 
__init__(self): super(SimpleRNNModel, self).__init__() self.lstm = keras.layers.LSTM(units) def call(self, inputs): return self.lstm(inputs) model = SimpleRNNModel() self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) model.build(batch_input_shape) self.assertTrue(model.weights, ('Model should have weights now that it ' 'has been properly built.')) self.assertTrue(model.built, 'Model should be built after calling `build`.') model(array_ops.ones((32, timesteps, dim))) @test_util.run_in_graph_and_eager_modes def test_single_io_subclass_build(self): num_classes = 2 input_dim = 50 batch_size = None model = SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True) self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) model.build(input_shape=(batch_size, input_dim)) self.assertTrue(model.weights, ('Model should have weights now that it ' 'has been properly built.')) self.assertTrue(model.built, 'Model should be built after calling `build`.') model(array_ops.ones((32, input_dim))) @test_util.run_in_graph_and_eager_modes def test_single_io_dimension_subclass_build(self): num_classes = 2 input_dim = tensor_shape.Dimension(50) batch_size = tensor_shape.Dimension(None) model = SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True) self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) model.build(input_shape=(batch_size, input_dim)) self.assertTrue(model.weights, ('Model should have weights now that it ' 'has been properly built.')) self.assertTrue(model.built, 'Model should be built after calling `build`.') model(array_ops.ones((32, input_dim))) @test_util.run_in_graph_and_eager_modes def test_multidim_io_subclass_build(self): 
num_classes = 10 # Input size, e.g. image batch_size = 32 input_shape = (32, 32, 3) model = SimpleConvTestModel(num_classes) self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) batch_input_shape = (batch_size,) + input_shape model.build(input_shape=batch_input_shape) self.assertTrue(model.weights, ('Model should have weights now that it ' 'has been properly built.')) self.assertTrue(model.built, 'Model should be built after calling `build`.') model(array_ops.ones(batch_input_shape)) @test_util.run_in_graph_and_eager_modes def test_tensorshape_io_subclass_build(self): num_classes = 10 # Input size, e.g. image batch_size = None input_shape = (32, 32, 3) model = SimpleConvTestModel(num_classes) self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) model.build( input_shape=tensor_shape.TensorShape((batch_size,) + input_shape)) self.assertTrue(model.weights, ('Model should have weights now that it ' 'has been properly built.')) self.assertTrue(model.built, 'Model should be built after calling `build`.') model(array_ops.ones((32,) + input_shape)) def test_subclass_save_model(self): num_classes = 10 # Input size, e.g. 
image batch_size = None input_shape = (32, 32, 3) model = SimpleConvTestModel(num_classes) self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) model.build( input_shape=tensor_shape.TensorShape((batch_size,) + input_shape)) self.assertTrue(model.weights, ('Model should have weights now that it ' 'has been properly built.')) self.assertTrue(model.built, 'Model should be built after calling `build`.') weights = model.get_weights() tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt') model.save_weights(tf_format_name) if h5py is not None: hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5') model.save_weights(hdf5_format_name) model = SimpleConvTestModel(num_classes) model.build( input_shape=tensor_shape.TensorShape((batch_size,) + input_shape)) if h5py is not None: model.load_weights(hdf5_format_name) self.assertAllClose(weights, model.get_weights()) model.load_weights(tf_format_name) self.assertAllClose(weights, model.get_weights()) @test_util.run_in_graph_and_eager_modes def test_multi_io_subclass_build(self): batch_size = None num_samples = 1000 input_dim = 50 model = MultiIOTestModel() self.assertFalse(model.built, 'Model should not have been built') self.assertFalse(model.weights, ('Model should have no weights since it ' 'has not been built.')) batch_input_shape = tensor_shape.TensorShape((batch_size, input_dim)) model.build( input_shape=[batch_input_shape, batch_input_shape]) self.assertTrue(model.weights, ('Model should have weights now that it ' 'has been properly built.')) self.assertTrue(model.built, 'Model should be built after calling `build`.') x1 = array_ops.ones((num_samples, input_dim)) x2 = array_ops.ones((num_samples, input_dim)) model([x1, x2]) @test_util.run_in_graph_and_eager_modes def test_single_io_workflow_with_np_arrays(self): num_classes = 2 num_samples = 100 input_dim = 50 model = 
SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True) model.compile( loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001), metrics=['acc', keras.metrics.CategoricalAccuracy()]) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) @test_util.run_in_graph_and_eager_modes def test_multi_io_workflow_with_np_arrays(self): num_classes = (2, 3) num_samples = 1000 input_dim = 50 model = MultiIOTestModel(num_classes=num_classes, use_dp=True, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001), metrics=['acc']) x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) _ = model.evaluate([x1, x2], [y1, y2], verbose=0) def test_single_io_workflow_with_tensors(self): num_classes = 2 num_samples = 10 input_dim = 50 with self.cached_session(): model = SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) x = array_ops.ones((num_samples, input_dim)) y = array_ops.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, steps_per_epoch=10, verbose=0) _ = model.evaluate(steps=10, verbose=0) def test_multi_io_workflow_with_tensors(self): num_classes = (2, 3) num_samples = 10 input_dim = 50 with self.cached_session(): model = MultiIOTestModel(num_classes=num_classes, use_dp=True, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) x1 = array_ops.ones((num_samples, input_dim)) x2 = array_ops.ones((num_samples, input_dim)) y1 = array_ops.zeros((num_samples, num_classes[0])) y2 = array_ops.zeros((num_samples, num_classes[1])) model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0) _ = model.evaluate(steps=10, 
verbose=0) @test_util.run_in_graph_and_eager_modes def test_single_io_workflow_with_dataset_iterators(self): num_classes = 2 num_samples = 10 input_dim = 50 with self.cached_session(): model = SimpleTestModel(num_classes=num_classes, use_dp=True, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) dataset = dataset_ops.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) iterator = dataset.make_one_shot_iterator() model.fit(iterator, epochs=2, steps_per_epoch=10, verbose=0) _ = model.evaluate(iterator, steps=10, verbose=0) def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self): num_classes = (2, 3) num_samples = 1000 input_dim = 50 with self.cached_session(): model = MultiIOTestModel(num_classes=num_classes, use_dp=True, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) x2_placeholder = array_ops.placeholder( dtype='float32', shape=(None, input_dim)) model._set_inputs([x1, x2_placeholder]) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) _ = model.evaluate([x1, x2], [y1, y2], verbose=0) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def test_attributes(self): # layers, weights, trainable_weights, non_trainable_weights, inputs, outputs num_classes = (2, 3) num_samples = 100 input_dim = 50 model = MultiIOTestModel(num_classes=num_classes, use_bn=True) x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) self.assertEqual(model.name, 'test_model') self.assertEqual(model.built, False) self.assertEqual(len(model.weights), 0) 
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) model.train_on_batch([x1, x2], [y1, y2]) self.assertEqual(model.built, True) self.assertEqual(len(model.layers), 4) self.assertEqual(len(model.weights), 10) self.assertEqual(len(model.trainable_weights), 8) self.assertEqual(len(model.non_trainable_weights), 2) self.assertEqual(len(model.inputs), 2) self.assertEqual(len(model.outputs), 2) @test_util.run_in_graph_and_eager_modes def test_updates(self): # test that updates get run during training num_samples = 100 input_dim = 50 class BNNet(keras.Model): def __init__(self): super(BNNet, self).__init__() self.bn = keras.layers.BatchNormalization(beta_initializer='ones', gamma_initializer='ones') def call(self, inputs): return self.bn(inputs) x = np.ones((num_samples, input_dim)) y = np.ones((num_samples, input_dim)) model = BNNet() model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) y_ref = model.predict(x) model.train_on_batch(x, y) y_new = model.predict(x) self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1) def test_updates_and_losses_for_nested_models_in_subclassed_model(self): # Case 1: deferred-build sequential nested in subclass. class TestModel1(keras.Model): def __init__(self): super(TestModel1, self).__init__() self.fc = keras.layers.Dense(10, input_shape=(784,), activity_regularizer='l1') self.bn = keras.Sequential([keras.layers.BatchNormalization(axis=1)]) def call(self, x): return self.bn(self.fc(x)) with self.cached_session(): model = TestModel1() x = array_ops.ones(shape=[100, 784], dtype='float32') model(x) self.assertEqual(len(model.get_updates_for(x)), 2) self.assertEqual(len(model.get_losses_for(x)), 1) # Case 2: placeholder-sequential nested in subclass. 
class TestModel2(keras.Model): def __init__(self): super(TestModel2, self).__init__() self.fc = keras.layers.Dense(10, input_shape=(784,), activity_regularizer='l1') self.bn = keras.Sequential( [keras.layers.BatchNormalization(axis=1, input_shape=(10,))]) def call(self, x): return self.bn(self.fc(x)) with self.cached_session(): model = TestModel2() x = array_ops.ones(shape=[100, 784], dtype='float32') model(x) self.assertEqual(len(model.get_updates_for(x)), 2) self.assertEqual(len(model.get_losses_for(x)), 1) # Case 3: functional-API model nested in subclass. inputs = keras.Input((10,)) outputs = keras.layers.BatchNormalization(axis=1)(inputs) bn = keras.Model(inputs, outputs) class TestModel3(keras.Model): def __init__(self): super(TestModel3, self).__init__() self.fc = keras.layers.Dense(10, input_shape=(784,), activity_regularizer='l1') self.bn = bn def call(self, x): return self.bn(self.fc(x)) with self.cached_session(): model = TestModel3() x = array_ops.ones(shape=[100, 784], dtype='float32') model(x) self.assertEqual(len(model.get_updates_for(x)), 2) self.assertEqual(len(model.get_losses_for(x)), 1) @test_util.run_in_graph_and_eager_modes def test_training_and_inference_behavior(self): # test that dropout is applied in training and not inference num_samples = 100 input_dim = 50 class DPNet(keras.Model): def __init__(self): super(DPNet, self).__init__() self.dp = keras.layers.Dropout(0.5) self.dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones') def call(self, inputs): x = self.dp(inputs) return self.dense(x) model = DPNet() x = np.ones((num_samples, input_dim)) y = model.predict(x) self.assertEqual(np.sum(y), np.sum(x)) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) loss = model.train_on_batch(x, y) self.assertGreater(loss, 0.1) @test_util.run_in_graph_and_eager_modes def test_training_methods(self): # test fit, train_on_batch # on different input types: list, dict num_classes = (2, 3) num_samples = 100 
input_dim = 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = MultiIOTestModel(num_classes=num_classes, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) model.fit({'input_1': x1, 'input_2': x2}, {'output_1': y1, 'output_2': y2}, epochs=2, batch_size=32) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0, validation_data=([x1, x2], [y1, y2])) model = MultiIOTestModel(num_classes=num_classes, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) model.train_on_batch([x1, x2], [y1, y2]) model.train_on_batch({'input_1': x1, 'input_2': x2}, {'output_1': y1, 'output_2': y2}) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def test_inference_methods(self): # test predict, evaluate, test_on_batch, predict_on_batch # on different input types: list, dict num_classes = (2, 3) num_samples = 100 input_dim = 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = MultiIOTestModel(num_classes=num_classes, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) model.evaluate([x1, x2], [y1, y2]) model.test_on_batch([x1, x2], [y1, y2]) model = MultiIOTestModel(num_classes=num_classes, use_bn=True) model.predict([x1, x2]) model = MultiIOTestModel(num_classes=num_classes, use_bn=True) model.predict_on_batch([x1, x2]) @test_util.run_in_graph_and_eager_modes def test_trainable_mutation(self): # test that you can change `trainable` on a model or layer, and that # it freezes the model state during training # TODO(fchollet): add test after we unify BN behavior in eager and symbolic. 
pass @test_util.run_in_graph_and_eager_modes def test_saving(self): num_classes = (2, 3) num_samples = 100 input_dim = 50 x1 = np.ones((num_samples, input_dim)) x2 = np.ones((num_samples, input_dim)) y1 = np.zeros((num_samples, num_classes[0])) y2 = np.zeros((num_samples, num_classes[1])) model = MultiIOTestModel(num_classes=num_classes, use_bn=True) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0) y_ref_1, y_ref_2 = model.predict([x1, x2]) tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt') model.save_weights(tf_format_name) if h5py is not None: hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5') model.save_weights(hdf5_format_name) model = MultiIOTestModel(num_classes=num_classes, use_bn=True) if h5py is not None: with self.assertRaises(ValueError): model.load_weights(hdf5_format_name) model.load_weights(tf_format_name) y1, y2 = model.predict([x1, x2]) self.assertAllClose(y_ref_1, y1, atol=1e-5) self.assertAllClose(y_ref_2, y2, atol=1e-5) if h5py is not None: model.load_weights(hdf5_format_name) y1, y2 = model.predict([x1, x2]) self.assertAllClose(y_ref_1, y1, atol=1e-5) self.assertAllClose(y_ref_2, y2, atol=1e-5) @test_util.run_in_graph_and_eager_modes def test_summary(self): class ToString(object): def __init__(self): self.contents = '' def __call__(self, msg): self.contents += msg + '\n' # Single-io model = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True) model._set_inputs(np.ones((3, 4))) # need to build model first print_fn = ToString() model.summary(print_fn=print_fn) self.assertTrue('Trainable params: 356' in print_fn.contents) # Multi-io model = MultiIOTestModel(num_classes=(5, 6), use_bn=True, use_dp=True) model._set_inputs([np.ones((3, 4)), np.ones((3, 4))]) # need to build model first print_fn = ToString() model.summary(print_fn=print_fn) self.assertTrue('Trainable params: 587' in print_fn.contents) 
@test_util.run_in_graph_and_eager_modes def test_subclass_nested_in_subclass(self): num_classes = 2 num_samples = 100 input_dim = 50 model = NestedTestModel1(num_classes=num_classes) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001), metrics=['acc']) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 8 + len(model.test_net.weights)) self.assertEqual(len(model.non_trainable_weights), 2 + len(model.test_net.non_trainable_weights)) self.assertEqual(len(model.trainable_weights), 6 + len(model.test_net.trainable_weights)) @test_util.run_in_graph_and_eager_modes def test_graph_nested_in_subclass(self): num_classes = 2 num_samples = 100 input_dim = 50 model = NestedTestModel2(num_classes=num_classes) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001), metrics=['acc']) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 8 + len(model.test_net.weights)) self.assertEqual(len(model.non_trainable_weights), 2 + len(model.test_net.non_trainable_weights)) self.assertEqual(len(model.trainable_weights), 6 + len(model.test_net.trainable_weights)) @test_util.run_in_graph_and_eager_modes def test_subclass_nested_in_graph(self): num_classes = 2 num_samples = 100 input_dim = 50 model = get_nested_model_3(input_dim=input_dim, num_classes=num_classes) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001), metrics=['acc']) x = np.ones((num_samples, input_dim)) y = np.zeros((num_samples, num_classes)) model.fit(x, y, epochs=2, batch_size=32, verbose=0) _ = model.evaluate(x, y, verbose=0) self.assertEqual(len(model.weights), 16) self.assertEqual( len(model.non_trainable_weights), 4) 
self.assertEqual(len(model.trainable_weights), 12) @test_util.run_in_graph_and_eager_modes def test_support_for_manual_training_arg(self): # In most cases, the `training` argument is left unspecified, in which # case it defaults to value corresponding to the Model method being used # (fit -> True, predict -> False, etc). # If the user writes their model `call` method to take # an explicit `training` argument, we must check that the correct value # is being passed to the model for each method call. class DPNet(keras.Model): def __init__(self): super(DPNet, self).__init__() self.dp = keras.layers.Dropout(0.5) self.dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones') def call(self, inputs, training=False): x = self.dp(inputs, training=training) return self.dense(x) model = DPNet() x = np.ones((10, 10)) y = model.predict(x) self.assertEqual(np.sum(y), np.sum(x)) model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)) loss = model.train_on_batch(x, y) self.assertGreater(loss, 0.1) def test_no_dependency(self): class Foo(keras.Model): def __init__(self): super(Foo, self).__init__() self.isdep = keras.layers.Dense(1) self.notdep = data_structures.NoDependency(keras.layers.Dense(2)) self.notdep_var = data_structures.NoDependency( resource_variable_ops.ResourceVariable(1., name='notdep_var')) m = Foo() self.assertEqual([m.isdep, m.notdep], m.layers) self.assertEqual(1, len(m._checkpoint_dependencies)) self.assertIs(m.isdep, m._checkpoint_dependencies[0].ref) self.assertEqual('notdep_var:0', m.notdep_var.name) def test_extra_variable(self): class ExtraVar(keras.Model): def __init__(self): super(ExtraVar, self).__init__() self.dense = keras.layers.Dense(1) self.var = resource_variable_ops.ResourceVariable(1.) 
self.not_trainable_var = resource_variable_ops.ResourceVariable( 2., trainable=False) def call(self, inputs): return self.dense(inputs + self.var) m = ExtraVar() self.assertTrue(m.trainable) self.assertEqual([m.dense], m.layers) self.assertEqual([m.var, m.not_trainable_var], m.variables) self.assertEqual([m.var], m.trainable_variables) self.assertEqual([m.not_trainable_var], m.non_trainable_variables) m.trainable = False self.assertEqual([m.var, m.not_trainable_var], m.variables) self.assertEqual([], m.trainable_variables) self.assertEqual([m.var, m.not_trainable_var], m.non_trainable_variables) m.trainable = True m(array_ops.ones([1, 1])) self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.variables) self.assertEqual([m.dense.kernel, m.dense.bias], m.dense.weights) self.assertEqual([m.dense.kernel, m.dense.bias, m.var, m.not_trainable_var], m.variables) self.assertEqual([m.dense.kernel, m.dense.bias, m.var], m.trainable_variables) self.assertEqual([m.not_trainable_var], m.non_trainable_variables) m.dense.trainable = False self.assertEqual( [m.var, m.dense.kernel, m.dense.bias, m.not_trainable_var], m.variables) self.assertEqual([m.var], m.trainable_variables) self.assertEqual([m.dense.kernel, m.dense.bias, m.not_trainable_var], m.non_trainable_variables) class CustomCallModel(keras.Model): def __init__(self): super(CustomCallModel, self).__init__() self.dense1 = keras.layers.Dense(1, activation='relu') self.dense2 = keras.layers.Dense(1, activation='softmax') def call(self, first, second, fiddle_with_output='no', training=True): combined = self.dense1(first) + self.dense2(second) if fiddle_with_output == 'yes': return 10. 
* combined
    else:
      return combined


class TrainingNoDefaultModel(keras.Model):
  """Subclassed model whose `call` takes `training` with no default value."""

  def __init__(self):
    super(TrainingNoDefaultModel, self).__init__()
    self.dense1 = keras.layers.Dense(1)

  def call(self, x, training):
    # `training` is accepted but unused; the tests only check argument
    # handling, not train/inference behavior.
    return self.dense1(x)


class CustomCallSignatureTests(test.TestCase):
  """Tests for subclassed models whose `call` deviates from `call(inputs)`."""

  @test_util.run_in_graph_and_eager_modes
  def test_no_inputs_in_signature(self):
    # `call(first, second, ...)` has no `inputs` argument at all; the model
    # must still be callable with positional and keyword tensors.
    model = CustomCallModel()
    first = array_ops.ones([2, 3])
    second = array_ops.ones([2, 5])
    output = model(first, second)
    self.evaluate([v.initializer for v in model.variables])
    expected_output = self.evaluate(model.dense1(first) + model.dense2(second))
    self.assertAllClose(expected_output, self.evaluate(output))
    # Non-tensor keyword arguments must be forwarded to `call` untouched.
    output = model(first, second, fiddle_with_output='yes')
    self.assertAllClose(10. * expected_output, self.evaluate(output))
    output = model(first, second=second, training=False)
    self.assertAllClose(expected_output, self.evaluate(output))

  @test_util.run_in_graph_and_eager_modes
  def test_training_args_call_build(self):
    # `build` must work even when `call` requires an explicit `training` arg.
    input_dim = 2
    model = TrainingNoDefaultModel()
    self.assertFalse(model.built, 'Model should not have been built')
    self.assertFalse(model.weights, ('Model should have no weights since it '
                                     'has not been built.'))
    model.build((None, input_dim))
    self.assertTrue(model.weights, ('Model should have weights now that it '
                                    'has been properly built.'))
    self.assertTrue(model.built, 'Model should be built after calling `build`.')

  @test_util.run_in_graph_and_eager_modes
  def test_custom_call_kwargs_and_build(self):
    # Multiple positional tensor args make `build` ambiguous; it must raise.
    first_input_shape = (2, 3)
    second_input_shape = (2, 5)
    model = CustomCallModel()
    self.assertFalse(model.built, 'Model should not have been built')
    self.assertFalse(model.weights, ('Model should have no weights since it '
                                     'has not been built.'))
    with self.assertRaisesRegexp(
        ValueError, 'cannot build your model if it has positional'):
      model.build(input_shape=[first_input_shape, second_input_shape])

  @test_util.run_in_graph_and_eager_modes
  def test_inputs_in_signature(self):
    # When `call` starts with `inputs`, any extra positional argument must be
    # rejected with a message pointing at keyword usage.

    class HasInputsAndOtherPositional(keras.Model):

      def call(self, inputs, some_other_arg, training=False):
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    model = HasInputsAndOtherPositional()
    with self.assertRaisesRegexp(
        TypeError, 'everything else as a keyword argument'):
      x1, x2 = keras.Input((1, 1)), keras.Input((1, 1))
      model(x1, x2)

  @test_util.run_in_graph_and_eager_modes
  def test_kwargs_in_signature(self):
    # A `**kwargs` catch-all in `call` is allowed; extra keywords pass through.

    class HasKwargs(keras.Model):

      def call(self, x, y=3, **key_words):
        return x

    model = HasKwargs()
    arg = array_ops.ones([])
    model(arg, a=3)
    if not context.executing_eagerly():
      six.assertCountEqual(self, [arg], model.inputs)

  @test_util.run_in_graph_and_eager_modes
  def test_args_in_signature(self):
    # A `*args` catch-all: all positional tensors are treated as inputs.

    class HasArgs(keras.Model):

      def call(self, x, *args, **kwargs):
        return [x] + list(args)

      def compute_output_shape(self, input_shape):
        return input_shape

    model = HasArgs()
    x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
    model(x1, x2, x3, a=3)
    if not context.executing_eagerly():
      six.assertCountEqual(self, [x1, x2, x3], model.inputs)

  def test_args_and_keywords_in_signature(self):
    # Mixing `*args` with named keyword arguments in `call` is unsupported
    # and must raise (graph mode only here).

    class HasArgs(keras.Model):

      def call(self, x, training=True, *args, **kwargs):
        return x

    with context.graph_mode():
      model = HasArgs()
      x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
      with self.assertRaisesRegexp(TypeError, 'args and arguments with'):
        model(x1, x2, x3, a=3)

  def test_training_no_default(self):
    # `training` without a default may be passed positionally; only the
    # tensor argument is recorded as a model input.
    with context.graph_mode():
      model = TrainingNoDefaultModel()
      arg = array_ops.ones([1, 1])
      model(arg, True)
      six.assertCountEqual(self, [arg], model.inputs)

  def test_training_no_default_with_positional(self):
    # A positional argument *after* `training` makes the signature ambiguous
    # and must raise.

    class TrainingNoDefaultWithPositional(keras.Model):

      def call(self, x, training, positional):
        return x

    with context.graph_mode():
      model = TrainingNoDefaultWithPositional()
      x1, x2, x3 = keras.Input((1, 1)), keras.Input((1, 1)), keras.Input((1, 1))
      with self.assertRaisesRegexp(TypeError, 'after a non-input'):
        model(x1, x2, x3)


if __name__ == '__main__':
  test.main()
unknown
codeparrot/codeparrot-clean
from Tools.Profile import profile profile("LOAD:ElementTree") import xml.etree.cElementTree import os profile("LOAD:enigma_skin") from enigma import eSize, ePoint, eRect, gFont, eWindow, eLabel, ePixmap, eWindowStyleManager, \ addFont, gRGB, eWindowStyleSkinned, getDesktop from Components.config import ConfigSubsection, ConfigText, config, ConfigNothing from Components.Converter.Converter import Converter from Components.Sources.Source import Source, ObsoleteSource from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_FONTS, SCOPE_CURRENT_SKIN, SCOPE_CONFIG, fileExists, SCOPE_SKIN_IMAGE from Tools.Import import my_import from Tools.LoadPixmap import LoadPixmap from Components.RcModel import rc_model from boxbranding import getBoxType colorNames = {} # Predefined fonts, typically used in built-in screens and for components like # the movie list and so. fonts = { "Body": ("Regular", 18, 22, 16), "ChoiceList": ("Regular", 20, 24, 18), } parameters = {} def dump(x, i=0): print " " * i + str(x) try: for n in x.childNodes: dump(n, i + 1) except: None class SkinError(Exception): def __init__(self, message): self.msg = message def __str__(self): return "{%s}: %s. Please contact the skin's author!" 
% (config.skin.primary_skin.value, self.msg) dom_skins = [ ] def addSkin(name, scope = SCOPE_SKIN): # read the skin if name is None or not len(name): print "[SKIN ERROR] attempt to add a skin without filename" return False filename = resolveFilename(scope, name) if fileExists(filename): mpath = os.path.dirname(filename) + "/" try: dom_skins.append((mpath, xml.etree.cElementTree.parse(filename).getroot())) except: print "[SKIN ERROR] error in %s" % filename return False else: return True return False # get own skin_user_skinname.xml file, if exist def skin_user_skinname(): name = "skin_user_" + config.skin.primary_skin.value[:config.skin.primary_skin.value.rfind('/')] + ".xml" filename = resolveFilename(SCOPE_CONFIG, name) if fileExists(filename): return name return None # we do our best to always select the "right" value # skins are loaded in order of priority: skin with # highest priority is loaded last, usually the user-provided # skin. # currently, loadSingleSkinData (colors, bordersets etc.) # are applied one-after-each, in order of ascending priority. # the dom_skin will keep all screens in descending priority, # so the first screen found will be used. 
# example: loadSkin("nemesis_greenline/skin.xml") config.skin = ConfigSubsection() DEFAULT_SKIN = "GigabluePax/skin.xml" if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)): DEFAULT_SKIN = "om-black/skin.xml" if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)): DEFAULT_SKIN = "adriatic32_turquoise/skin.xml" if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)): DEFAULT_SKIN = "Vali.HD.flex.MOD.wolv007/skin.xml" config.skin.primary_skin = ConfigText(default=DEFAULT_SKIN) profile("LoadSkin") res = None name = skin_user_skinname() if name: res = addSkin(name, SCOPE_CONFIG) if not name or not res: addSkin('skin_user.xml', SCOPE_CONFIG) # some boxes lie about their dimensions addSkin('skin_box.xml') # add optional discrete second infobar addSkin('skin_second_infobar.xml') # Only one of these is present, compliments of AM_CONDITIONAL if getBoxType() in ('gb800ue', 'gb800ueplus', 'gbultraue', 'gbquad', 'gbquadplus'): config.skin.lcdskin = ConfigText(default = "skin_lcd_default.xml") else: config.skin.lcdskin = ConfigNothing() display_skin_id = 1 if fileExists('/usr/share/enigma2/lcd_skin/skin_lcd_default.xml'): if fileExists(resolveFilename(SCOPE_CONFIG, config.skin.lcdskin.value)): addSkin(config.skin.lcdskin.value, SCOPE_CONFIG) else: addSkin('lcd_skin/' + config.skin.lcdskin.value) addSkin('skin_display.xml') if addSkin('skin_display96.xml'): # Color OLED display_skin_id = 2 addSkin('skin_text.xml') addSkin('skin_subtitles.xml') try: if not addSkin(config.skin.primary_skin.value): raise SkinError, "primary skin not found" except Exception, err: print "SKIN ERROR:", err skin = DEFAULT_SKIN if config.skin.primary_skin.value == skin: skin = 'skin.xml' print "defaulting to standard skin...", skin config.skin.primary_skin.value = skin addSkin(skin) del skin addSkin('skin_default.xml') profile("LoadSkinDefaultDone") def parseCoordinate(s, e, size=0, font=None): s = s.strip() if s == "center": val = (e - size)/2 elif s == '*': return None else: if 
s[0] is 'e': val = e s = s[1:] elif s[0] is 'c': val = e/2 s = s[1:] else: val = 0; if s: if s[-1] is '%': val += e * int(s[:-1]) / 100 elif s[-1] is 'w': val += fonts[font][3] * int(s[:-1]); elif s[-1] is 'h': val += fonts[font][2] * int(s[:-1]); else: val += int(s) #if val < 0: # Label shadowsOffset # val = 0 # can have a negative value return val def getParentSize(object, desktop): size = eSize() if object: parent = object.getParent() # For some widgets (e.g. ScrollLabel) the skin attributes are applied to # a child widget, instead of to the widget itself. In that case, the parent # we have here is not the real parent, but it is the main widget. # We have to go one level higher to get the actual parent. # We can detect this because the 'parent' will not have a size yet # (the main widget's size will be calculated internally, as soon as the child # widget has parsed the skin attributes) if parent and parent.size().isEmpty(): parent = parent.getParent() if parent: size = parent.size() elif desktop: #widget has no parent, use desktop size instead for relative coordinates size = desktop.size() return size def parsePosition(s, scale, object = None, desktop = None, size = None): x, y = s.split(',') parentsize = eSize() if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')): parentsize = getParentSize(object, desktop) xval = parseCoordinate(x, parentsize.width(), size and size.width()) yval = parseCoordinate(y, parentsize.height(), size and size.height()) return ePoint(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1]) def parseSize(s, scale, object = None, desktop = None): x, y = s.split(',') parentsize = eSize() if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')): parentsize = getParentSize(object, desktop) xval = parseCoordinate(x, parentsize.width()) yval = parseCoordinate(y, parentsize.height()) return eSize(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1]) def parseFont(s, scale): try: f = fonts[s] name = f[0] size = f[1] 
except: name, size = s.split(';') return gFont(name, int(size) * scale[0][0] / scale[0][1]) def parseColor(s): if s[0] != '#': try: return colorNames[s] except: raise SkinError("color '%s' must be #aarrggbb or valid named color" % (s)) return gRGB(int(s[1:], 0x10)) def collectAttributes(skinAttributes, node, context, skin_path_prefix=None, ignore=(), filenames=frozenset(("pixmap", "pointer", "seek_pointer", "backgroundPixmap", "selectionPixmap", "sliderPixmap", "scrollbarbackgroundPixmap"))): # walk all attributes size = None pos = None font = None for attrib, value in node.items(): if attrib not in ignore: if attrib in filenames: value = resolveFilename(SCOPE_CURRENT_SKIN, value, path_prefix=skin_path_prefix) # Bit of a hack this, really. When a window has a flag (e.g. wfNoBorder) # it needs to be set at least before the size is set, in order for the # window dimensions to be calculated correctly in all situations. # If wfNoBorder is applied after the size has been set, the window will fail to clear the title area. 
# Similar situation for a scrollbar in a listbox; when the scrollbar setting is applied after # the size, a scrollbar will not be shown until the selection moves for the first time if attrib == 'size': size = value.encode("utf-8") elif attrib == 'position': pos = value.encode("utf-8") elif attrib == 'font': font = value.encode("utf-8") skinAttributes.append((attrib, font)) else: skinAttributes.append((attrib, value.encode("utf-8"))) if pos is not None: pos, size = context.parse(pos, size, font) skinAttributes.append(('position', pos)) if size is not None: skinAttributes.append(('size', size)) def morphRcImagePath(value): if rc_model.rcIsDefault() is False: if value == '/usr/share/enigma2/skin_default/rc.png' or value == '/usr/share/enigma2/skin_default/rcold.png': value = rc_model.getRcLocation() + 'rc.png' return value def loadPixmap(path, desktop): option = path.find("#") if option != -1: path = path[:option] ptr = LoadPixmap(morphRcImagePath(path), desktop) if ptr is None: raise SkinError("pixmap file %s not found!" 
% (path)) return ptr class AttributeParser: def __init__(self, guiObject, desktop, scale=((1,1),(1,1))): self.guiObject = guiObject self.desktop = desktop self.scaleTuple = scale def applyOne(self, attrib, value): try: getattr(self, attrib)(value) except AttributeError: print "[Skin] Attribute not implemented:", attrib, "value:", value except SkinError, ex: print "[Skin] Error:", ex def applyAll(self, attrs): for attrib, value in attrs: self.applyOne(attrib, value) def conditional(self, value): pass def position(self, value): if isinstance(value, tuple): self.guiObject.move(ePoint(*value)) else: self.guiObject.move(parsePosition(value, self.scaleTuple, self.guiObject, self.desktop, self.guiObject.csize())) def size(self, value): if isinstance(value, tuple): self.guiObject.resize(eSize(*value)) else: self.guiObject.resize(parseSize(value, self.scaleTuple, self.guiObject, self.desktop)) def animationMode(self, value): self.guiObject.setAnimationMode( { "disable": 0x00, "off": 0x00, "offshow": 0x10, "offhide": 0x01, "onshow": 0x01, "onhide": 0x10, }[value]) def title(self, value): self.guiObject.setTitle(_(value)) def text(self, value): self.guiObject.setText(_(value)) def font(self, value): self.guiObject.setFont(parseFont(value, self.scaleTuple)) def zPosition(self, value): self.guiObject.setZPosition(int(value)) def itemHeight(self, value): self.guiObject.setItemHeight(int(value)) def pixmap(self, value): ptr = loadPixmap(value, self.desktop) self.guiObject.setPixmap(ptr) def backgroundPixmap(self, value): ptr = loadPixmap(value, self.desktop) self.guiObject.setBackgroundPicture(ptr) def selectionPixmap(self, value): ptr = loadPixmap(value, self.desktop) self.guiObject.setSelectionPicture(ptr) def sliderPixmap(self, value): ptr = loadPixmap(value, self.desktop) self.guiObject.setSliderPicture(ptr) def scrollbarbackgroundPixmap(self, value): ptr = loadPixmap(value, self.desktop) self.guiObject.setScrollbarBackgroundPicture(ptr) def alphatest(self, value): 
self.guiObject.setAlphatest( { "on": 1, "off": 0, "blend": 2, }[value]) def scale(self, value): self.guiObject.setScale(1) def orientation(self, value): # used by eSlider try: self.guiObject.setOrientation(* { "orVertical": (self.guiObject.orVertical, False), "orTopToBottom": (self.guiObject.orVertical, False), "orBottomToTop": (self.guiObject.orVertical, True), "orHorizontal": (self.guiObject.orHorizontal, False), "orLeftToRight": (self.guiObject.orHorizontal, False), "orRightToLeft": (self.guiObject.orHorizontal, True), }[value]) except KeyError: print "oprientation must be either orVertical or orHorizontal!" def valign(self, value): try: self.guiObject.setVAlign( { "top": self.guiObject.alignTop, "center": self.guiObject.alignCenter, "bottom": self.guiObject.alignBottom }[value]) except KeyError: print "valign must be either top, center or bottom!" def halign(self, value): try: self.guiObject.setHAlign( { "left": self.guiObject.alignLeft, "center": self.guiObject.alignCenter, "right": self.guiObject.alignRight, "block": self.guiObject.alignBlock }[value]) except KeyError: print "halign must be either left, center, right or block!" def textOffset(self, value): x, y = value.split(',') self.guiObject.setTextOffset(ePoint(int(x) * self.scaleTuple[0][0] / self.scaleTuple[0][1], int(y) * self.scaleTuple[1][0] / self.scaleTuple[1][1])) def flags(self, value): flags = value.split(',') for f in flags: try: fv = eWindow.__dict__[f] self.guiObject.setFlag(fv) except KeyError: print "illegal flag %s!" 
% f def backgroundColor(self, value): self.guiObject.setBackgroundColor(parseColor(value)) def backgroundColorSelected(self, value): self.guiObject.setBackgroundColorSelected(parseColor(value)) def foregroundColor(self, value): self.guiObject.setForegroundColor(parseColor(value)) def foregroundColorSelected(self, value): self.guiObject.setForegroundColorSelected(parseColor(value)) def shadowColor(self, value): self.guiObject.setShadowColor(parseColor(value)) def selectionDisabled(self, value): self.guiObject.setSelectionEnable(0) def transparent(self, value): self.guiObject.setTransparent(int(value)) def borderColor(self, value): self.guiObject.setBorderColor(parseColor(value)) def borderWidth(self, value): self.guiObject.setBorderWidth(int(value)) def scrollbarMode(self, value): self.guiObject.setScrollbarMode(getattr(self.guiObject, value)) # { "showOnDemand": self.guiObject.showOnDemand, # "showAlways": self.guiObject.showAlways, # "showNever": self.guiObject.showNever, # "showLeft": self.guiObject.showLeft # }[value]) def enableWrapAround(self, value): self.guiObject.setWrapAround(True) def itemHeight(self, value): self.guiObject.setItemHeight(int(value)) def pointer(self, value): (name, pos) = value.split(':') pos = parsePosition(pos, self.scaleTuple) ptr = loadPixmap(name, self.desktop) self.guiObject.setPointer(0, ptr, pos) def seek_pointer(self, value): (name, pos) = value.split(':') pos = parsePosition(pos, self.scaleTuple) ptr = loadPixmap(name, self.desktop) self.guiObject.setPointer(1, ptr, pos) def shadowOffset(self, value): self.guiObject.setShadowOffset(parsePosition(value, self.scaleTuple)) def noWrap(self, value): self.guiObject.setNoWrap(1) def applySingleAttribute(guiObject, desktop, attrib, value, scale = ((1,1),(1,1))): # Someone still using applySingleAttribute? 
AttributeParser(guiObject, desktop, scale).applyOne(attrib, value) def applyAllAttributes(guiObject, desktop, attributes, scale): AttributeParser(guiObject, desktop, scale).applyAll(attributes) def loadSingleSkinData(desktop, skin, path_prefix): """loads skin data like colors, windowstyle etc.""" assert skin.tag == "skin", "root element in skin must be 'skin'!" for c in skin.findall("output"): id = c.attrib.get('id') if id: id = int(id) else: id = 0 if id == 0: # framebuffer for res in c.findall("resolution"): get_attr = res.attrib.get xres = get_attr("xres") if xres: xres = int(xres) else: xres = 720 yres = get_attr("yres") if yres: yres = int(yres) else: yres = 576 bpp = get_attr("bpp") if bpp: bpp = int(bpp) else: bpp = 32 #print "Resolution:", xres,yres,bpp from enigma import gMainDC gMainDC.getInstance().setResolution(xres, yres) desktop.resize(eSize(xres, yres)) if bpp != 32: # load palette (not yet implemented) pass for skininclude in skin.findall("include"): filename = skininclude.attrib.get("filename") if filename: skinfile = resolveFilename(SCOPE_CURRENT_SKIN, filename, path_prefix=path_prefix) if not fileExists(skinfile): skinfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix) if fileExists(skinfile): print "[SKIN] loading include:", skinfile loadSkin(skinfile) for c in skin.findall("colors"): for color in c.findall("color"): get_attr = color.attrib.get name = get_attr("name") color = get_attr("value") if name and color: colorNames[name] = parseColor(color) #print "Color:", name, color else: raise SkinError("need color and name, got %s %s" % (name, color)) for c in skin.findall("fonts"): for font in c.findall("font"): get_attr = font.attrib.get filename = get_attr("filename", "<NONAME>") name = get_attr("name", "Regular") scale = get_attr("scale") if scale: scale = int(scale) else: scale = 100 is_replacement = get_attr("replacement") and True or False render = get_attr("render") if render: render = int(render) else: render = 0 
resolved_font = resolveFilename(SCOPE_FONTS, filename, path_prefix=path_prefix) if not fileExists(resolved_font): #when font is not available look at current skin path skin_path = resolveFilename(SCOPE_CURRENT_SKIN, filename) if fileExists(skin_path): resolved_font = skin_path addFont(resolved_font, name, scale, is_replacement, render) #print "Font: ", resolved_font, name, scale, is_replacement for alias in c.findall("alias"): get = alias.attrib.get try: name = get("name") font = get("font") size = int(get("size")) height = int(get("height", size)) # to be calculated some day width = int(get("width", size)) global fonts fonts[name] = (font, size, height, width) except Exception, ex: print "[SKIN] bad font alias", ex for c in skin.findall("parameters"): for parameter in c.findall("parameter"): get = parameter.attrib.get try: name = get("name") value = get("value") parameters[name] = map(int, value.split(",")) except Exception, ex: print "[SKIN] bad parameter", ex for c in skin.findall("subtitles"): from enigma import eWidget, eSubtitleWidget scale = ((1,1),(1,1)) for substyle in c.findall("sub"): get_attr = substyle.attrib.get font = parseFont(get_attr("font"), scale) col = get_attr("foregroundColor") if col: foregroundColor = parseColor(col) haveColor = 1 else: foregroundColor = gRGB(0xFFFFFF) haveColor = 0 col = get_attr("borderColor") if col: borderColor = parseColor(col) else: borderColor = gRGB(0) borderwidth = get_attr("borderWidth") if borderwidth is None: # default: use a subtitle border borderWidth = 3 else: borderWidth = int(borderwidth) face = eSubtitleWidget.__dict__[get_attr("name")] eSubtitleWidget.setFontStyle(face, font, haveColor, foregroundColor, borderColor, borderWidth) for windowstyle in skin.findall("windowstyle"): style = eWindowStyleSkinned() style_id = windowstyle.attrib.get("id") if style_id: style_id = int(style_id) else: style_id = 0 # defaults font = gFont("Regular", 20) offset = eSize(20, 5) for title in windowstyle.findall("title"): 
get_attr = title.attrib.get offset = parseSize(get_attr("offset"), ((1,1),(1,1))) font = parseFont(get_attr("font"), ((1,1),(1,1))) style.setTitleFont(font); style.setTitleOffset(offset) #print " ", font, offset for borderset in windowstyle.findall("borderset"): bsName = str(borderset.attrib.get("name")) for pixmap in borderset.findall("pixmap"): get_attr = pixmap.attrib.get bpName = get_attr("pos") filename = get_attr("filename") if filename and bpName: png = loadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, filename, path_prefix=path_prefix), desktop) style.setPixmap(eWindowStyleSkinned.__dict__[bsName], eWindowStyleSkinned.__dict__[bpName], png) #print " borderset:", bpName, filename for color in windowstyle.findall("color"): get_attr = color.attrib.get colorType = get_attr("name") color = parseColor(get_attr("color")) try: style.setColor(eWindowStyleSkinned.__dict__["col" + colorType], color) except: raise SkinError("Unknown color %s" % (colorType)) #pass #print " color:", type, color x = eWindowStyleManager.getInstance() x.setStyle(style_id, style) for margin in skin.findall("margin"): style_id = margin.attrib.get("id") if style_id: style_id = int(style_id) else: style_id = 0 r = eRect(0,0,0,0) v = margin.attrib.get("left") if v: r.setLeft(int(v)) v = margin.attrib.get("top") if v: r.setTop(int(v)) v = margin.attrib.get("right") if v: r.setRight(int(v)) v = margin.attrib.get("bottom") if v: r.setBottom(int(v)) # the "desktop" parameter is hardcoded to the UI screen, so we must ask # for the one that this actually applies to. 
getDesktop(style_id).setMargins(r) dom_screens = {} def loadSkin(name, scope = SCOPE_SKIN): # Now a utility for plugins to add skin data to the screens global dom_screens, display_skin_id filename = resolveFilename(scope, name) if fileExists(filename): path = os.path.dirname(filename) + "/" for elem in xml.etree.cElementTree.parse(filename).getroot(): if elem.tag == 'screen': name = elem.attrib.get('name', None) if name: sid = elem.attrib.get('id', None) if sid and (sid != display_skin_id): # not for this display elem.clear() continue if name in dom_screens: print "loadSkin: Screen already defined elsewhere:", name elem.clear() else: dom_screens[name] = (elem, path) else: elem.clear() else: elem.clear() def loadSkinData(desktop): # Kinda hackish, but this is called once by mytest.py global dom_skins skins = dom_skins[:] skins.reverse() for (path, dom_skin) in skins: loadSingleSkinData(desktop, dom_skin, path) for elem in dom_skin: if elem.tag == 'screen': name = elem.attrib.get('name', None) if name: sid = elem.attrib.get('id', None) if sid and (sid != display_skin_id): # not for this display elem.clear() continue if name in dom_screens: # Kill old versions, save memory dom_screens[name][0].clear() dom_screens[name] = (elem, path) else: # without name, it's useless! elem.clear() else: # non-screen element, no need for it any longer elem.clear() # no longer needed, we know where the screens are now. del dom_skins class additionalWidget: pass # Class that makes a tuple look like something else. Some plugins just assume # that size is a string and try to parse it. This class makes that work. 
class SizeTuple(tuple): def split(self, *args): return (str(self[0]), str(self[1])) def strip(self, *args): return '%s,%s' % self def __str__(self): return '%s,%s' % self class SkinContext: def __init__(self, parent=None, pos=None, size=None, font=None): if parent is not None: if pos is not None: pos, size = parent.parse(pos, size, font) self.x, self.y = pos self.w, self.h = size else: self.x = None self.y = None self.w = None self.h = None def __str__(self): return "Context (%s,%s)+(%s,%s) " % (self.x, self.y, self.w, self.h) def parse(self, pos, size, font): if pos == "fill": pos = (self.x, self.y) size = (self.w, self.h) self.w = 0 self.h = 0 else: w,h = size.split(',') w = parseCoordinate(w, self.w, 0, font) h = parseCoordinate(h, self.h, 0, font) if pos == "bottom": pos = (self.x, self.y + self.h - h) size = (self.w, h) self.h -= h elif pos == "top": pos = (self.x, self.y) size = (self.w, h) self.h -= h self.y += h elif pos == "left": pos = (self.x, self.y) size = (w, self.h) self.x += w self.w -= w elif pos == "right": pos = (self.x + self.w - w, self.y) size = (w, self.h) self.w -= w else: size = (w, h) pos = pos.split(',') pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font)) return (SizeTuple(pos), SizeTuple(size)) class SkinContextStack(SkinContext): # A context that stacks things instead of aligning them def parse(self, pos, size, font): if pos == "fill": pos = (self.x, self.y) size = (self.w, self.h) else: w,h = size.split(',') w = parseCoordinate(w, self.w, 0, font) h = parseCoordinate(h, self.h, 0, font) if pos == "bottom": pos = (self.x, self.y + self.h - h) size = (self.w, h) elif pos == "top": pos = (self.x, self.y) size = (self.w, h) elif pos == "left": pos = (self.x, self.y) size = (w, self.h) elif pos == "right": pos = (self.x + self.w - w, self.y) size = (w, self.h) else: size = (w, h) pos = pos.split(',') pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + 
parseCoordinate(pos[1], self.h, size[1], font)) return (SizeTuple(pos), SizeTuple(size)) def readSkin(screen, skin, names, desktop): if not isinstance(names, list): names = [names] # try all skins, first existing one have priority global dom_screens for n in names: myscreen, path = dom_screens.get(n, (None,None)) if myscreen is not None: # use this name for debug output name = n break else: name = "<embedded-in-'%s'>" % screen.__class__.__name__ # otherwise try embedded skin if myscreen is None: myscreen = getattr(screen, "parsedSkin", None) # try uncompiled embedded skin if myscreen is None and getattr(screen, "skin", None): skin = screen.skin print "[SKIN] Parsing embedded skin", name if (isinstance(skin, tuple)): for s in skin: candidate = xml.etree.cElementTree.fromstring(s) if candidate.tag == 'screen': sid = candidate.attrib.get('id', None) if (not sid) or (int(sid) == display_skin_id): myscreen = candidate break; else: print "[SKIN] Hey, no suitable screen!" else: myscreen = xml.etree.cElementTree.fromstring(skin) if myscreen: screen.parsedSkin = myscreen if myscreen is None: print "[SKIN] No skin to read..." myscreen = screen.parsedSkin = xml.etree.cElementTree.fromstring("<screen></screen>") screen.skinAttributes = [ ] skin_path_prefix = getattr(screen, "skin_path", path) context = SkinContextStack() s = desktop.bounds() context.x = s.left() context.y = s.top() context.w = s.width() context.h = s.height() del s collectAttributes(screen.skinAttributes, myscreen, context, skin_path_prefix, ignore=("name",)) context = SkinContext(context, myscreen.attrib.get('position'), myscreen.attrib.get('size')) screen.additionalWidgets = [ ] screen.renderer = [ ] visited_components = set() # now walk all widgets and stuff def process_none(widget, context): pass def process_widget(widget, context): get_attr = widget.attrib.get # ok, we either have 1:1-mapped widgets ('old style'), or 1:n-mapped # widgets (source->renderer). 
wname = get_attr('name') wsource = get_attr('source') if wname is None and wsource is None: print "widget has no name and no source!" return if wname: #print "Widget name=", wname visited_components.add(wname) # get corresponding 'gui' object try: attributes = screen[wname].skinAttributes = [ ] except: raise SkinError("component with name '" + wname + "' was not found in skin of screen '" + name + "'!") # assert screen[wname] is not Source collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('name',)) elif wsource: # get corresponding source #print "Widget source=", wsource while True: # until we found a non-obsolete source # parse our current "wsource", which might specifiy a "related screen" before the dot, # for example to reference a parent, global or session-global screen. scr = screen # resolve all path components path = wsource.split('.') while len(path) > 1: scr = screen.getRelatedScreen(path[0]) if scr is None: #print wsource #print name raise SkinError("specified related screen '" + wsource + "' was not found in screen '" + name + "'!") path = path[1:] # resolve the source. source = scr.get(path[0]) if isinstance(source, ObsoleteSource): # however, if we found an "obsolete source", issue warning, and resolve the real source. print "WARNING: SKIN '%s' USES OBSOLETE SOURCE '%s', USE '%s' INSTEAD!" % (name, wsource, source.new_source) print "OBSOLETE SOURCE WILL BE REMOVED %s, PLEASE UPDATE!" % (source.removal_date) if source.description: print source.description wsource = source.new_source else: # otherwise, use that source. 
break if source is None: raise SkinError("source '" + wsource + "' was not found in screen '" + name + "'!") wrender = get_attr('render') if not wrender: raise SkinError("you must define a renderer with render= for source '%s'" % (wsource)) for converter in widget.findall("convert"): ctype = converter.get('type') assert ctype, "'convert'-tag needs a 'type'-attribute" #print "Converter:", ctype try: parms = converter.text.strip() except: parms = "" #print "Params:", parms converter_class = my_import('.'.join(("Components", "Converter", ctype))).__dict__.get(ctype) c = None for i in source.downstream_elements: if isinstance(i, converter_class) and i.converter_arguments == parms: c = i if c is None: c = converter_class(parms) c.connect(source) source = c renderer_class = my_import('.'.join(("Components", "Renderer", wrender))).__dict__.get(wrender) renderer = renderer_class() # instantiate renderer renderer.connect(source) # connect to source attributes = renderer.skinAttributes = [ ] collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('render', 'source')) screen.renderer.append(renderer) def process_applet(widget, context): try: codeText = widget.text.strip() widgetType = widget.attrib.get('type') code = compile(codeText, "skin applet", "exec") except Exception, ex: raise SkinError("applet failed to compile: " + str(ex)) if widgetType == "onLayoutFinish": screen.onLayoutFinish.append(code) else: raise SkinError("applet type '%s' unknown!" 
% widgetType) def process_elabel(widget, context): w = additionalWidget() w.widget = eLabel w.skinAttributes = [ ] collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',)) screen.additionalWidgets.append(w) def process_epixmap(widget, context): w = additionalWidget() w.widget = ePixmap w.skinAttributes = [ ] collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',)) screen.additionalWidgets.append(w) def process_screen(widget, context): for w in widget.getchildren(): conditional = w.attrib.get('conditional') if conditional and not [i for i in conditional.split(",") if i in screen.keys()]: continue p = processors.get(w.tag, process_none) try: p(w, context) except SkinError, e: print "[Skin] SKIN ERROR in screen '%s' widget '%s':" % (name, w.tag), e def process_panel(widget, context): n = widget.attrib.get('name') if n: try: s = dom_screens[n] except KeyError: print "[SKIN] Unable to find screen '%s' referred in screen '%s'" % (n, name) else: process_screen(s[0], context) layout = widget.attrib.get('layout') if layout == 'stack': cc = SkinContextStack else: cc = SkinContext try: c = cc(context, widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font')) except Exception, ex: raise SkinError("Failed to create skincontext (%s,%s,%s) in %s: %s" % (widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'), context, ex) ) process_screen(widget, c) processors = { None: process_none, "widget": process_widget, "applet": process_applet, "eLabel": process_elabel, "ePixmap": process_epixmap, "panel": process_panel } try: context.x = 0 # reset offsets, all components are relative to screen context.y = 0 # coordinates. 
process_screen(myscreen, context) except Exception, e: print "[Skin] SKIN ERROR in %s:" % name, e from Components.GUIComponent import GUIComponent nonvisited_components = [x for x in set(screen.keys()) - visited_components if isinstance(x, GUIComponent)] assert not nonvisited_components, "the following components in %s don't have a skin entry: %s" % (name, ', '.join(nonvisited_components)) # This may look pointless, but it unbinds 'screen' from the nested scope. A better # solution is to avoid the nested scope above and use the context object to pass # things around. screen = None visited_components = None
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# https://docs.ansible.com/playbooks_vault.html for more details.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import sys
import time


def main(args):
    """Append a timestamped 'faux editor' marker line to the file named by args[1].

    Used as a stand-in $EDITOR in ansible-vault tests: it appends one line,
    'faux editor added at <timestamp>', so the caller can verify that the
    editor was actually invoked on the file.

    Returns 0, the process exit code.
    """
    path = os.path.abspath(args[1])
    # 'r+' preserves the existing content; the with-block guarantees the
    # handle is closed even if the read or write raises (the original
    # open()/close() pair leaked the handle on error).
    with open(path, 'r+') as fo:
        content = fo.readlines()
        content.append('faux editor added at %s\n' % time.time())
        fo.seek(0)
        fo.write(''.join(content))
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[:]))
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- import sys import textwrap import _pytest.assertion as plugin import _pytest._code import py import pytest from _pytest.assertion import reinterpret from _pytest.assertion import util PY3 = sys.version_info >= (3, 0) @pytest.fixture def mock_config(): class Config(object): verbose = False def getoption(self, name): if name == 'verbose': return self.verbose raise KeyError('Not mocked out: %s' % name) return Config() def interpret(expr): return reinterpret.reinterpret(expr, _pytest._code.Frame(sys._getframe(1))) class TestBinReprIntegration: def test_pytest_assertrepr_compare_called(self, testdir): testdir.makeconftest(""" l = [] def pytest_assertrepr_compare(op, left, right): l.append((op, left, right)) def pytest_funcarg__l(request): return l """) testdir.makepyfile(""" def test_hello(): assert 0 == 1 def test_check(l): assert l == [("==", 0, 1)] """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ "*test_hello*FAIL*", "*test_check*PASS*", ]) def callequal(left, right, verbose=False): config = mock_config() config.verbose = verbose return plugin.pytest_assertrepr_compare(config, '==', left, right) class TestAssert_reprcompare: def test_different_types(self): assert callequal([0, 1], 'foo') is None def test_summary(self): summary = callequal([0, 1], [0, 2])[0] assert len(summary) < 65 def test_text_diff(self): diff = callequal('spam', 'eggs')[1:] assert '- spam' in diff assert '+ eggs' in diff def test_text_skipping(self): lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs') assert 'Skipping' in lines[1] for line in lines: assert 'a'*50 not in line def test_text_skipping_verbose(self): lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs', verbose=True) assert '- ' + 'a'*50 + 'spam' in lines assert '+ ' + 'a'*50 + 'eggs' in lines def test_multiline_text_diff(self): left = 'foo\nspam\nbar' right = 'foo\neggs\nbar' diff = callequal(left, right) assert '- spam' in diff assert '+ eggs' in diff def test_list(self): expl = callequal([0, 
1], [0, 2]) assert len(expl) > 1 @pytest.mark.parametrize( ['left', 'right', 'expected'], [ ([0, 1], [0, 2], """ Full diff: - [0, 1] ? ^ + [0, 2] ? ^ """), ({0: 1}, {0: 2}, """ Full diff: - {0: 1} ? ^ + {0: 2} ? ^ """), (set([0, 1]), set([0, 2]), """ Full diff: - set([0, 1]) ? ^ + set([0, 2]) ? ^ """ if not PY3 else """ Full diff: - {0, 1} ? ^ + {0, 2} ? ^ """) ] ) def test_iterable_full_diff(self, left, right, expected): """Test the full diff assertion failure explanation. When verbose is False, then just a -v notice to get the diff is rendered, when verbose is True, then ndiff of the pprint is returned. """ expl = callequal(left, right, verbose=False) assert expl[-1] == 'Use -v to get the full diff' expl = '\n'.join(callequal(left, right, verbose=True)) assert expl.endswith(textwrap.dedent(expected).strip()) def test_list_different_lenghts(self): expl = callequal([0, 1], [0, 1, 2]) assert len(expl) > 1 expl = callequal([0, 1, 2], [0, 1]) assert len(expl) > 1 def test_dict(self): expl = callequal({'a': 0}, {'a': 1}) assert len(expl) > 1 def test_dict_omitting(self): lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}) assert lines[1].startswith('Omitting 1 identical item') assert 'Common items' not in lines for line in lines[1:]: assert 'b' not in line def test_dict_omitting_verbose(self): lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=True) assert lines[1].startswith('Common items:') assert 'Omitting' not in lines[1] assert lines[2] == "{'b': 1}" def test_set(self): expl = callequal(set([0, 1]), set([0, 2])) assert len(expl) > 1 def test_frozenzet(self): expl = callequal(frozenset([0, 1]), set([0, 2])) assert len(expl) > 1 def test_Sequence(self): col = py.builtin._tryimport( "collections.abc", "collections", "sys") if not hasattr(col, "MutableSequence"): pytest.skip("cannot import MutableSequence") MutableSequence = col.MutableSequence class TestSequence(MutableSequence): # works with a Sequence subclass def __init__(self, iterable): 
self.elements = list(iterable) def __getitem__(self, item): return self.elements[item] def __len__(self): return len(self.elements) def __setitem__(self, item, value): pass def __delitem__(self, item): pass def insert(self, item, index): pass expl = callequal(TestSequence([0, 1]), list([0, 2])) assert len(expl) > 1 def test_list_tuples(self): expl = callequal([], [(1,2)]) assert len(expl) > 1 expl = callequal([(1,2)], []) assert len(expl) > 1 def test_list_bad_repr(self): class A: def __repr__(self): raise ValueError(42) expl = callequal([], [A()]) assert 'ValueError' in "".join(expl) expl = callequal({}, {'1': A()}) assert 'faulty' in "".join(expl) def test_one_repr_empty(self): """ the faulty empty string repr did trigger a unbound local error in _diff_text """ class A(str): def __repr__(self): return '' expl = callequal(A(), '') assert not expl def test_repr_no_exc(self): expl = ' '.join(callequal('foo', 'bar')) assert 'raised in repr()' not in expl def test_unicode(self): left = py.builtin._totext('£€', 'utf-8') right = py.builtin._totext('£', 'utf-8') expl = callequal(left, right) assert expl[0] == py.builtin._totext("'£€' == '£'", 'utf-8') assert expl[1] == py.builtin._totext('- £€', 'utf-8') assert expl[2] == py.builtin._totext('+ £', 'utf-8') def test_nonascii_text(self): """ :issue: 877 non ascii python2 str caused a UnicodeDecodeError """ class A(str): def __repr__(self): return '\xff' expl = callequal(A(), '1') assert expl def test_format_nonascii_explanation(self): assert util.format_explanation('λ') def test_mojibake(self): # issue 429 left = 'e' right = '\xc3\xa9' if not isinstance(left, py.builtin.bytes): left = py.builtin.bytes(left, 'utf-8') right = py.builtin.bytes(right, 'utf-8') expl = callequal(left, right) for line in expl: assert isinstance(line, py.builtin.text) msg = py.builtin._totext('\n').join(expl) assert msg class TestFormatExplanation: def test_special_chars_full(self, testdir): # Issue 453, for the bug this would raise IndexError 
testdir.makepyfile(""" def test_foo(): assert '\\n}' == '' """) result = testdir.runpytest() assert result.ret == 1 result.stdout.fnmatch_lines([ "*AssertionError*", ]) def test_fmt_simple(self): expl = 'assert foo' assert util.format_explanation(expl) == 'assert foo' def test_fmt_where(self): expl = '\n'.join(['assert 1', '{1 = foo', '} == 2']) res = '\n'.join(['assert 1 == 2', ' + where 1 = foo']) assert util.format_explanation(expl) == res def test_fmt_and(self): expl = '\n'.join(['assert 1', '{1 = foo', '} == 2', '{2 = bar', '}']) res = '\n'.join(['assert 1 == 2', ' + where 1 = foo', ' + and 2 = bar']) assert util.format_explanation(expl) == res def test_fmt_where_nested(self): expl = '\n'.join(['assert 1', '{1 = foo', '{foo = bar', '}', '} == 2']) res = '\n'.join(['assert 1 == 2', ' + where 1 = foo', ' + where foo = bar']) assert util.format_explanation(expl) == res def test_fmt_newline(self): expl = '\n'.join(['assert "foo" == "bar"', '~- foo', '~+ bar']) res = '\n'.join(['assert "foo" == "bar"', ' - foo', ' + bar']) assert util.format_explanation(expl) == res def test_fmt_newline_escaped(self): expl = '\n'.join(['assert foo == bar', 'baz']) res = 'assert foo == bar\\nbaz' assert util.format_explanation(expl) == res def test_fmt_newline_before_where(self): expl = '\n'.join(['the assertion message here', '>assert 1', '{1 = foo', '} == 2', '{2 = bar', '}']) res = '\n'.join(['the assertion message here', 'assert 1 == 2', ' + where 1 = foo', ' + and 2 = bar']) assert util.format_explanation(expl) == res def test_fmt_multi_newline_before_where(self): expl = '\n'.join(['the assertion', '~message here', '>assert 1', '{1 = foo', '} == 2', '{2 = bar', '}']) res = '\n'.join(['the assertion', ' message here', 'assert 1 == 2', ' + where 1 = foo', ' + and 2 = bar']) assert util.format_explanation(expl) == res def test_python25_compile_issue257(testdir): testdir.makepyfile(""" def test_rewritten(): assert 1 == 2 # some comment """) result = testdir.runpytest() assert 
result.ret == 1 result.stdout.fnmatch_lines(""" *E*assert 1 == 2* *1 failed* """) def test_rewritten(testdir): testdir.makepyfile(""" def test_rewritten(): assert "@py_builtins" in globals() """) assert testdir.runpytest().ret == 0 def test_reprcompare_notin(mock_config): detail = plugin.pytest_assertrepr_compare( mock_config, 'not in', 'foo', 'aaafoobbb')[1:] assert detail == ["'foo' is contained here:", ' aaafoobbb', '? +++'] def test_pytest_assertrepr_compare_integration(testdir): testdir.makepyfile(""" def test_hello(): x = set(range(100)) y = x.copy() y.remove(50) assert x == y """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*def test_hello():*", "*assert x == y*", "*E*Extra items*left*", "*E*50*", ]) def test_sequence_comparison_uses_repr(testdir): testdir.makepyfile(""" def test_hello(): x = set("hello x") y = set("hello y") assert x == y """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*def test_hello():*", "*assert x == y*", "*E*Extra items*left*", "*E*'x'*", "*E*Extra items*right*", "*E*'y'*", ]) def test_assert_compare_truncate_longmessage(monkeypatch, testdir): testdir.makepyfile(r""" def test_long(): a = list(range(200)) b = a[::2] a = '\n'.join(map(str, a)) b = '\n'.join(map(str, b)) assert a == b """) monkeypatch.delenv('CI', raising=False) result = testdir.runpytest() # without -vv, truncate the message showing a few diff lines only result.stdout.fnmatch_lines([ "*- 1", "*- 3", "*- 5", "*- 7", "*truncated (191 more lines)*use*-vv*", ]) result = testdir.runpytest('-vv') result.stdout.fnmatch_lines([ "*- 197", ]) monkeypatch.setenv('CI', '1') result = testdir.runpytest() result.stdout.fnmatch_lines([ "*- 197", ]) def test_assertrepr_loaded_per_dir(testdir): testdir.makepyfile(test_base=['def test_base(): assert 1 == 2']) a = testdir.mkdir('a') a_test = a.join('test_a.py') a_test.write('def test_a(): assert 1 == 2') a_conftest = a.join('conftest.py') a_conftest.write('def pytest_assertrepr_compare(): return ["summary 
a"]') b = testdir.mkdir('b') b_test = b.join('test_b.py') b_test.write('def test_b(): assert 1 == 2') b_conftest = b.join('conftest.py') b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]') result = testdir.runpytest() result.stdout.fnmatch_lines([ '*def test_base():*', '*E*assert 1 == 2*', '*def test_a():*', '*E*assert summary a*', '*def test_b():*', '*E*assert summary b*']) def test_assertion_options(testdir): testdir.makepyfile(""" def test_hello(): x = 3 assert x == 4 """) result = testdir.runpytest() assert "3 == 4" in result.stdout.str() off_options = (("--no-assert",), ("--nomagic",), ("--no-assert", "--nomagic"), ("--assert=plain",), ("--assert=plain", "--no-assert"), ("--assert=plain", "--nomagic"), ("--assert=plain", "--no-assert", "--nomagic")) for opt in off_options: result = testdir.runpytest_subprocess(*opt) assert "3 == 4" not in result.stdout.str() def test_old_assert_mode(testdir): testdir.makepyfile(""" def test_in_old_mode(): assert "@py_builtins" not in globals() """) result = testdir.runpytest_subprocess("--assert=reinterp") assert result.ret == 0 def test_triple_quoted_string_issue113(testdir): testdir.makepyfile(""" def test_hello(): assert "" == ''' '''""") result = testdir.runpytest("--fulltrace") result.stdout.fnmatch_lines([ "*1 failed*", ]) assert 'SyntaxError' not in result.stdout.str() def test_traceback_failure(testdir): p1 = testdir.makepyfile(""" def g(): return 2 def f(x): assert x == g() def test_onefails(): f(3) """) result = testdir.runpytest(p1, "--tb=long") result.stdout.fnmatch_lines([ "*test_traceback_failure.py F", "====* FAILURES *====", "____*____", "", " def test_onefails():", "> f(3)", "", "*test_*.py:6: ", "_ _ _ *", #"", " def f(x):", "> assert x == g()", "E assert 3 == 2", "E + where 2 = g()", "", "*test_traceback_failure.py:4: AssertionError" ]) result = testdir.runpytest(p1) # "auto" result.stdout.fnmatch_lines([ "*test_traceback_failure.py F", "====* FAILURES *====", "____*____", "", " def 
test_onefails():", "> f(3)", "", "*test_*.py:6: ", "", " def f(x):", "> assert x == g()", "E assert 3 == 2", "E + where 2 = g()", "", "*test_traceback_failure.py:4: AssertionError" ]) @pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" ) def test_warn_missing(testdir): testdir.makepyfile("") result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h") result.stderr.fnmatch_lines([ "*WARNING*assert statements are not executed*", ]) result = testdir.run(sys.executable, "-OO", "-m", "pytest", "--no-assert") result.stderr.fnmatch_lines([ "*WARNING*assert statements are not executed*", ]) def test_recursion_source_decode(testdir): testdir.makepyfile(""" def test_something(): pass """) testdir.makeini(""" [pytest] python_files = *.py """) result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines(""" <Module*> """) def test_AssertionError_message(testdir): testdir.makepyfile(""" def test_hello(): x,y = 1,2 assert 0, (x,y) """) result = testdir.runpytest() result.stdout.fnmatch_lines(""" *def test_hello* *assert 0, (x,y)* *AssertionError: (1, 2)* """) @pytest.mark.skipif(PY3, reason='This bug does not exist on PY3') def test_set_with_unsortable_elements(): # issue #718 class UnsortableKey(object): def __init__(self, name): self.name = name def __lt__(self, other): raise RuntimeError() def __repr__(self): return 'repr({0})'.format(self.name) def __eq__(self, other): return self.name == other.name def __hash__(self): return hash(self.name) left_set = set(UnsortableKey(str(i)) for i in range(1, 3)) right_set = set(UnsortableKey(str(i)) for i in range(2, 4)) expl = callequal(left_set, right_set, verbose=True) # skip first line because it contains the "construction" of the set, which does not have a guaranteed order expl = expl[1:] dedent = textwrap.dedent(""" Extra items in the left set: repr(1) Extra items in the right set: repr(3) Full diff (fallback to calling repr on each item): - repr(1) repr(2) + repr(3) 
""").strip() assert '\n'.join(expl) == dedent
unknown
codeparrot/codeparrot-clean
import contextlib import os import py_compile import shutil import sys import tempfile import threading import time import types import weakref import zipfile from importlib import import_module from pathlib import Path from subprocess import CompletedProcess from unittest import mock, skip, skipIf import pytz import django.__main__ from django.apps.registry import Apps from django.test import SimpleTestCase from django.test.utils import extend_sys_path from django.utils import autoreload from django.utils.autoreload import WatchmanUnavailable from .test_module import __main__ as test_main, main_module as test_main_module from .utils import on_macos_with_hfs class TestIterModulesAndFiles(SimpleTestCase): def import_and_cleanup(self, name): import_module(name) self.addCleanup(lambda: sys.path_importer_cache.clear()) self.addCleanup(lambda: sys.modules.pop(name, None)) def clear_autoreload_caches(self): autoreload.iter_modules_and_files.cache_clear() def assertFileFound(self, filename): # Some temp directories are symlinks. Python resolves these fully while # importing. 
resolved_filename = filename.resolve(strict=True) self.clear_autoreload_caches() # Test uncached access self.assertIn(resolved_filename, list(autoreload.iter_all_python_module_files())) # Test cached access self.assertIn(resolved_filename, list(autoreload.iter_all_python_module_files())) self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1) def assertFileNotFound(self, filename): resolved_filename = filename.resolve(strict=True) self.clear_autoreload_caches() # Test uncached access self.assertNotIn(resolved_filename, list(autoreload.iter_all_python_module_files())) # Test cached access self.assertNotIn(resolved_filename, list(autoreload.iter_all_python_module_files())) self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1) def temporary_file(self, filename): dirname = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, dirname) return Path(dirname) / filename def test_paths_are_pathlib_instances(self): for filename in autoreload.iter_all_python_module_files(): self.assertIsInstance(filename, Path) def test_file_added(self): """ When a file is added, it's returned by iter_all_python_module_files(). """ filename = self.temporary_file('test_deleted_removed_module.py') filename.touch() with extend_sys_path(str(filename.parent)): self.import_and_cleanup('test_deleted_removed_module') self.assertFileFound(filename.absolute()) def test_check_errors(self): """ When a file containing an error is imported in a function wrapped by check_errors(), gen_filenames() returns it. 
""" filename = self.temporary_file('test_syntax_error.py') filename.write_text("Ceci n'est pas du Python.") with extend_sys_path(str(filename.parent)): try: with self.assertRaises(SyntaxError): autoreload.check_errors(import_module)('test_syntax_error') finally: autoreload._exception = None self.assertFileFound(filename) def test_check_errors_catches_all_exceptions(self): """ Since Python may raise arbitrary exceptions when importing code, check_errors() must catch Exception, not just some subclasses. """ filename = self.temporary_file('test_exception.py') filename.write_text('raise Exception') with extend_sys_path(str(filename.parent)): try: with self.assertRaises(Exception): autoreload.check_errors(import_module)('test_exception') finally: autoreload._exception = None self.assertFileFound(filename) def test_zip_reload(self): """ Modules imported from zipped files have their archive location included in the result. """ zip_file = self.temporary_file('zip_import.zip') with zipfile.ZipFile(str(zip_file), 'w', zipfile.ZIP_DEFLATED) as zipf: zipf.writestr('test_zipped_file.py', '') with extend_sys_path(str(zip_file)): self.import_and_cleanup('test_zipped_file') self.assertFileFound(zip_file) def test_bytecode_conversion_to_source(self): """.pyc and .pyo files are included in the files list.""" filename = self.temporary_file('test_compiled.py') filename.touch() compiled_file = Path(py_compile.compile(str(filename), str(filename.with_suffix('.pyc')))) filename.unlink() with extend_sys_path(str(compiled_file.parent)): self.import_and_cleanup('test_compiled') self.assertFileFound(compiled_file) def test_weakref_in_sys_module(self): """iter_all_python_module_file() ignores weakref modules.""" time_proxy = weakref.proxy(time) sys.modules['time_proxy'] = time_proxy self.addCleanup(lambda: sys.modules.pop('time_proxy', None)) list(autoreload.iter_all_python_module_files()) # No crash. 
def test_module_without_spec(self): module = types.ModuleType('test_module') del module.__spec__ self.assertEqual(autoreload.iter_modules_and_files((module,), frozenset()), frozenset()) def test_main_module_is_resolved(self): main_module = sys.modules['__main__'] self.assertFileFound(Path(main_module.__file__)) def test_main_module_without_file_is_not_resolved(self): fake_main = types.ModuleType('__main__') self.assertEqual(autoreload.iter_modules_and_files((fake_main,), frozenset()), frozenset()) def test_path_with_embedded_null_bytes(self): for path in ( 'embedded_null_byte\x00.py', 'di\x00rectory/embedded_null_byte.py', ): with self.subTest(path=path): self.assertEqual( autoreload.iter_modules_and_files((), frozenset([path])), frozenset(), ) class TestChildArguments(SimpleTestCase): @mock.patch.dict(sys.modules, {'__main__': django.__main__}) @mock.patch('sys.argv', [django.__main__.__file__, 'runserver']) @mock.patch('sys.warnoptions', []) def test_run_as_module(self): self.assertEqual( autoreload.get_child_arguments(), [sys.executable, '-m', 'django', 'runserver'] ) @mock.patch.dict(sys.modules, {'__main__': test_main}) @mock.patch('sys.argv', [test_main.__file__, 'runserver']) @mock.patch('sys.warnoptions', []) def test_run_as_non_django_module(self): self.assertEqual( autoreload.get_child_arguments(), [sys.executable, '-m', 'utils_tests.test_module', 'runserver'], ) @mock.patch.dict(sys.modules, {'__main__': test_main_module}) @mock.patch('sys.argv', [test_main.__file__, 'runserver']) @mock.patch('sys.warnoptions', []) def test_run_as_non_django_module_non_package(self): self.assertEqual( autoreload.get_child_arguments(), [sys.executable, '-m', 'utils_tests.test_module.main_module', 'runserver'], ) @mock.patch('sys.argv', [__file__, 'runserver']) @mock.patch('sys.warnoptions', ['error']) def test_warnoptions(self): self.assertEqual( autoreload.get_child_arguments(), [sys.executable, '-Werror', __file__, 'runserver'] ) @mock.patch('sys.warnoptions', []) def 
test_exe_fallback(self): with tempfile.TemporaryDirectory() as tmpdir: exe_path = Path(tmpdir) / 'django-admin.exe' exe_path.touch() with mock.patch('sys.argv', [exe_path.with_suffix(''), 'runserver']): self.assertEqual( autoreload.get_child_arguments(), [exe_path, 'runserver'] ) @mock.patch('sys.warnoptions', []) def test_entrypoint_fallback(self): with tempfile.TemporaryDirectory() as tmpdir: script_path = Path(tmpdir) / 'django-admin-script.py' script_path.touch() with mock.patch('sys.argv', [script_path.with_name('django-admin'), 'runserver']): self.assertEqual( autoreload.get_child_arguments(), [sys.executable, script_path, 'runserver'] ) @mock.patch('sys.argv', ['does-not-exist', 'runserver']) @mock.patch('sys.warnoptions', []) def test_raises_runtimeerror(self): msg = 'Script does-not-exist does not exist.' with self.assertRaisesMessage(RuntimeError, msg): autoreload.get_child_arguments() @mock.patch('sys.argv', [__file__, 'runserver']) @mock.patch('sys.warnoptions', []) def test_module_no_spec(self): module = types.ModuleType('test_module') del module.__spec__ with mock.patch.dict(sys.modules, {'__main__': module}): self.assertEqual( autoreload.get_child_arguments(), [sys.executable, __file__, 'runserver'] ) class TestUtilities(SimpleTestCase): def test_is_django_module(self): for module, expected in ( (pytz, False), (sys, False), (autoreload, True) ): with self.subTest(module=module): self.assertIs(autoreload.is_django_module(module), expected) def test_is_django_path(self): for module, expected in ( (pytz.__file__, False), (contextlib.__file__, False), (autoreload.__file__, True) ): with self.subTest(module=module): self.assertIs(autoreload.is_django_path(module), expected) class TestCommonRoots(SimpleTestCase): def test_common_roots(self): paths = ( Path('/first/second'), Path('/first/second/third'), Path('/first/'), Path('/root/first/'), ) results = autoreload.common_roots(paths) self.assertCountEqual(results, [Path('/first/'), Path('/root/first/')]) 
class TestSysPathDirectories(SimpleTestCase): def setUp(self): self._directory = tempfile.TemporaryDirectory() self.directory = Path(self._directory.name).resolve(strict=True).absolute() self.file = self.directory / 'test' self.file.touch() def tearDown(self): self._directory.cleanup() def test_sys_paths_with_directories(self): with extend_sys_path(str(self.file)): paths = list(autoreload.sys_path_directories()) self.assertIn(self.file.parent, paths) def test_sys_paths_non_existing(self): nonexistent_file = Path(self.directory.name) / 'does_not_exist' with extend_sys_path(str(nonexistent_file)): paths = list(autoreload.sys_path_directories()) self.assertNotIn(nonexistent_file, paths) self.assertNotIn(nonexistent_file.parent, paths) def test_sys_paths_absolute(self): paths = list(autoreload.sys_path_directories()) self.assertTrue(all(p.is_absolute() for p in paths)) def test_sys_paths_directories(self): with extend_sys_path(str(self.directory)): paths = list(autoreload.sys_path_directories()) self.assertIn(self.directory, paths) class GetReloaderTests(SimpleTestCase): @mock.patch('django.utils.autoreload.WatchmanReloader') def test_watchman_unavailable(self, mocked_watchman): mocked_watchman.check_availability.side_effect = WatchmanUnavailable self.assertIsInstance(autoreload.get_reloader(), autoreload.StatReloader) @mock.patch.object(autoreload.WatchmanReloader, 'check_availability') def test_watchman_available(self, mocked_available): # If WatchmanUnavailable isn't raised, Watchman will be chosen. 
mocked_available.return_value = None result = autoreload.get_reloader() self.assertIsInstance(result, autoreload.WatchmanReloader) class RunWithReloaderTests(SimpleTestCase): @mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'true'}) @mock.patch('django.utils.autoreload.get_reloader') def test_swallows_keyboard_interrupt(self, mocked_get_reloader): mocked_get_reloader.side_effect = KeyboardInterrupt() autoreload.run_with_reloader(lambda: None) # No exception @mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'false'}) @mock.patch('django.utils.autoreload.restart_with_reloader') def test_calls_sys_exit(self, mocked_restart_reloader): mocked_restart_reloader.return_value = 1 with self.assertRaises(SystemExit) as exc: autoreload.run_with_reloader(lambda: None) self.assertEqual(exc.exception.code, 1) @mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: 'true'}) @mock.patch('django.utils.autoreload.start_django') @mock.patch('django.utils.autoreload.get_reloader') def test_calls_start_django(self, mocked_reloader, mocked_start_django): mocked_reloader.return_value = mock.sentinel.RELOADER autoreload.run_with_reloader(mock.sentinel.METHOD) self.assertEqual(mocked_start_django.call_count, 1) self.assertSequenceEqual( mocked_start_django.call_args[0], [mock.sentinel.RELOADER, mock.sentinel.METHOD] ) class StartDjangoTests(SimpleTestCase): @mock.patch('django.utils.autoreload.StatReloader') def test_watchman_becomes_unavailable(self, mocked_stat): mocked_stat.should_stop.return_value = True fake_reloader = mock.MagicMock() fake_reloader.should_stop = False fake_reloader.run.side_effect = autoreload.WatchmanUnavailable() autoreload.start_django(fake_reloader, lambda: None) self.assertEqual(mocked_stat.call_count, 1) @mock.patch('django.utils.autoreload.ensure_echo_on') def test_echo_on_called(self, mocked_echo): fake_reloader = mock.MagicMock() autoreload.start_django(fake_reloader, lambda: None) self.assertEqual(mocked_echo.call_count, 
1) @mock.patch('django.utils.autoreload.check_errors') def test_check_errors_called(self, mocked_check_errors): fake_method = mock.MagicMock(return_value=None) fake_reloader = mock.MagicMock() autoreload.start_django(fake_reloader, fake_method) self.assertCountEqual(mocked_check_errors.call_args[0], [fake_method]) @mock.patch('threading.Thread') @mock.patch('django.utils.autoreload.check_errors') def test_starts_thread_with_args(self, mocked_check_errors, mocked_thread): fake_reloader = mock.MagicMock() fake_main_func = mock.MagicMock() fake_thread = mock.MagicMock() mocked_check_errors.return_value = fake_main_func mocked_thread.return_value = fake_thread autoreload.start_django(fake_reloader, fake_main_func, 123, abc=123) self.assertEqual(mocked_thread.call_count, 1) self.assertEqual( mocked_thread.call_args[1], {'target': fake_main_func, 'args': (123,), 'kwargs': {'abc': 123}, 'name': 'django-main-thread'} ) self.assertIs(fake_thread.daemon, True) self.assertTrue(fake_thread.start.called) class TestCheckErrors(SimpleTestCase): def test_mutates_error_files(self): fake_method = mock.MagicMock(side_effect=RuntimeError()) wrapped = autoreload.check_errors(fake_method) with mock.patch.object(autoreload, '_error_files') as mocked_error_files: try: with self.assertRaises(RuntimeError): wrapped() finally: autoreload._exception = None self.assertEqual(mocked_error_files.append.call_count, 1) class TestRaiseLastException(SimpleTestCase): @mock.patch('django.utils.autoreload._exception', None) def test_no_exception(self): # Should raise no exception if _exception is None autoreload.raise_last_exception() def test_raises_exception(self): class MyException(Exception): pass # Create an exception try: raise MyException('Test Message') except MyException: exc_info = sys.exc_info() with mock.patch('django.utils.autoreload._exception', exc_info): with self.assertRaisesMessage(MyException, 'Test Message'): autoreload.raise_last_exception() def test_raises_custom_exception(self): 
class MyException(Exception): def __init__(self, msg, extra_context): super().__init__(msg) self.extra_context = extra_context # Create an exception. try: raise MyException('Test Message', 'extra context') except MyException: exc_info = sys.exc_info() with mock.patch('django.utils.autoreload._exception', exc_info): with self.assertRaisesMessage(MyException, 'Test Message'): autoreload.raise_last_exception() def test_raises_exception_with_context(self): try: raise Exception(2) except Exception as e: try: raise Exception(1) from e except Exception: exc_info = sys.exc_info() with mock.patch('django.utils.autoreload._exception', exc_info): with self.assertRaises(Exception) as cm: autoreload.raise_last_exception() self.assertEqual(cm.exception.args[0], 1) self.assertEqual(cm.exception.__cause__.args[0], 2) class RestartWithReloaderTests(SimpleTestCase): executable = '/usr/bin/python' def patch_autoreload(self, argv): patch_call = mock.patch('django.utils.autoreload.subprocess.run', return_value=CompletedProcess(argv, 0)) patches = [ mock.patch('django.utils.autoreload.sys.argv', argv), mock.patch('django.utils.autoreload.sys.executable', self.executable), mock.patch('django.utils.autoreload.sys.warnoptions', ['all']), ] for p in patches: p.start() self.addCleanup(p.stop) mock_call = patch_call.start() self.addCleanup(patch_call.stop) return mock_call def test_manage_py(self): with tempfile.TemporaryDirectory() as temp_dir: script = Path(temp_dir) / 'manage.py' script.touch() argv = [str(script), 'runserver'] mock_call = self.patch_autoreload(argv) autoreload.restart_with_reloader() self.assertEqual(mock_call.call_count, 1) self.assertEqual( mock_call.call_args[0][0], [self.executable, '-Wall'] + argv, ) def test_python_m_django(self): main = '/usr/lib/pythonX.Y/site-packages/django/__main__.py' argv = [main, 'runserver'] mock_call = self.patch_autoreload(argv) with mock.patch('django.__main__.__file__', main): with mock.patch.dict(sys.modules, {'__main__': 
django.__main__}): autoreload.restart_with_reloader() self.assertEqual(mock_call.call_count, 1) self.assertEqual(mock_call.call_args[0][0], [self.executable, '-Wall', '-m', 'django'] + argv[1:]) class ReloaderTests(SimpleTestCase): RELOADER_CLS = None def setUp(self): self._tempdir = tempfile.TemporaryDirectory() self.tempdir = Path(self._tempdir.name).resolve(strict=True).absolute() self.existing_file = self.ensure_file(self.tempdir / 'test.py') self.nonexistent_file = (self.tempdir / 'does_not_exist.py').absolute() self.reloader = self.RELOADER_CLS() def tearDown(self): self._tempdir.cleanup() self.reloader.stop() def ensure_file(self, path): path.parent.mkdir(exist_ok=True, parents=True) path.touch() # On Linux and Windows updating the mtime of a file using touch() will set a timestamp # value that is in the past, as the time value for the last kernel tick is used rather # than getting the correct absolute time. # To make testing simpler set the mtime to be the observed time when this function is # called. 
self.set_mtime(path, time.time()) return path.absolute() def set_mtime(self, fp, value): os.utime(str(fp), (value, value)) def increment_mtime(self, fp, by=1): current_time = time.time() self.set_mtime(fp, current_time + by) @contextlib.contextmanager def tick_twice(self): ticker = self.reloader.tick() next(ticker) yield next(ticker) class IntegrationTests: @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_glob(self, mocked_modules, notify_mock): non_py_file = self.ensure_file(self.tempdir / 'non_py_file') self.reloader.watch_dir(self.tempdir, '*.py') with self.tick_twice(): self.increment_mtime(non_py_file) self.increment_mtime(self.existing_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [self.existing_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_multiple_globs(self, mocked_modules, notify_mock): self.ensure_file(self.tempdir / 'x.test') self.reloader.watch_dir(self.tempdir, '*.py') self.reloader.watch_dir(self.tempdir, '*.test') with self.tick_twice(): self.increment_mtime(self.existing_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [self.existing_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_overlapping_globs(self, mocked_modules, notify_mock): self.reloader.watch_dir(self.tempdir, '*.py') self.reloader.watch_dir(self.tempdir, '*.p*') with self.tick_twice(): self.increment_mtime(self.existing_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [self.existing_file]) 
@mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_glob_recursive(self, mocked_modules, notify_mock): non_py_file = self.ensure_file(self.tempdir / 'dir' / 'non_py_file') py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py') self.reloader.watch_dir(self.tempdir, '**/*.py') with self.tick_twice(): self.increment_mtime(non_py_file) self.increment_mtime(py_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [py_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_multiple_recursive_globs(self, mocked_modules, notify_mock): non_py_file = self.ensure_file(self.tempdir / 'dir' / 'test.txt') py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py') self.reloader.watch_dir(self.tempdir, '**/*.txt') self.reloader.watch_dir(self.tempdir, '**/*.py') with self.tick_twice(): self.increment_mtime(non_py_file) self.increment_mtime(py_file) self.assertEqual(notify_mock.call_count, 2) self.assertCountEqual(notify_mock.call_args_list, [mock.call(py_file), mock.call(non_py_file)]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') @mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_nested_glob_recursive(self, mocked_modules, notify_mock): inner_py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py') self.reloader.watch_dir(self.tempdir, '**/*.py') self.reloader.watch_dir(inner_py_file.parent, '**/*.py') with self.tick_twice(): self.increment_mtime(inner_py_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [inner_py_file]) @mock.patch('django.utils.autoreload.BaseReloader.notify_file_changed') 
@mock.patch('django.utils.autoreload.iter_all_python_module_files', return_value=frozenset()) def test_overlapping_glob_recursive(self, mocked_modules, notify_mock): py_file = self.ensure_file(self.tempdir / 'dir' / 'file.py') self.reloader.watch_dir(self.tempdir, '**/*.p*') self.reloader.watch_dir(self.tempdir, '**/*.py*') with self.tick_twice(): self.increment_mtime(py_file) self.assertEqual(notify_mock.call_count, 1) self.assertCountEqual(notify_mock.call_args[0], [py_file]) class BaseReloaderTests(ReloaderTests): RELOADER_CLS = autoreload.BaseReloader def test_watch_dir_with_unresolvable_path(self): path = Path('unresolvable_directory') with mock.patch.object(Path, 'absolute', side_effect=FileNotFoundError): self.reloader.watch_dir(path, '**/*.mo') self.assertEqual(list(self.reloader.directory_globs), []) def test_watch_with_glob(self): self.reloader.watch_dir(self.tempdir, '*.py') watched_files = list(self.reloader.watched_files()) self.assertIn(self.existing_file, watched_files) def test_watch_files_with_recursive_glob(self): inner_file = self.ensure_file(self.tempdir / 'test' / 'test.py') self.reloader.watch_dir(self.tempdir, '**/*.py') watched_files = list(self.reloader.watched_files()) self.assertIn(self.existing_file, watched_files) self.assertIn(inner_file, watched_files) def test_run_loop_catches_stopiteration(self): def mocked_tick(): yield with mock.patch.object(self.reloader, 'tick', side_effect=mocked_tick) as tick: self.reloader.run_loop() self.assertEqual(tick.call_count, 1) def test_run_loop_stop_and_return(self): def mocked_tick(*args): yield self.reloader.stop() return # Raises StopIteration with mock.patch.object(self.reloader, 'tick', side_effect=mocked_tick) as tick: self.reloader.run_loop() self.assertEqual(tick.call_count, 1) def test_wait_for_apps_ready_checks_for_exception(self): app_reg = Apps() app_reg.ready_event.set() # thread.is_alive() is False if it's not started. 
dead_thread = threading.Thread() self.assertFalse(self.reloader.wait_for_apps_ready(app_reg, dead_thread)) def test_wait_for_apps_ready_without_exception(self): app_reg = Apps() app_reg.ready_event.set() thread = mock.MagicMock() thread.is_alive.return_value = True self.assertTrue(self.reloader.wait_for_apps_ready(app_reg, thread)) def skip_unless_watchman_available(): try: autoreload.WatchmanReloader.check_availability() except WatchmanUnavailable as e: return skip('Watchman unavailable: %s' % e) return lambda func: func @skip_unless_watchman_available() class WatchmanReloaderTests(ReloaderTests, IntegrationTests): RELOADER_CLS = autoreload.WatchmanReloader def setUp(self): super().setUp() # Shorten the timeout to speed up tests. self.reloader.client_timeout = 0.1 def test_watch_glob_ignores_non_existing_directories_two_levels(self): with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe: self.reloader._watch_glob(self.tempdir / 'does_not_exist' / 'more', ['*']) self.assertFalse(mocked_subscribe.called) def test_watch_glob_uses_existing_parent_directories(self): with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe: self.reloader._watch_glob(self.tempdir / 'does_not_exist', ['*']) self.assertSequenceEqual( mocked_subscribe.call_args[0], [ self.tempdir, 'glob-parent-does_not_exist:%s' % self.tempdir, ['anyof', ['match', 'does_not_exist/*', 'wholename']] ] ) def test_watch_glob_multiple_patterns(self): with mock.patch.object(self.reloader, '_subscribe') as mocked_subscribe: self.reloader._watch_glob(self.tempdir, ['*', '*.py']) self.assertSequenceEqual( mocked_subscribe.call_args[0], [ self.tempdir, 'glob:%s' % self.tempdir, ['anyof', ['match', '*', 'wholename'], ['match', '*.py', 'wholename']] ] ) def test_watched_roots_contains_files(self): paths = self.reloader.watched_roots([self.existing_file]) self.assertIn(self.existing_file.parent, paths) def test_watched_roots_contains_directory_globs(self): 
self.reloader.watch_dir(self.tempdir, '*.py') paths = self.reloader.watched_roots([]) self.assertIn(self.tempdir, paths) def test_watched_roots_contains_sys_path(self): with extend_sys_path(str(self.tempdir)): paths = self.reloader.watched_roots([]) self.assertIn(self.tempdir, paths) def test_check_server_status(self): self.assertTrue(self.reloader.check_server_status()) def test_check_server_status_raises_error(self): with mock.patch.object(self.reloader.client, 'query') as mocked_query: mocked_query.side_effect = Exception() with self.assertRaises(autoreload.WatchmanUnavailable): self.reloader.check_server_status() @mock.patch('pywatchman.client') def test_check_availability(self, mocked_client): mocked_client().capabilityCheck.side_effect = Exception() with self.assertRaisesMessage(WatchmanUnavailable, 'Cannot connect to the watchman service'): self.RELOADER_CLS.check_availability() @mock.patch('pywatchman.client') def test_check_availability_lower_version(self, mocked_client): mocked_client().capabilityCheck.return_value = {'version': '4.8.10'} with self.assertRaisesMessage(WatchmanUnavailable, 'Watchman 4.9 or later is required.'): self.RELOADER_CLS.check_availability() def test_pywatchman_not_available(self): with mock.patch.object(autoreload, 'pywatchman') as mocked: mocked.__bool__.return_value = False with self.assertRaisesMessage(WatchmanUnavailable, 'pywatchman not installed.'): self.RELOADER_CLS.check_availability() def test_update_watches_raises_exceptions(self): class TestException(Exception): pass with mock.patch.object(self.reloader, '_update_watches') as mocked_watches: with mock.patch.object(self.reloader, 'check_server_status') as mocked_server_status: mocked_watches.side_effect = TestException() mocked_server_status.return_value = True with self.assertRaises(TestException): self.reloader.update_watches() self.assertIsInstance(mocked_server_status.call_args[0][0], TestException) @mock.patch.dict(os.environ, {'DJANGO_WATCHMAN_TIMEOUT': '10'}) def 
test_setting_timeout_from_environment_variable(self): self.assertEqual(self.RELOADER_CLS().client_timeout, 10) @skipIf(on_macos_with_hfs(), "These tests do not work with HFS+ as a filesystem") class StatReloaderTests(ReloaderTests, IntegrationTests): RELOADER_CLS = autoreload.StatReloader def setUp(self): super().setUp() # Shorten the sleep time to speed up tests. self.reloader.SLEEP_TIME = 0.01 @mock.patch('django.utils.autoreload.StatReloader.notify_file_changed') def test_tick_does_not_trigger_twice(self, mock_notify_file_changed): with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]): ticker = self.reloader.tick() next(ticker) self.increment_mtime(self.existing_file) next(ticker) next(ticker) self.assertEqual(mock_notify_file_changed.call_count, 1) def test_snapshot_files_ignores_missing_files(self): with mock.patch.object(self.reloader, 'watched_files', return_value=[self.nonexistent_file]): self.assertEqual(dict(self.reloader.snapshot_files()), {}) def test_snapshot_files_updates(self): with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file]): snapshot1 = dict(self.reloader.snapshot_files()) self.assertIn(self.existing_file, snapshot1) self.increment_mtime(self.existing_file) snapshot2 = dict(self.reloader.snapshot_files()) self.assertNotEqual(snapshot1[self.existing_file], snapshot2[self.existing_file]) def test_snapshot_files_with_duplicates(self): with mock.patch.object(self.reloader, 'watched_files', return_value=[self.existing_file, self.existing_file]): snapshot = list(self.reloader.snapshot_files()) self.assertEqual(len(snapshot), 1) self.assertEqual(snapshot[0][0], self.existing_file)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- # pylint: disable-msg=C0103 ############################################################################### # Copyright (c) 2006-2013 Franz Inc. # All rights reserved. This program and the accompanying materials # are made available under the terms of the Eclipse Public License v1.0 # which accompanies this distribution, and is available at # http://www.eclipse.org/legal/epl-v10.html ############################################################################### from __future__ import absolute_import from ..model.value import URI NS = "http://www.w3.org/2000/01/rdf-schema#" class RDFS: """ A 'static' class containing useful RDFS URIs. """ NAMESPACE = NS RESOURCE = URI(namespace=NS, localname="Resource") LITERAL = URI(namespace=NS, localname="Literal") CLASS = URI(namespace=NS, localname="Class") SUBCLASSOF = URI(namespace=NS, localname="subClassOf") SUBPROPERTYOF = URI(namespace=NS, localname="subPropertyOf") DOMAIN = URI(namespace=NS, localname="domain") RANGE = URI(namespace=NS, localname="range") COMMENT = URI(namespace=NS, localname="comment") LABEL = URI(namespace=NS, localname="label") DATATYPE = URI(namespace=NS, localname="Datatype") CONTAINER = URI(namespace=NS, localname="Container") MEMBER = URI(namespace=NS, localname="member") ISDEFINEDBY = URI(namespace=NS, localname="isDefinedBy") SEEALSO = URI(namespace=NS, localname="seeAlso") CONTAINERMEMBERSHIPPROPERTY = URI(namespace=NS, localname="ContainerMembershipProperty") ## map of uri strings to URI objects: uristr2obj = {} for name, uri in RDFS.__dict__.iteritems(): if name.upper() == name: RDFS.uristr2obj[str(uri)] = uri del RDFS.uristr2obj[NS]
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies --generator-output= behavior when using rules. """ import TestGyp test = TestGyp.TestGyp() test.writable(test.workpath('rules'), False) test.run_gyp('rules.gyp', '--generator-output=' + test.workpath('gypfiles'), chdir='rules') test.writable(test.workpath('rules'), True) test.relocate('rules', 'relocate/rules') test.relocate('gypfiles', 'relocate/gypfiles') test.writable(test.workpath('relocate/rules'), False) test.writable(test.workpath('relocate/rules/build'), True) test.writable(test.workpath('relocate/rules/subdir1/build'), True) test.writable(test.workpath('relocate/rules/subdir2/build'), True) test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True) test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles') expect = """\ Hello from program.c Hello from function1.in1 Hello from function2.in1 Hello from define3.in0 Hello from define4.in0 """ if test.format == 'xcode': chdir = 'relocate/rules/subdir1' else: chdir = 'relocate/gypfiles' test.run_built_executable('program', chdir=chdir, stdout=expect) test.must_match('relocate/rules/subdir2/rules-out/file1.out', "Hello from file1.in0\n") test.must_match('relocate/rules/subdir2/rules-out/file2.out', "Hello from file2.in0\n") test.must_match('relocate/rules/subdir2/rules-out/file3.out', "Hello from file3.in1\n") test.must_match('relocate/rules/subdir2/rules-out/file4.out', "Hello from file4.in1\n") test.pass_test()
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urllib_parse from ..utils import ( xpath_text, xpath_with_ns, int_or_none, parse_iso8601, ) class BetIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html' _TESTS = [ { 'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html', 'info_dict': { 'id': '417cd61c-c793-4e8e-b006-e445ecc45add', 'display_id': 'in-bet-exclusive-obama-talks-race-and-racism', 'ext': 'flv', 'title': 'BET News Presents: A Conversation With President Obama', 'description': 'md5:5a88d8ae912c1b33e090290af7ec33c6', 'duration': 1534, 'timestamp': 1418075340, 'upload_date': '20141208', 'uploader': 'admin', 'thumbnail': 're:(?i)^https?://.*\.jpg$', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html', 'info_dict': { 'id': '4160e53b-ad41-43b1-980f-8d85f63121f4', 'display_id': 'justice-for-ferguson-a-community-reacts', 'ext': 'flv', 'title': 'Justice for Ferguson: A Community Reacts', 'description': 'A BET News special.', 'duration': 1696, 'timestamp': 1416942360, 'upload_date': '20141125', 'uploader': 'admin', 'thumbnail': 're:(?i)^https?://.*\.jpg$', }, 'params': { # rtmp download 'skip_download': True, }, } ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) media_url = compat_urllib_parse.unquote(self._search_regex( [r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"], webpage, 'media URL')) mrss = self._download_xml(media_url, display_id) item = mrss.find('./channel/item') NS_MAP = { 'dc': 'http://purl.org/dc/elements/1.1/', 'media': 'http://search.yahoo.com/mrss/', 'ka': 'http://kickapps.com/karss', } title = xpath_text(item, './title', 'title') description = xpath_text( item, './description', 'description', fatal=False) video_id = 
xpath_text(item, './guid', 'video id', fatal=False) timestamp = parse_iso8601(xpath_text( item, xpath_with_ns('./dc:date', NS_MAP), 'upload date', fatal=False)) uploader = xpath_text( item, xpath_with_ns('./dc:creator', NS_MAP), 'uploader', fatal=False) media_content = item.find( xpath_with_ns('./media:content', NS_MAP)) duration = int_or_none(media_content.get('duration')) smil_url = media_content.get('url') thumbnail = media_content.find( xpath_with_ns('./media:thumbnail', NS_MAP)).get('url') formats = self._extract_smil_formats(smil_url, display_id) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'uploader': uploader, 'duration': duration, 'formats': formats, }
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true 1_000_000.times.map { { "foo" => "bar", "bar" => "baz", "baz" => "lol", "lol" => "lgtm" } }
ruby
github
https://github.com/ruby/ruby
benchmark/hash_literal_small4.rb
#!/usr/bin/python # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: clc_firewall_policy short_description: Create/delete/update firewall policies description: - Create or delete or update firewall polices on Centurylink Cloud version_added: "2.0" options: location: description: - Target datacenter for the firewall policy required: True state: description: - Whether to create or delete the firewall policy default: present required: False choices: ['present', 'absent'] source: description: - The list of source addresses for traffic on the originating firewall. This is required when state is 'present" default: None required: False destination: description: - The list of destination addresses for traffic on the terminating firewall. This is required when state is 'present' default: None required: False ports: description: - The list of ports associated with the policy. TCP and UDP can take in single ports or port ranges. default: None required: False choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'] firewall_policy_id: description: - Id of the firewall policy. This is required to update or delete an existing firewall policy default: None required: False source_account_alias: description: - CLC alias for the source account required: True destination_account_alias: description: - CLC alias for the destination account default: None required: False wait: description: - Whether to wait for the provisioning tasks to finish before returning. 
default: True required: False choices: [True, False] enabled: description: - Whether the firewall policy is enabled or disabled default: True required: False choices: [True, False] requirements: - python = 2.7 - requests >= 2.5.0 - clc-sdk author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
''' EXAMPLES = ''' --- - name: Create Firewall Policy hosts: localhost gather_facts: False connection: local tasks: - name: Create / Verify an Firewall Policy at CenturyLink Cloud clc_firewall: source_account_alias: WFAD location: VA1 state: present source: 10.128.216.0/24 destination: 10.128.216.0/24 ports: Any destination_account_alias: WFAD --- - name: Delete Firewall Policy hosts: localhost gather_facts: False connection: local tasks: - name: Delete an Firewall Policy at CenturyLink Cloud clc_firewall: source_account_alias: WFAD location: VA1 state: absent firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1 ''' RETURN = ''' firewall_policy_id: description: The fire wall policy id returned: success type: string sample: fc36f1bfd47242e488a9c44346438c05 firewall_policy: description: The fire wall policy information returned: success type: dict sample: { "destination":[ "10.1.1.0/24", "10.2.2.0/24" ], "destinationAccount":"wfad", "enabled":true, "id":"fc36f1bfd47242e488a9c44346438c05", "links":[ { "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", "rel":"self", "verbs":[ "GET", "PUT", "DELETE" ] } ], "ports":[ "any" ], "source":[ "10.1.1.0/24", "10.2.2.0/24" ], "status":"active" } ''' __version__ = '${version}' import os import urlparse from time import sleep from distutils.version import LooseVersion try: import requests except ImportError: REQUESTS_FOUND = False else: REQUESTS_FOUND = True try: import clc as clc_sdk from clc import APIFailedResponse except ImportError: CLC_FOUND = False clc_sdk = None else: CLC_FOUND = True from ansible.module_utils.basic import AnsibleModule class ClcFirewallPolicy: clc = None def __init__(self, module): """ Construct module """ self.clc = clc_sdk self.module = module self.firewall_dict = {} if not CLC_FOUND: self.module.fail_json( msg='clc-python-sdk required for this module') if not REQUESTS_FOUND: self.module.fail_json( msg='requests library is required for this module') 
if requests.__version__ and LooseVersion( requests.__version__) < LooseVersion('2.5.0'): self.module.fail_json( msg='requests library version should be >= 2.5.0') self._set_user_agent(self.clc) @staticmethod def _define_module_argument_spec(): """ Define the argument spec for the ansible module :return: argument spec dictionary """ argument_spec = dict( location=dict(required=True), source_account_alias=dict(required=True, default=None), destination_account_alias=dict(default=None), firewall_policy_id=dict(default=None), ports=dict(default=None, type='list'), source=dict(default=None, type='list'), destination=dict(default=None, type='list'), wait=dict(default=True), state=dict(default='present', choices=['present', 'absent']), enabled=dict(default=True, choices=[True, False]) ) return argument_spec def process_request(self): """ Execute the main code path, and handle the request :return: none """ changed = False firewall_policy = None location = self.module.params.get('location') source_account_alias = self.module.params.get('source_account_alias') destination_account_alias = self.module.params.get( 'destination_account_alias') firewall_policy_id = self.module.params.get('firewall_policy_id') ports = self.module.params.get('ports') source = self.module.params.get('source') destination = self.module.params.get('destination') wait = self.module.params.get('wait') state = self.module.params.get('state') enabled = self.module.params.get('enabled') self.firewall_dict = { 'location': location, 'source_account_alias': source_account_alias, 'destination_account_alias': destination_account_alias, 'firewall_policy_id': firewall_policy_id, 'ports': ports, 'source': source, 'destination': destination, 'wait': wait, 'state': state, 'enabled': enabled} self._set_clc_credentials_from_env() if state == 'absent': changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent( source_account_alias, location, self.firewall_dict) elif state == 'present': 
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present( source_account_alias, location, self.firewall_dict) return self.module.exit_json( changed=changed, firewall_policy_id=firewall_policy_id, firewall_policy=firewall_policy) @staticmethod def _get_policy_id_from_response(response): """ Method to parse out the policy id from creation response :param response: response from firewall creation API call :return: policy_id: firewall policy id from creation call """ url = response.get('links')[0]['href'] path = urlparse.urlparse(url).path path_list = os.path.split(path) policy_id = path_list[-1] return policy_id def _set_clc_credentials_from_env(self): """ Set the CLC Credentials on the sdk by reading environment variables :return: none """ env = os.environ v2_api_token = env.get('CLC_V2_API_TOKEN', False) v2_api_username = env.get('CLC_V2_API_USERNAME', False) v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) clc_alias = env.get('CLC_ACCT_ALIAS', False) api_url = env.get('CLC_V2_API_URL', False) if api_url: self.clc.defaults.ENDPOINT_URL_V2 = api_url if v2_api_token and clc_alias: self.clc._LOGIN_TOKEN_V2 = v2_api_token self.clc._V2_ENABLED = True self.clc.ALIAS = clc_alias elif v2_api_username and v2_api_passwd: self.clc.v2.SetCredentials( api_username=v2_api_username, api_passwd=v2_api_passwd) else: return self.module.fail_json( msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " "environment variables") def _ensure_firewall_policy_is_present( self, source_account_alias, location, firewall_dict): """ Ensures that a given firewall policy is present :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_dict: dictionary of request parameters for firewall policy :return: (changed, firewall_policy_id, firewall_policy) changed: flag for if a change occurred firewall_policy_id: the firewall policy id that was created/updated 
firewall_policy: The firewall_policy object """ firewall_policy = None firewall_policy_id = firewall_dict.get('firewall_policy_id') if firewall_policy_id is None: if not self.module.check_mode: response = self._create_firewall_policy( source_account_alias, location, firewall_dict) firewall_policy_id = self._get_policy_id_from_response( response) changed = True else: firewall_policy = self._get_firewall_policy( source_account_alias, location, firewall_policy_id) if not firewall_policy: return self.module.fail_json( msg='Unable to find the firewall policy id : {0}'.format( firewall_policy_id)) changed = self._compare_get_request_with_dict( firewall_policy, firewall_dict) if not self.module.check_mode and changed: self._update_firewall_policy( source_account_alias, location, firewall_policy_id, firewall_dict) if changed and firewall_policy_id: firewall_policy = self._wait_for_requests_to_complete( source_account_alias, location, firewall_policy_id) return changed, firewall_policy_id, firewall_policy def _ensure_firewall_policy_is_absent( self, source_account_alias, location, firewall_dict): """ Ensures that a given firewall policy is removed if present :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_dict: firewall policy to delete :return: (changed, firewall_policy_id, response) changed: flag for if a change occurred firewall_policy_id: the firewall policy id that was deleted response: response from CLC API call """ changed = False response = [] firewall_policy_id = firewall_dict.get('firewall_policy_id') result = self._get_firewall_policy( source_account_alias, location, firewall_policy_id) if result: if not self.module.check_mode: response = self._delete_firewall_policy( source_account_alias, location, firewall_policy_id) changed = True return changed, firewall_policy_id, response def _create_firewall_policy( self, source_account_alias, location, firewall_dict): """ 
Creates the firewall policy for the given account alias :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_dict: dictionary of request parameters for firewall policy :return: response from CLC API call """ payload = { 'destinationAccount': firewall_dict.get('destination_account_alias'), 'source': firewall_dict.get('source'), 'destination': firewall_dict.get('destination'), 'ports': firewall_dict.get('ports')} try: response = self.clc.v2.API.Call( 'POST', '/v2-experimental/firewallPolicies/%s/%s' % (source_account_alias, location), payload) except APIFailedResponse as e: return self.module.fail_json( msg="Unable to create firewall policy. %s" % str(e.response_text)) return response def _delete_firewall_policy( self, source_account_alias, location, firewall_policy_id): """ Deletes a given firewall policy for an account alias in a datacenter :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_policy_id: firewall policy id to delete :return: response: response from CLC API call """ try: response = self.clc.v2.API.Call( 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % (source_account_alias, location, firewall_policy_id)) except APIFailedResponse as e: return self.module.fail_json( msg="Unable to delete the firewall policy id : {0}. 
{1}".format( firewall_policy_id, str(e.response_text))) return response def _update_firewall_policy( self, source_account_alias, location, firewall_policy_id, firewall_dict): """ Updates a firewall policy for a given datacenter and account alias :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_policy_id: firewall policy id to update :param firewall_dict: dictionary of request parameters for firewall policy :return: response: response from CLC API call """ try: response = self.clc.v2.API.Call( 'PUT', '/v2-experimental/firewallPolicies/%s/%s/%s' % (source_account_alias, location, firewall_policy_id), firewall_dict) except APIFailedResponse as e: return self.module.fail_json( msg="Unable to update the firewall policy id : {0}. {1}".format( firewall_policy_id, str(e.response_text))) return response @staticmethod def _compare_get_request_with_dict(response, firewall_dict): """ Helper method to compare the json response for getting the firewall policy with the request parameters :param response: response from the get method :param firewall_dict: dictionary of request parameters for firewall policy :return: changed: Boolean that returns true if there are differences between the response parameters and the playbook parameters """ changed = False response_dest_account_alias = response.get('destinationAccount') response_enabled = response.get('enabled') response_source = response.get('source') response_dest = response.get('destination') response_ports = response.get('ports') request_dest_account_alias = firewall_dict.get( 'destination_account_alias') request_enabled = firewall_dict.get('enabled') if request_enabled is None: request_enabled = True request_source = firewall_dict.get('source') request_dest = firewall_dict.get('destination') request_ports = firewall_dict.get('ports') if ( response_dest_account_alias and str(response_dest_account_alias) != 
str(request_dest_account_alias)) or ( response_enabled != request_enabled) or ( response_source and response_source != request_source) or ( response_dest and response_dest != request_dest) or ( response_ports and response_ports != request_ports): changed = True return changed def _get_firewall_policy( self, source_account_alias, location, firewall_policy_id): """ Get back details for a particular firewall policy :param source_account_alias: the source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_policy_id: id of the firewall policy to get :return: response - The response from CLC API call """ response = None try: response = self.clc.v2.API.Call( 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' % (source_account_alias, location, firewall_policy_id)) except APIFailedResponse as e: if e.response_status_code != 404: self.module.fail_json( msg="Unable to fetch the firewall policy with id : {0}. {1}".format( firewall_policy_id, str(e.response_text))) return response def _wait_for_requests_to_complete( self, source_account_alias, location, firewall_policy_id, wait_limit=50): """ Waits until the CLC requests are complete if the wait argument is True :param source_account_alias: The source account alias for the firewall policy :param location: datacenter of the firewall policy :param firewall_policy_id: The firewall policy id :param wait_limit: The number of times to check the status for completion :return: the firewall_policy object """ wait = self.module.params.get('wait') count = 0 firewall_policy = None while wait: count += 1 firewall_policy = self._get_firewall_policy( source_account_alias, location, firewall_policy_id) status = firewall_policy.get('status') if status == 'active' or count > wait_limit: wait = False else: # wait for 2 seconds sleep(2) return firewall_policy @staticmethod def _set_user_agent(clc): if hasattr(clc, 'SetRequestsSession'): agent_string = "ClcAnsibleModule/" + __version__ ses = 
requests.Session() ses.headers.update({"Api-Client": agent_string}) ses.headers['User-Agent'] += " " + agent_string clc.SetRequestsSession(ses) def main(): """ The main function. Instantiates the module and calls process_request. :return: none """ module = AnsibleModule( argument_spec=ClcFirewallPolicy._define_module_argument_spec(), supports_check_mode=True) clc_firewall = ClcFirewallPolicy(module) clc_firewall.process_request() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
'use strict'; const common = require('../common.js'); const { win32 } = require('path'); const bench = common.createBenchmark(main, { path: [ '', 'C:\\', 'C:\\foo', '\\foo', 'E:\\foo\\bar.baz', 'foo\\.bar.baz', 'foo\\bar', '\\foo\\bar\\baz\\asdf\\.quux', ], n: [1e5], }); function main({ n, path }) { bench.start(); for (let i = 0; i < n; i++) { win32.parse(i % 3 === 0 ? `${path}${i}` : path); } bench.end(n); }
javascript
github
https://github.com/nodejs/node
benchmark/path/parse-win32.js
# -*- coding: utf-8 -*- # This file is part of emesene. # # emesene is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # emesene is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with emesene; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA '''a module that defines the api of objects that display dialogs''' import traceback import os import gtk import pango import gobject import tempfile import e3 import gui import utils import stock import extension import ContactInformation import SearchEntry import logging log = logging.getLogger('gtkui.Dialog') try: #check for webkit gi package from gui.gtkui import check_gtk3 if check_gtk3(): import gi.pygtkcompat gi.pygtkcompat.enable_webkit(version='3.0') import webkit use_webkit = True except (ImportError, ValueError): use_webkit = False # A list of all open dialogs dialogs = [] class Dialog(object): '''a class full of static methods to handle dialogs, dont instantiate it''' NAME = 'Dialog' DESCRIPTION = 'Class to show all the dialogs of the application' AUTHOR = 'Mariano Guerra' WEBSITE = 'www.emesene.org' @classmethod def close_all(cls): '''close all left open dialogs''' global dialogs for dialog in list(dialogs): dialog.destroy() @classmethod def window_destroy(cls, window): '''properly close and remove a dialog''' global dialogs dialogs.remove(window) window.destroy() @classmethod def window_add_image(cls, window, stock_id): '''add a stock image as the first element of the window.hbox''' image = gtk.image_new_from_stock(stock_id, 
gtk.ICON_SIZE_DIALOG) alignment = gtk.Alignment(xalign=0.0, yalign=0.1) alignment.add(image) window.hbox.pack_start(alignment, False) alignment.show_all() return image @classmethod def window_add_label(cls, window, text): '''add a label with the text (as pango) on the window''' def on_activate_link(label, uri): gui.base.Desktop.open(uri) return True label = gtk.Label() label.connect('activate-link', on_activate_link) #label.set_selectable(True) label.set_use_markup(True) label.set_markup('<span>' + \ text + "</span>") window.hbox.pack_start(label, True, True) label.show() return label @classmethod def window_add_label_vbox(cls, window, text): '''add a label with the text (as pango) on the window''' label = gtk.Label() label.set_selectable(True) label.set_use_markup(True) label.set_markup('<span>' + \ text + "</span>") window.vbox.pack_start(label, True, True) label.show() return label @classmethod def close_cb(cls, widget, event, window, response_cb, *args): '''default close callback, call response_cb with args if it's not None''' if response_cb: response_cb(*args) window.destroy() @classmethod def default_cb(cls, widget, window, response_cb, *args): '''default callbacks, call response_cb with args if it's not None''' if response_cb: response_cb(*args) window.destroy() @classmethod def on_file_click_cb(cls, widget, window, response_cb): response_cb(gtk.STOCK_OPEN, widget.get_filename()) window.destroy() @classmethod def chooser_cb(cls, widget, window, response_cb, response): '''callback user for dialogs that contain a chooser, return the status and the selected file''' filename = window.chooser.get_filename() if response_cb: response_cb(response, filename) window.destroy() @classmethod def entry_cb(cls, widget, window, response_cb, *args): '''callback called when the entry is activated, it call the response callback with the stock.ACCEPT and append the value of the entry to args''' args = list(args) args.append(window.entry.get_text()) if response_cb: if 
isinstance(widget, gtk.Entry): response_cb(stock.ACCEPT, *args) else: response_cb(*args) window.destroy() @classmethod def add_contact_cb(cls, widget, window, response_cb, response): '''callback called when a button is selected on the add_contact dialog''' contact = window.entry.get_text() group = window.combo.get_model().get_value( window.combo.get_active_iter(), 0) window.destroy() response_cb(response, contact, group) @classmethod def common_window(cls, message, stock_id, response_cb, title, *args): '''create a window that displays a message with a stock image''' window = cls.new_window(title, response_cb, *args) cls.window_add_image(window, stock_id) cls.window_add_label(window, message) return window @classmethod def message_window(cls, message, stock_id, response_cb, title): '''create a window that displays a message with a stock image and a close button''' window = cls.common_window(message, stock_id, response_cb, title) cls.add_button(window, gtk.STOCK_CLOSE, stock.CLOSE, response_cb, cls.default_cb) window.set_modal(True) return window @classmethod def entry_window(cls, message, text, response_cb, title, *args): '''create a window that contains a label and a entry with text set and selected, and two buttons, accept, cancel''' window = cls.new_window(title, response_cb, stock.CANCEL, *args) cls.window_add_label(window, message) entry = gtk.Entry() entry.set_text(text) entry.select_region(0, -1) entry.connect('activate', cls.entry_cb, window, response_cb, *args) window.hbox.pack_start(entry, True, True) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, response_cb, cls.entry_cb, *args) cls.add_button(window, gtk.STOCK_OK, stock.ACCEPT, response_cb, cls.entry_cb, *args) setattr(window, 'entry', entry) entry.show() return window @classmethod def add_button(cls, window, gtk_stock, stock_id, response_cb, callback, *args): '''add a button and connect the signal''' button = gtk.Button(stock=gtk_stock) window.bbox.pack_start(button, True, True) 
button.connect('clicked', callback, window, response_cb, stock_id, *args) button.show() return button @classmethod def new_window(cls, title, response_cb=None, *args): '''build a window with the default values and connect the common signals, return the window''' window = gtk.Window() window.set_title(title) window.set_role("dialog") window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG) window.set_default_size(150, 100) window.set_position(gtk.WIN_POS_CENTER) window.set_border_width(8) vbox = gtk.VBox(spacing=4) hbox = gtk.HBox(spacing=4) bbox = gtk.HButtonBox() bbox.set_spacing(4) bbox.set_layout(gtk.BUTTONBOX_END) vbox.pack_start(hbox, True, True) vbox.pack_start(bbox, False) window.add(vbox) global dialogs dialogs.append(window) setattr(window, 'vbox', vbox) setattr(window, 'hbox', hbox) setattr(window, 'bbox', bbox) args = list(args) args.insert(0, stock.CLOSE) window.connect('delete-event', cls.close_cb, window, response_cb, *args) window.connect('destroy', cls.window_destroy) vbox.show_all() return window @classmethod def save_as(cls, current_path, response_cb, title=_("Save as")): '''show a save as dialog with the current directory set to path. the buttons should display a cancel and save buttons. the posible reasons are stock.CANCEL, stock.SAVE and stock.CLOSE''' window = cls.new_window(title, response_cb) window.set_default_size(640, 480) chooser = gtk.FileChooserWidget(gtk.FILE_CHOOSER_ACTION_SAVE) chooser.set_current_folder(current_path) setattr(window, 'chooser', chooser) window.hbox.pack_start(chooser) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, response_cb, cls.chooser_cb) cls.add_button(window, gtk.STOCK_SAVE, stock.SAVE, response_cb, cls.chooser_cb) window.show_all() @classmethod def choose_file(cls, current_path, response_cb, title=_("Choose file")): '''show a choose dialog with the current directory set to path. the buttons should display a cancel and save buttons. 
the posible reasons are stock.CANCEL, stock.SAVE and stock.CLOSE''' window = cls.new_window(title, response_cb, current_path) window.set_default_size(640, 480) chooser = gtk.FileChooserWidget(gtk.FILE_CHOOSER_ACTION_OPEN) chooser.set_current_folder(current_path) setattr(window, 'chooser', chooser) window.hbox.pack_start(chooser) chooser.connect("file-activated", cls.on_file_click_cb, window, response_cb) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, response_cb, cls.chooser_cb) cls.add_button(window, gtk.STOCK_OPEN, stock.OPEN, response_cb, cls.chooser_cb) window.show_all() @classmethod def error(cls, message, response_cb=None, title=_("Error!")): '''show an error dialog displaying the message, this dialog should have only the option to close and the response callback is optional since in few cases one want to know when the error dialog was closed, but it can happen, so return stock.CLOSE to the callback if its set''' cls.message_window(message, gtk.STOCK_DIALOG_ERROR, response_cb, title).show() @classmethod def exc_error(cls, message, response_cb=None, title=_("Error!")): '''show an error dialog displaying the message and the traceback; this dialog should have only the option to close and the response callback is optional since in few cases one want to know when the error dialog was closed, but it can happen, so return stock.CLOSE to the callback if its set''' #cls.message_window('%s\n\n%s' % (message, traceback.format_exc()), # gtk.STOCK_DIALOG_ERROR, response_cb, title).show() window = gtk.Window() vbox = gtk.VBox() text = gtk.Label(message) vbox.pack_start(text) hide_button = gtk.ToggleButton(_('Show details')) trace = gtk.Label(traceback.format_exc()) def on_hide(*args): if hide_button.get_active(): #show hide_button.set_label(_('Hide details')) trace.show() else: hide_button.set_label(_('Show details')) trace.hide() hide_button.connect('toggled', on_hide) close_button = gtk.Button(stock=gtk.STOCK_OK) def on_ok(*args): window.hide() 
        # (continuation of exc_error) wire up the OK button and pack the
        # message, the collapsible traceback and the close button
        close_button.connect('clicked', on_ok)
        vbox.pack_start(hide_button, False, False)
        vbox.pack_start(trace)
        vbox.pack_start(close_button, False, False)
        window.add(vbox)
        window.show_all()
        # start with the traceback collapsed: on_hide() syncs the toggle
        # label and the trace label visibility with the button state
        on_hide()

    @classmethod
    def warning(cls, message, response_cb=None, title=_("Warning")):
        '''show a warning dialog displaying the message, this dialog should
        have only the option to accept, like the error dialog, the
        response callback is optional, but you have to check if it's not
        None and send the response (that can be stock.ACCEPT or
        stock.CLOSE, if the user closed the window with the x)'''

        cls.message_window(message, gtk.STOCK_DIALOG_WARNING, response_cb,
            title).show()

    @classmethod
    def information(cls, message, response_cb=None,
        title=_("Information"),):
        '''show an information dialog displaying the message, this dialog
        should have only the option to accept, like the error dialog, the
        response callback is optional, but you have to check if it's not
        None and send the response (that can be stock.ACCEPT or
        stock.CLOSE, if the user closed the window with the x)'''

        cls.message_window(message, gtk.STOCK_DIALOG_INFO, response_cb,
            title).show()

    @classmethod
    def exception(cls, message, response_cb=None, title=_("Exception"),):
        '''show the message of an exception on a dialog, useful to
        connect with sys.excepthook'''
        window = cls.new_window(title, response_cb)
        label = cls.window_add_label(window, message)
        cls.add_button(window, gtk.STOCK_CLOSE, stock.CLOSE, response_cb,
            cls.default_cb)
        # NOTE(review): `label` here is the gtk.Label returned above, not a
        # string; window_add_label concatenates its `text` argument into
        # pango markup, so this call would raise TypeError if this branch
        # ever runs -- looks like a leftover/bug (a duplicate of the
        # message?). Confirm intended behaviour before relying on it.
        cls.window_add_label(window, label)
        window.show()

    @classmethod
    def yes_no(cls, message, response_cb, *args):
        '''show a confirm dialog displaying a question and two buttons:
        Yes and No, return the response as stock.YES or stock.NO or
        stock.CLOSE if the user closes the window'''
        # stock.NO is the default response reported on window close
        window = cls.common_window(message, gtk.STOCK_DIALOG_QUESTION,
            response_cb, _("Confirm"), stock.NO)
        cls.add_button(window, gtk.STOCK_NO, stock.NO, response_cb,
            cls.default_cb, *args)
        cls.add_button(window, gtk.STOCK_YES, stock.YES, response_cb,
cls.default_cb, *args) window.set_modal(True) window.show() @classmethod def yes_no_cancel(cls, message, response_cb, *args): '''show a confirm dialog displaying a question and three buttons: Yes and No and Cancel, return the response as stock.YES, stock.NO, stock.CANCEL or stock.CLOSE if the user closes the window''' window = cls.common_window(message, gtk.STOCK_DIALOG_QUESTION, response_cb, _("Confirm")) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, response_cb, cls.default_cb, *args) cls.add_button(window, gtk.STOCK_NO, stock.NO, response_cb, cls.default_cb, *args) cls.add_button(window, gtk.STOCK_YES, stock.YES, response_cb, cls.default_cb, *args) window.show() @classmethod def accept_cancel(cls, message, response_cb, *args): '''show a confirm dialog displaying information and two buttons: Accept and Cancel, return stock.ACCEPT, stock.CANCEL or stock.CLOSE''' window = cls.common_window(message, gtk.STOCK_DIALOG_QUESTION, response_cb, _("Confirm")) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, response_cb, cls.default_cb, *args) cls.add_button(window, gtk.STOCK_OK, stock.ACCEPT, response_cb, cls.default_cb, *args) window.show() @classmethod def contact_added_you(cls, accounts, response_cb, title=_("User invitation")): '''show a dialog displaying information about users that added you to their userlists, the accounts parameter is a tuple of mail, nick that represent all the users that added you, the way you confirm (one or more dialogs) doesn't matter, but you should call the response callback only once with a dict with two keys 'accepted' and 'rejected' and a list of mail addresses as values ''' global dialogs for i in dialogs: if type(i) == AddBuddy: i.destroy() dialog = AddBuddy(response_cb) for account, nick in accounts: dialog.append(nick, account) dialog.show() @classmethod def add_contact(cls, groups, group_selected, response_cb, title=_("Add user")): '''show a dialog asking for an user address, and (optional) the group(s) where the user 
should be added, the response callback receives the response type (stock.ADD, stock.CANCEL or stock.CLOSE) the account and a tuple of group names where the user should be added (give a empty tuple if you don't implement this feature, the controls are made by the callback, you just ask for the email, don't make any control, you are just implementing a GUI! :P''' window = cls.new_window(title, response_cb) label = gtk.Label(_("Account")) label_align = gtk.Alignment(0.0, 0.5) label_align.add(label) entry = gtk.Entry() group_label = gtk.Label(_("Group")) group_label_align = gtk.Alignment(0.0, 0.5) group_label_align.add(group_label) combo = gtk.combo_box_new_text() combo.append_text("") groups = list(groups) groups.sort(key = lambda x: x.name) selected = 0 for (index, group) in enumerate(groups): combo.append_text(group.name) if group_selected == group.name: selected = index + 1 combo.set_active(selected) table = gtk.Table(2, 2) table.attach(label_align, 0, 1, 0, 1) table.attach(entry, 1, 2, 0, 1) table.attach(group_label_align, 0, 1, 1, 2) table.attach(combo, 1, 2, 1, 2) table.set_row_spacings(2) table.set_col_spacings(8) window.hbox.pack_start(table, True, True) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, response_cb, cls.add_contact_cb) cls.add_button(window, gtk.STOCK_OK, stock.ADD, response_cb, cls.add_contact_cb) setattr(window, 'entry', entry) setattr(window, 'combo', combo) entry.connect('activate', cls.add_contact_cb, window, response_cb, stock.ADD) window.show_all() @classmethod def add_group(cls, response_cb, title=_("Add group")): '''show a dialog asking for a group name, the response callback receives the response (stock.ADD, stock.CANCEL, stock.CLOSE) and the name of the group, the control for a valid group is made on the controller, so if the group is empty you just call the callback, to make a unified behaviour, and also, to only implement GUI logic on your code and not client logic cb args: response, group_name''' window = 
cls.entry_window(_("Group name"), '', response_cb, title) window.show() @classmethod def rename_group(cls, group, response_cb, title=_("Rename group")): '''show a dialog with the group name and ask to rename it, the response callback receives stock.ACCEPT, stock.CANCEL or stock.CLOSE the old and the new name. cb args: response, group, new_name ''' window = cls.entry_window(_("New group name"), group.name, response_cb, title, group) window.show() @classmethod def set_contact_alias(cls, account, alias, response_cb, title=_("Set alias")): '''show a dialog showing the current alias and asking for the new one, the response callback receives, the response (stock.ACCEPT, stock.CANCEL, stock.CLEAR <- to remove the alias or stock.CLOSE), the account, the old and the new alias. cb args: response, account, old_alias, new_alias''' alias = alias or '' window = cls.entry_window(_("Contact alias"), alias, response_cb, title, account, alias) cls.add_button(window, gtk.STOCK_CLEAR, stock.CLEAR, response_cb, cls.entry_cb, account, alias) window.show() @classmethod def about_dialog(cls, name, version, copyright, comments, license, website, authors, translators, logo_path): '''show an about dialog of the application: * title: the title of the window * name: the name of the appliaction * version: version as string * copyright: the name of the copyright holder * comments: a description of the application * license: the license text * website: the website url * authors: a list or tuple of strings containing the contributors * translators: a string containing the translators ''' def on_website_hook(dialog, web): '''called when the website item is selected''' gui.base.Desktop.open(web) def on_email_hook(dialog, mail): gui.base.Desktop.open("mailto://"+mail) about = gtk.AboutDialog() gtk.about_dialog_set_url_hook(on_website_hook) gtk.about_dialog_set_email_hook(on_email_hook) about.set_name(name) about.set_version(version) about.set_copyright(copyright) about.set_comments(comments) 
about.set_license(license) about.set_website(website) about.set_authors(authors) about.set_translator_credits(translators) icon = gtk.gdk.pixbuf_new_from_file(logo_path) about.set_logo(icon) about.run() about.destroy() @classmethod def contact_information_dialog(cls, session, account): '''shows information about the account''' ContactInformation.ContactInformation(session, account).show() @classmethod def select_font(cls, style, callback): '''select font and if available size and style, receives a e3.Message.Style object with the current style the callback receives a new style object with the new selection ''' if hasattr(gtk, "FontChooser"): #gtk 3.2+ window = cls.new_window(_('Select font')) font_chooser = gtk.FontChooserDialog.new(_('Select font'), window) color = style.color if font_chooser.run() == gtk.ResponseType.OK: fdesc = gtk.FontChooser.get_font_desc(font_chooser) style = utils.pango_font_description_to_style(fdesc) style.color.red = color.red style.color.green = color.green style.color.blue = color.blue style.color.alpha = color.alpha callback(style) font_chooser.destroy() window.destroy() else: def select_font_cb(button, window, callback, response, color_sel, color): '''callback called on button selection''' if response == stock.ACCEPT: window.destroy() fdesc = pango.FontDescription(font_sel.get_font_name()) style = utils.pango_font_description_to_style(fdesc) style.color.red = color.red style.color.green = color.green style.color.blue = color.blue style.color.alpha = color.alpha callback(style) window.destroy() window = cls.new_window(_('Select font')) font_sel = gtk.FontSelection() font_sel.set_preview_text(_('This is a preview text!')) fdesc = utils.style_to_pango_font_description(style) window.hbox.pack_start(font_sel, True, True) font_sel.set_font_name(fdesc.to_string()) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, callback, select_font_cb, font_sel, style.color) cls.add_button(window, gtk.STOCK_OK, stock.ACCEPT, callback, select_font_cb, 
            font_sel, style.color)
        window.show_all()

    @classmethod
    def select_color(cls, color, callback):
        '''select color, receives a e3.Message.Color with the current
        color; the callback receives a new color object with the new
        selection
        '''
        if hasattr(gtk, 'ColorChooserDialog'):
            # gtk3 code path: use ColorChooserDialog and react to its
            # "response" signal
            def response_cb(dialog, response):
                # -5 is gtk.RESPONSE_OK
                if response == -5:
                    color = gtk.gdk.RGBA()
                    dialog.get_rgba(color)
                    # assumes to_string() yields "rgb(r,g,b)" so [4:-1]
                    # strips the "rgb(" prefix and ")" suffix -- TODO
                    # confirm it is never the "rgba(...)" form here
                    colors = color.to_string()[4:-1].split(",")
                    e3_color = e3.Color(int(colors[0]),int(colors[1]),int(colors[2]))
                    # NOTE(review): 'aplha' looks like a typo for 'alpha';
                    # the selected alpha is stored on a bogus attribute and
                    # effectively dropped -- confirm against e3.Color
                    e3_color.aplha = color.alpha
                    callback(e3_color)
                dialog.destroy()
            color_sel = gtk.ColorChooserDialog(_('Select color'))
            color_sel.connect("response", response_cb)
            # preselect the current color
            current_color = gtk.gdk.RGBA()
            current_color.parse('#' + color.to_hex())
            color_sel.set_rgba(current_color)
            color_sel.show()
        else:
            # gtk2 code path: plain ColorSelection inside one of our windows
            def select_color_cb(button, window, callback, response,
                color_sel):
                '''callback called on button selection'''
                if response == stock.ACCEPT:
                    window.destroy()
                    gtk_color = color_sel.get_current_color()
                    color = e3.Color(gtk_color.red, gtk_color.green,
                        gtk_color.blue)
                    callback(color)
                # NOTE(review): on ACCEPT the window was already destroyed
                # above, so this destroys it a second time -- confirm this
                # is harmless/intended
                window.destroy()

            window = cls.new_window(_('Select color'))
            color_sel = gtk.ColorSelection()
            window.hbox.pack_start(color_sel, True, True)
            # preselect the current color
            color_sel.set_current_color(gtk.gdk.color_parse('#' + color.to_hex()))
            cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL,
                callback, select_color_cb, color_sel)
            cls.add_button(window, gtk.STOCK_OK, stock.ACCEPT,
                callback, select_color_cb, color_sel)
            window.show_all()

    @classmethod
    def select_emote(cls, session, theme, callback, max_width=16):
        '''select an emoticon, receives a gui.Theme object with the theme
        settings; the callback receives the response and a string
        representing the selected emoticon
        '''
        EmotesWindow(session, callback, max_width).show()

    @classmethod
    def select_image(cls, path, response_cb):
        '''select an image from the disk using the given default path;
        returns a stock response and a string containing the path of the
        image
        '''
        ImageChooser(path, response_cb).show()

    @classmethod
    def
invite_dialog(cls, session, callback, l_buddy_exclude): '''select a contact to add to the conversation, receives a session object of the current session the callback receives the response and a string containing the selected account ''' InviteWindow(session, callback, l_buddy_exclude) @classmethod def login_preferences(cls, service, ext, callback, use_http, use_ipv6, proxy): """ display the preferences dialog for the login window cls -- the dialog class service -- the service string identifier (for example 'gtalk') callback -- callback to call if the user press accept, call with the new values use_http -- boolean that indicates if the e3 should use http method use_ipv6 -- boolean that indicates if the xmpp should use ipv6 to establish connection proxy -- a e3.Proxy object """ content = gtk.VBox(spacing=4) advanced = gtk.Expander(_("Advanced")) box = gtk.Table(11, 2) box.set_property('row-spacing', 4) box.set_property('column-spacing', 4) def expander_toggled(expander,param): if not expander.get_expanded(): expander.remove(box) window.resize(150,100) else: expander.add(box) window.show_all() advanced.connect('notify::expanded',expander_toggled) try: s_name = getattr(gui.theme.image_theme, "service_" + service) session_pixbuf = utils.safe_gtk_pixbuf_load(s_name) except: session_pixbuf = None session_image = gtk.Image() session_image.set_from_pixbuf(session_pixbuf) session_label = gtk.Label(service) t_proxy_host = gtk.Entry() t_proxy_port = gtk.Entry() t_user = gtk.Entry() t_passwd = gtk.Entry() t_server_host = gtk.Entry() t_server_port = gtk.Entry() c_use_auth = gtk.CheckButton(_('Use authentication')) def on_toggled(check_button, *entries): '''called when a check button is toggled, receive a set of entries, enable or disable them deppending on the state of the check button''' for entry in entries: entry.set_sensitive(check_button.get_active()) c_use_ipv6 = gtk.CheckButton(_('Use IPv6 connecion')) c_use_http = gtk.CheckButton(_('Use HTTP method')) c_use_proxy = 
gtk.CheckButton(_('Use proxy')) c_use_proxy.connect('toggled', on_toggled, t_proxy_host, t_proxy_port, c_use_auth) c_use_auth.connect('toggled', on_toggled, t_user, t_passwd) service_data = ext.SERVICES[service] t_server_host.set_text(service_data['host']) t_server_port.set_text(service_data['port']) t_proxy_host.set_text(proxy.host or '') t_proxy_port.set_text(proxy.port or '') t_user.set_text(proxy.user or '') t_passwd.set_text(proxy.passwd or '') t_passwd.set_visibility(False) c_use_http.set_active(use_http) c_use_ipv6.set_active(use_ipv6) c_use_proxy.set_active(proxy.use_proxy) c_use_proxy.toggled() c_use_auth.set_active(proxy.use_auth) c_use_auth.toggled() l_session = gtk.Label(_('Session:')) l_session.set_alignment(0.0, 0.5) l_server_host = gtk.Label(_('Server')) l_server_host.set_alignment(0.0, 0.5) l_server_port = gtk.Label(_('Port')) l_server_port.set_alignment(0.0, 0.5) l_host = gtk.Label(_('Host')) l_host.set_alignment(0.0, 0.5) l_port = gtk.Label(_('Port')) l_port.set_alignment(0.0, 0.5) l_user = gtk.Label(_('User')) l_user.set_alignment(0.0, 0.5) l_passwd = gtk.Label(_('Password')) l_passwd.set_alignment(0.0, 0.5) proxy_settings = (l_host, l_port, l_user, l_passwd, t_proxy_host, t_proxy_port, t_user, t_passwd, c_use_auth) box.attach(l_server_host, 0, 1, 0, 1) box.attach(t_server_host, 1, 2, 0, 1) box.attach(l_server_port, 0, 1, 1, 2) box.attach(t_server_port, 1, 2, 1, 2) if service == 'gtalk': box.attach(c_use_ipv6, 0, 2, 2, 3) box.attach(c_use_http, 0, 2, 3, 4) # TODO: FIXME: Temporary hack for 2.0 release. 
# msn (papylib) automagically gets system proxies if service != 'msn': box.attach(c_use_proxy, 0, 2, 4, 5) box.attach(l_host, 0, 1, 5, 6) box.attach(t_proxy_host, 1, 2, 5, 6) box.attach(l_port, 0, 1, 6, 7) box.attach(t_proxy_port, 1, 2, 6, 7) box.attach(c_use_auth, 0, 2, 7, 8) box.attach(l_user, 0, 1, 8, 9) box.attach(t_user, 1, 2, 8, 9) box.attach(l_passwd, 0, 1, 9, 10) box.attach(t_passwd, 1, 2, 9, 10) def response_cb(response): '''called on any response (close, accept, cancel) if accept get the new values and call callback with those values''' if response == stock.ACCEPT: use_http = c_use_http.get_active() use_ipv6 = c_use_ipv6.get_active() use_proxy = c_use_proxy.get_active() use_auth = c_use_auth.get_active() proxy_host = t_proxy_host.get_text() proxy_port = t_proxy_port.get_text() server_host = t_server_host.get_text() server_port = t_server_port.get_text() user = t_user.get_text() passwd = t_passwd.get_text() proxy = e3.Proxy(use_proxy, proxy_host, proxy_port, use_auth, user, passwd) callback(use_http, use_ipv6, proxy, service, server_host, server_port, True) for widget in proxy_settings: widget.destroy() window.destroy() def button_cb(button, window, response_cb, response): '''called when a button is pressedm get the response id and call the response_cb that will handle the event according to the response''' response_cb(response) window = cls.new_window(_('Preferences'), response_cb) window.set_modal(True) window.hbox.pack_start(content) session_box = gtk.HBox(spacing=4) session_box.pack_start(l_session) session_box.pack_start(session_image) session_box.pack_start(session_label) content.pack_start(session_box, False) content.pack_start(advanced, False) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, response_cb, button_cb) cls.add_button(window, gtk.STOCK_OK, stock.ACCEPT, response_cb, button_cb) window.show_all() @classmethod def edit_profile(cls, handler, user_nick, user_message, last_avatar): windows = gtk.Window() windows.set_modal(True) 
windows.set_border_width(5) windows.set_title(_('Change profile')) windows.set_position(gtk.WIN_POS_CENTER) windows.set_resizable(False) hbox = gtk.HBox(spacing=5) vbox = gtk.VBox() Avatar = extension.get_default('avatar') avatar = Avatar() avatar.set_size_request(96, 96) avatar.set_from_file(last_avatar) avatarEventBox = gtk.EventBox() avatarEventBox.add(avatar) hbox.pack_start(avatarEventBox) hbox.pack_start(vbox) nick_label = gtk.Label(_('Nick:')) nick_label.set_alignment(0.0, 0.5) nick = gtk.Entry() nick.set_text(user_nick) pm_label = gtk.Label(_('Message:')) pm_label.set_alignment(0.0, 0.5) pm = gtk.Entry() pm.set_text(user_message) savebutt = gtk.Button(stock=gtk.STOCK_SAVE) def save_profile(widget, data=None): '''save the new profile''' new_nick = nick.get_text() new_pm = pm.get_text() handler.save_profile(new_nick, new_pm) windows.destroy() savebutt.connect('clicked', save_profile) if handler.session.session_has_service(e3.Session.SERVICE_PROFILE_PICTURE): avatarEventBox.connect("button-press-event", handler.on_set_picture_selected) vbox0 = gtk.VBox() vbox0.pack_start(nick_label) vbox0.pack_start(nick) vbox0.pack_start(pm_label) vbox0.pack_start(pm) vbox.pack_start(vbox0) vbox.pack_start(savebutt) windows.add(hbox) windows.show_all() @classmethod def contactlist_format_help(cls, format_type): '''called when the help button for the nick or group format is pressed''' class TableText(gtk.Alignment): '''class that implements selectable labels aligned to the left''' def __init__(self, text): gtk.Alignment.__init__(self, xalign=0.0, yalign=0.0, xscale=0.0, yscale=0.0) self.label = gtk.Label(text) self.label.set_selectable(True) self.add(self.label) content = gtk.Table(homogeneous=True) if format_type == 'nick': window = cls.new_window(_('Nick Format Help')) cls.window_add_label_vbox(window, _('Example:')) cls.window_add_label_vbox(window, \ '[$DISPLAY_NAME][$NL][$small][$ACCOUNT][$/small][$NL][$small][$BLOCKED] ([$STATUS]) - [$MESSAGE][$/small]') 
content.attach(TableText('[$NICK]'), 0, 1, 0, 1) content.attach(TableText(_('Nickname')), 1, 2, 0, 1) content.attach(TableText('[$ACCOUNT]'), 0, 1, 1, 2) content.attach(TableText(_('Mail')), 1, 2, 1, 2) content.attach(TableText('[$DISPLAY_NAME]'), 0, 1, 2, 3) content.attach(TableText(_('Alias if available, or nick if available or mail')), 1, 2, 2, 3) content.attach(TableText('[$STATUS]'), 0, 1, 3, 4) content.attach(TableText(_('Status')), 1, 2, 3, 4) content.attach(TableText('[$MESSAGE]'), 0, 1, 4, 5) content.attach(TableText(_('Personal message')), 1, 2, 4, 5) content.attach(TableText('[$BLOCKED]'), 0, 1, 5, 6) content.attach(TableText(_('Displays \'Blocked\' if a contact is blocked')), 1, 2, 5, 6) last = 7 else: window = cls.new_window(_('Group Format Help')) content.attach(TableText('[$NAME]'), 0, 1, 0, 1) content.attach(TableText(_('The name of the group')), 1, 2, 0, 1) content.attach(TableText('[$ONLINE_COUNT]'), 0, 1, 1, 2) content.attach(TableText(_('Contacts online')), 1, 2, 1, 2) content.attach(TableText('[$TOTAL_COUNT]'), 0, 1, 2, 3) content.attach(TableText(_('Total amount of contacts')), 1, 2, 2, 3) last = 4 content.attach(TableText('[$b][$/b]'), 0, 1, last, last + 1) content.attach(TableText(_('Make text bold')), 1, 2, last, last + 1) content.attach(TableText('[$i][$/i]'), 0, 1, last + 1, last + 2) content.attach(TableText(_('Make text italic')), 1, 2, last + 1, last + 2) content.attach(TableText('[$small][$/small]'), 0, 1, last + 2, last + 3) content.attach(TableText(_('Make text small')), 1, 2, last + 2, last + 3) content.attach(TableText('[$COLOR=][$/COLOR]'), 0, 1, last + 3, last + 4) content.attach(TableText(_('Give text a color (in hex)')), 1, 2, last + 3, last + 4) window.hbox.pack_start(content) window.show_all() @classmethod def progress_window(cls, title, callback): '''returns a progress window used for emesene 1 synch''' dialog = ProgressWindow(title, callback) dialog.show_all() return dialog @classmethod def web_window(cls, title, url, 
callback): '''returns a window with a webview''' if not use_webkit: return None dialog = WebWindow(title, url, callback) dialog.show_all() return dialog @classmethod def broken_profile(cls, close_cb, profile_url): '''a dialog that asks you to fix your profile''' message = _('''\ Your live profile seems to be broken, which will cause you to experience issues with your display name, picture and/or personal message. You can fix it now by re-uploading your profile picture on the Live Messenger website that will open, or you can choose to fix it later. To fix your profile, emesene must be closed. Clicking Yes will close emesene. Do you want to fix your profile now?''') def fix_profile(button, close_cb): gui.base.Desktop.open(profile_url) close_cb() window = cls.common_window(message, gtk.STOCK_DIALOG_WARNING, None, _("You have a broken profile")) cls.add_button(window, gtk.STOCK_CANCEL, stock.CANCEL, None, cls.default_cb) cls.add_button(window, gtk.STOCK_YES, stock.YES, fix_profile, cls.default_cb, close_cb) window.show() class ImageChooser(gtk.Window): '''a class to select images''' def __init__(self, path, response_cb): '''class constructor, path is the directory where the dialog opens''' gtk.Window.__init__(self) global dialogs dialogs.append(self) self.response_cb = response_cb self.set_modal(True) self.set_title(_("Image Chooser")) self.set_default_size(600, 400) self.set_border_width(4) self.set_position(gtk.WIN_POS_CENTER) self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG) self.image = None self.vbox = gtk.VBox(spacing=4) self.file_chooser = gtk.FileChooserWidget() self.file_chooser.set_current_folder(path) hbbox = gtk.HButtonBox() hbbox.set_spacing(4) hbbox.set_layout(gtk.BUTTONBOX_END) b_accept = gtk.Button(stock=gtk.STOCK_OK) b_cancel = gtk.Button(stock=gtk.STOCK_CANCEL) b_accept.connect('clicked', self._on_accept) b_cancel.connect('clicked', self._on_cancel) self.connect('delete-event', self._on_close) self.file_chooser.connect("file-activated", 
self._on_accept) hbbox.pack_start(b_cancel, False) hbbox.pack_start(b_accept, False) vbox = gtk.VBox() self.vbox.pack_start(self.file_chooser, True, True) vbox.add(self.vbox) vbox.pack_start(hbbox, False) self.add(vbox) vbox.show_all() self._add_filters() self._add_preview() def destroy(self): '''override destroy method''' global dialogs dialogs.remove(self) gtk.Window.destroy(self) def set_icon(self, icon): '''set the icon of the window''' if utils.file_readable(icon): gtk.Window.set_icon(self, utils.safe_gtk_image_load(icon).get_pixbuf()) def _add_filters(self): ''' Adds all the possible file filters to the dialog. The filters correspond to the gdk available image formats ''' # All images filter all_images = gtk.FileFilter() all_images.set_name(_('All images')) filters = [] formats = gtk.gdk.pixbuf_get_formats() for format_ in formats: filter_ = gtk.FileFilter() name = "%s (*.%s)" % (format_['description'], format_['name']) filter_.set_name(name) for mtype in format_['mime_types']: filter_.add_mime_type(mtype) all_images.add_mime_type(mtype) for pattern in format_['extensions']: tmp = '*.' 
+ pattern filter_.add_pattern(tmp) all_images.add_pattern(tmp) filters.append(filter_) self.file_chooser.add_filter(all_images) self.file_chooser.set_filter(all_images) for filter_ in filters: self.file_chooser.add_filter(filter_) def _add_preview(self): ''' Adds a preview widget to the file chooser ''' self.image = gtk.Image() self.image.set_size_request(128, 128) self.image.show() self.file_chooser.set_preview_widget(self.image) self.file_chooser.set_preview_widget_active(True) self.file_chooser.connect('update-preview', self._on_update_preview) def _on_accept(self, button): '''method called when the user clicks the button''' filename = self.get_filename() if filename is None or not os.path.isfile(filename): extension.get_default('dialog').error(_("No picture selected")) return self.hide() self.response_cb(gui.stock.ACCEPT, filename) def _on_cancel(self, button): '''method called when the user clicks the button''' self.hide() self.response_cb(gui.stock.CANCEL, self.get_filename()) def _on_close(self, window, event): '''called when the user click on close''' self.hide() self.response_cb(gui.stock.CLOSE, self.get_filename()) def _on_update_preview(self, filechooser): ''' Updates the preview image ''' path = self.get_preview_filename() if path: # if the file is smaller than 1MB we # load it, otherwise we dont if os.path.isfile(path) and os.path.getsize(path) <= 1000000: try: pixbuf = gtk.gdk.pixbuf_new_from_file(self.get_filename()) if pixbuf.get_width() > 128 and pixbuf.get_height() > 128: pixbuf = pixbuf.scale_simple(128, 128, gtk.gdk.INTERP_BILINEAR) self.image.set_from_pixbuf(pixbuf) except gobject.GError: self.image.set_from_stock(gtk.STOCK_DIALOG_ERROR, gtk.ICON_SIZE_DIALOG) else: self.image.set_from_stock(gtk.STOCK_DIALOG_ERROR, gtk.ICON_SIZE_DIALOG) def get_filename(self): '''Shortcut to get a properly-encoded filename from a file chooser''' filename = self.file_chooser.get_filename() if filename: return gobject.filename_display_name(filename) else: return 
filename def get_preview_filename(self): '''Shortcut to get a properly-encoded preview filename''' filename = self.file_chooser.get_preview_filename() if filename: return gobject.filename_display_name(filename) else: return filename class CEChooser(ImageChooser): '''a dialog to create a custom emoticon''' SMALL = _("Small (16x16)") BIG = _("Big (50x50)") def __init__(self, path, response_cb, smilie_list): '''class constructor''' ImageChooser.__init__(self, path, None) global dialogs dialogs.append(self) self.response_cb = response_cb label = gtk.Label(_("Shortcut")) self.shortcut = gtk.Entry(7) self.combo = gtk.combo_box_new_text() self.combo.append_text(CEChooser.SMALL) self.combo.append_text(CEChooser.BIG) self.combo.set_active(0) hbox0 = gtk.HBox() hbox1 = gtk.HBox() vbox1 = gtk.VBox() vbox2 = gtk.VBox() hbox1.add(self.shortcut) hbox1.add(self.combo) vbox2.add(hbox1) vbox1.add(label) hbox0.add(vbox1) hbox0.add(vbox2) self.vbox.pack_start(hbox0, False) hbox0.show_all() self.smilie_list = smilie_list self._on_changed(None) self.shortcut.connect('changed', self._on_changed) def destroy(self): '''override destroy method''' global dialogs dialogs.remove(self) gtk.Window.destroy(self) def _on_accept(self, button): '''method called when the user clicks the button''' filename = self.get_filename() shortcut = self.shortcut.get_text() size = self.combo.get_model().get_value(self.combo.get_active_iter(), 0) if os.path.isfile(filename): if not shortcut: Dialog.error(_("Empty shortcut")) else: self.destroy() self.response_cb(stock.ACCEPT, filename, shortcut, size) else: Dialog.error(_("No picture selected")) def _on_cancel(self, button): '''method called when the user clicks the button''' self.destroy() self.response_cb(stock.CANCEL, None, None, None) def _on_close(self, window, event): '''called when the user click on close''' self.destroy() self.response_cb(stock.CLOSE, None, None, None) def _on_changed(self, shortcut): '''called when the text in self.shortcut changes''' 
SHORTCUT = self.shortcut.get_text() if SHORTCUT in self.smilie_list or SHORTCUT == "": self.shortcut.set_property('secondary-icon-stock', gtk.STOCK_DIALOG_ERROR) else: self.shortcut.set_property('secondary-icon-stock', None) class EmotesWindow(gtk.Window): """ This class represents a window to select an emoticon """ def __init__(self, session, emote_selected, max_width=8): """ Constructor. max_width -- the maximum number of columns """ gtk.Window.__init__(self) global dialogs dialogs.append(self) self.session = session self.caches = e3.cache.CacheManager(self.session.config_dir.base_dir) self.emcache = self.caches.get_emoticon_cache(self.session.account.account) self.shortcut_list = [] #XXX: Don't set undecorated on macos lion, it crash see #1065 import platform, sys if not (sys.platform == 'darwin' and platform.release().startswith('11.3')): self.set_decorated(False) self.set_role("emotes") self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG) self.set_position(gtk.WIN_POS_MOUSE) self.set_resizable(False) self.max_width = max_width self.emote_selected = emote_selected self.table = gtk.Table(max_width) self._fill_emote_table(max_width) button = gtk.Button(_("Add emoticon")) button.set_image(gtk.image_new_from_stock(gtk.STOCK_ADD, gtk.ICON_SIZE_BUTTON)) button.connect('clicked', self._on_add_custom_emote_selected) self.box = gtk.VBox() self.box.pack_start(self.table) self.box.pack_start(button) self.add(self.box) self.box.show_all() self.connect('leave-notify-event', self.on_leave_notify_event) self.connect('enter-notify-event', self.on_enter_notify_event) self.tag = None def destroy(self): '''override destroy method''' global dialogs if self in dialogs: dialogs.remove(self) gtk.Window.destroy(self) def on_leave_notify_event(self, *args): """ callback called when the mouse leaves this window """ if self.tag is None: self.tag = gobject.timeout_add(500, self.destroy) def on_enter_notify_event(self, *args): """ callback called when the mouse enters this window """ if 
self.tag: gobject.source_remove(self.tag) self.tag = None def _get_emo_image(self, path, size): '''try to return an image from path ''' pix = utils.safe_gtk_pixbuf_load(path, size) picture = gtk.image_new_from_pixbuf(pix) return picture def _fill_emote_table(self, columns): '''fill the gtk.Table with the emoticons''' emotes = [] count = 0 column = 0 row = 0 emote_theme = gui.theme.emote_theme def button_and_coords(shortcut, path): self.shortcut_list.append(shortcut) column = count % columns row = count / columns button = gtk.Button() button.set_image(self._get_emo_image(path, (20, 20))) button.set_tooltip_text(shortcut) button.set_relief(gtk.RELIEF_NONE) button.connect('clicked', self._on_emote_selected, shortcut) return (column, row, button) for shortcut, name in emote_theme.emotes.iteritems(): path = emote_theme.emote_to_path(shortcut, True) if path is None or name in emotes: continue emotes.append(name) column, row, button = button_and_coords(shortcut, path) self.table.attach(button, column, column + 1, row, row + 1) count += 1 for shortcut, hash_ in self.emcache.list(): path = os.path.join(self.emcache.path, hash_) column, row, button = button_and_coords(shortcut, path) button.connect('button-release-event', self._on_emote_clicked, shortcut) self.table.attach(button, column, column + 1, row, row + 1) count += 1 def _on_add_custom_emote_selected(self, button): ''' called when the user wants to add a custom emoticon ''' def _on_ce_choosed(response, path, shortcut, size): '''method called when the ce is selected''' if response != stock.ACCEPT: return if size == CEChooser.SMALL: size = 16 else: size = 50 image = gtk.gdk.PixbufAnimation(path) static = image.get_static_image() width = static.get_width() height = static.get_height() if width <= size and height <= size: #don't resize if less than size resized_path = path else: if width > height: ratio = float(size)/width else: ratio = float(size)/height width = int(width*ratio) height = int(height*ratio) fd, 
resized_path = tempfile.mkstemp() os.close(fd) if image.is_static_image(): #if static, resize using gtk image = static.scale_simple(width, height, gtk.gdk.INTERP_NEAREST) if gtk.gdk.pixbuf_get_file_info(path)[0]['name'] == 'jpeg': format = 'jpeg' else: format = 'png' image.save(resized_path, format) else: #resize animated images using imagemagick if not self.emcache.resize_with_imagemagick(path, resized_path, width, height): resized_path = path self.emcache.insert((shortcut, resized_path)) CEChooser(os.path.expanduser("~"), _on_ce_choosed, self.shortcut_list).show() def _on_emote_selected(self, button, shortcut): '''called when an emote is selected''' self.emote_selected(shortcut) self.destroy() def _on_emote_clicked(self, button, event, shortcut): '''intercept right click and show a nice menu''' if event.type == gtk.gdk.BUTTON_RELEASE and event.button == 3: emoticon_menu = gtk.Menu() emoticon_menu.connect('enter-notify-event', self.on_enter_notify_event) short_name = gtk.MenuItem(label=shortcut) short_edit = gtk.ImageMenuItem(_("Change shortcut")) short_edit.set_image(gtk.image_new_from_stock( gtk.STOCK_EDIT, gtk.ICON_SIZE_MENU)) short_edit.connect("activate", self._on_emote_shortcut_edit, shortcut) short_dele = gtk.ImageMenuItem(_("Delete")) short_dele.set_image(gtk.image_new_from_stock( gtk.STOCK_DELETE, gtk.ICON_SIZE_MENU)) short_dele.connect("activate", self._on_emote_shortcut_dele, shortcut) emoticon_menu.add(short_name) emoticon_menu.add(short_edit) emoticon_menu.add(short_dele) emoticon_menu.show_all() emoticon_menu.popup(None, None, None, event.button, event.time) def _on_emote_shortcut_edit(self, widget, shortcut): '''modify a shortcut for the selected custom emoticon''' self.destroy() cedict = self.emcache.parse() def _on_ce_edit_cb(response, emcache, shortcut, hash_, text=''): '''method called when the modification is done''' if response == stock.ACCEPT: if text: emcache.remove_entry(hash_) emcache.add_entry(text, hash_) else: Dialog.error(_("Empty 
shortcut")) window = Dialog.entry_window(_("New shortcut"), shortcut, _on_ce_edit_cb, _("Change shortcut"), self.emcache, shortcut, cedict[shortcut]) window.show() def _on_emote_shortcut_dele(self, widget, shortcut): '''delete a custom emoticon and its shortcut''' self.destroy() cedict = self.emcache.parse() #TODO: confirmation? or not? self.emcache.remove(cedict[shortcut]) class InviteWindow(gtk.Window): """ A window that display a list of users to select the ones to invite to the conversarion """ def __init__(self, session, callback, l_buddy_exclude): """ constructor """ gtk.Window.__init__(self) global dialogs dialogs.append(self) self.set_border_width(1) self.set_title(_('Invite friend')) self.set_default_size(300, 250) self.session = session self.callback = callback ContactList = extension.get_default('contact list') self.contact_list = ContactList(session) sel = self.contact_list.get_selection() sel.set_mode(gtk.SELECTION_MULTIPLE) self.contact_list.destroy_on_filtering = True self.contact_list.nick_template = \ '[$DISPLAY_NAME][$NL][$small][$ACCOUNT][$/small]' order_by_group = self.contact_list.session.config.b_order_by_group show_blocked = self.contact_list.session.config.b_show_blocked show_offline = self.contact_list.session.config.b_show_offline self.contact_list.order_by_group = False self.contact_list.show_blocked = False self.contact_list.show_offline = False self.contact_list.session.config.b_order_by_group = order_by_group self.contact_list.session.config.b_show_blocked = show_blocked self.contact_list.session.config.b_show_offline = show_offline self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG) self.set_position(gtk.WIN_POS_CENTER) vbox = gtk.VBox() vbox.set_spacing(1) bbox = gtk.HButtonBox() bbox.set_spacing(1) bbox.set_layout(gtk.BUTTONBOX_END) badd = gtk.Button(stock=gtk.STOCK_ADD) bclose = gtk.Button(stock=gtk.STOCK_CLOSE) search = SearchEntry.SearchEntry() search.connect('changed', self._on_search_changed) scroll = gtk.ScrolledWindow() 
scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC) scroll.set_shadow_type(gtk.SHADOW_IN) scroll.set_border_width(1) scroll.add(self.contact_list) bbox.pack_start(bclose) bbox.pack_start(badd) vbox.pack_start(scroll, True, True) vbox.pack_start(search, False) vbox.pack_start(bbox, False) self.add(vbox) badd.connect('clicked', self._on_add_clicked) bclose.connect('clicked', lambda *args: self.destroy()) self.connect('key-press-event', self._on_key_press) self.connect('delete-event', lambda *args: self.destroy()) self.contact_list.contact_selected.subscribe( self._on_contact_selected) self.contact_list.fill() l_buddy_exclude.append(self.session.account.account) for buddy in l_buddy_exclude: self.contact_list.remove_contact(e3.Contact(buddy)) self.set_modal(True) self.show() vbox.show_all() def _on_key_press(self, widget, event): if event.keyval == gtk.keysyms.Escape: self.destroy() def _on_add_clicked(self, button): """ method called when the add button is clicked """ contacts = self.contact_list.get_contact_selected() if len(contacts) == 0: Dialog.error(_("No contact selected")) return for contact in contacts: self.callback(contact.account) self.destroy() def _on_search_changed(self, entry): """ called when the content of the entry changes """ self.contact_list.filter_text = entry.get_text() def _on_contact_selected(self, contact): """ method called when the contact is selected """ contacts = self.contact_list.get_contact_selected() if len(contacts) == 0: Dialog.error(_("No contact selected")) return for contact in contacts: self.callback(contact.account) self.destroy() def destroy(self): """ unsubscribe the signal, and destroy the dialog """ global dialogs dialogs.remove(self) # close current tooltip before window destroy self.contact_list.tooltips.hide() self.contact_list.contact_selected.unsubscribe( self._on_contact_selected) gtk.Window.destroy(self) class AddBuddy(gtk.Window): '''Confirm dialog informing that someone has added you ask if you want to add 
him to your contact list''' def __init__(self, callback): '''Constructor. Packs widgets''' gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL) global dialogs dialogs.append(self) self.mails = [] # [(mail, nick), ...] self.rejected = [] self.accepted = [] self.callback = callback self.pointer = 0 # window self.set_title(_("Add contact")) self.set_resizable(False) self.set_keep_above(True) self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG) self.move(30, 30) # top-left self.connect('delete-event', self.cb_delete) ## widgets # main vbox self.vbox = gtk.VBox() # hbox with image, pages, and main text self.hbox = gtk.HBox() self.hbox.set_spacing(4) self.hbox.set_border_width(4) # the contents of the hbox (image+vboxtext) self.image = gtk.Image() self.image.set_from_stock(gtk.STOCK_DIALOG_QUESTION, \ gtk.ICON_SIZE_DIALOG) self.imagebox = gtk.HBox() self.imagebox.set_border_width(4) self.image.set_alignment(0.0, 0.5) # the vboxtext (pages+text) self.vboxtext = gtk.VBox() self.pages = self._buildpages() self.text = gtk.Label() self.text.set_selectable(True) self.text.set_ellipsize(3) #pango.ELLIPSIZE_END self.text.set_alignment(0.0, 0.0) # top left self.text.set_width_chars(60) # hboxbuttons + button box self.hboxbuttons = gtk.HBox() self.hboxbuttons.set_spacing(4) self.hboxbuttons.set_border_width(4) self.buttonbox = gtk.HButtonBox() self.buttonbox.set_layout(gtk.BUTTONBOX_END) # the contents of the buttonbox self.quit = gtk.Button(_('Quit'), gtk.STOCK_QUIT) self.quit.connect('clicked', self.destroy) self.later = gtk.Button() self.later.add(gtk.Label(_('Remind me later'))) self.later.connect('clicked', self.cb_cancel) self.reject = gtk.Button(stock=gtk.STOCK_REMOVE) self.reject.connect('clicked', self.cb_reject) self.addbutton = gtk.Button(stock=gtk.STOCK_ADD) self.addbutton.connect('clicked', self.cb_add) ## packing self.add(self.vbox) self.vbox.pack_start(self.hbox, True, True) self.vbox.pack_start(self.hboxbuttons, False, False) self.imagebox.pack_start(self.image) 
self.hbox.pack_start(self.imagebox, False, False) self.hbox.pack_start(self.vboxtext, True, True) self.vboxtext.pack_start(self.pages, False, False) self.vboxtext.pack_start(self.text, True, True) self.hboxbuttons.pack_start(self.quit, False, False) self.hboxbuttons.pack_start(self.later, False, False) self.hboxbuttons.pack_start(self.reject, False, False) self.hboxbuttons.pack_start(self.buttonbox) self.buttonbox.pack_start(self.addbutton) def _buildpages(self): '''Builds hboxpages, that is a bit complex to include in __init__''' hboxpages = gtk.HBox() arrowleft = gtk.Arrow(gtk.ARROW_LEFT, gtk.SHADOW_NONE) self.buttonleft = gtk.Button() self.buttonleft.set_relief(gtk.RELIEF_NONE) self.buttonleft.add(arrowleft) self.buttonleft.connect('clicked', self.switchmail, -1) arrowright = gtk.Arrow(gtk.ARROW_RIGHT, gtk.SHADOW_NONE) self.buttonright = gtk.Button() self.buttonright.set_relief(gtk.RELIEF_NONE) self.buttonright.add(arrowright) self.buttonright.connect('clicked', self.switchmail, 1) self.currentpage = gtk.Label() hboxpages.pack_start(gtk.Label(), True, True) # align to right hboxpages.pack_start(self.buttonleft, False, False) hboxpages.pack_start(self.currentpage, False, False) hboxpages.pack_start(self.buttonright, False, False) return hboxpages def append(self, nick, mail): '''Adds a new pending user''' self.mails.append((mail, gobject.markup_escape_text(nick))) self.update() self.show_all() self.present() def update(self): '''Update the GUI, including labels, arrow buttons, etc''' try: mail, nick = self.mails[self.pointer] except IndexError: self.destroy() return if nick != mail: mailstring = "<b>%s</b>\n<b>(%s)</b>" % (nick, mail) else: mailstring = '<b>%s</b>' % mail self.text.set_markup(mailstring + _(' has added you.\n' 'Do you want to add him/her to your contact list?')) self.buttonleft.set_sensitive(True) self.buttonright.set_sensitive(True) if self.pointer == 0: self.buttonleft.set_sensitive(False) if self.pointer == len(self.mails) - 1: 
self.buttonright.set_sensitive(False) self.currentpage.set_markup('<b>(%s/%s)</b>' % \ (self.pointer + 1, len(self.mails))) def switchmail(self, button, order): '''Moves the mail pointer +1 or -1''' if (self.pointer + order) >= 0: if (self.pointer + order) < len(self.mails): self.pointer += order else: self.pointer = 0 else: self.pointer = len(self.mails) - 1 self.update() def destroy(self, button=False): '''Called to destroy the window''' global dialogs dialogs.remove(self) self.callback({'accepted': self.accepted, 'rejected': self.rejected}) gtk.Window.destroy(self) def cb_delete(self, *args): '''Callback when the window is destroyed''' self.destroy() def cb_cancel(self, button): '''Callback when the cancel button is clicked''' self.mails.pop(self.pointer) self.switchmail(None, -1) def cb_reject(self, button): '''Callback when the view reject button is clicked''' mail, nick = self.mails[self.pointer] self.rejected.append(mail) self.mails.pop(self.pointer) self.switchmail(None, -1) def cb_add(self, button): '''Callback when the add button is clicked''' mail, nick = self.mails[self.pointer] self.accepted.append(mail) self.mails.pop(self.pointer) self.switchmail(None, -1) class ProgressWindow(gtk.Window): '''A class for a progressbar dialog''' def __init__(self, title, callback): '''Constructor. 
Packs widgets''' gtk.Window.__init__(self) global dialogs dialogs.append(self) self.set_title(title) self.set_role("dialog") self.buttoncancel = gtk.Button() self.buttoncancel.set_label(_("Cancel")) self.buttoncancel.connect('clicked', callback) self.connect('delete-event', callback) self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG) self.set_default_size(300, 50) self.set_position(gtk.WIN_POS_CENTER) self.set_border_width(8) vbox = gtk.VBox() self.progressbar = gtk.ProgressBar() self.desclabel = gtk.Label() vbox.pack_start(self.desclabel) vbox.pack_start(self.progressbar) vbox.pack_start(self.buttoncancel) self.add(vbox) def destroy(self): '''override destroy method''' global dialogs dialogs.remove(self) gtk.Window.destroy(self) def update(self, progress): '''called when the progress is updated''' self.progressbar.set_fraction(progress / 100.0) self.progressbar.set_text("%d %s" % (progress, "%")) def set_action(self, action): '''called when the action changes''' self.desclabel.set_text(action) class WebWindow(gtk.Window): '''A class for a progressbar dialog''' def __init__(self, title, url, callback = None): '''Constructor. Packs widgets''' gtk.Window.__init__(self) global dialogs dialogs.append(self) self.set_title(title) self._callback = callback scroll = gtk.ScrolledWindow() bro = webkit.WebView() bro.set_size_request(350, 350) bro.connect('load-committed', self._load_committed_cb) bro.open(url) scroll.add(bro) self.add(scroll) def destroy(self): '''override destroy method''' global dialogs dialogs.remove(self) gtk.Window.destroy(self) def _load_committed_cb(self, web_view, frame): uri = frame.get_uri() if self._callback != None: self._callback(uri)
unknown
codeparrot/codeparrot-clean
import math import unittest import numpy import six import chainer from chainer import cuda from chainer import functions from chainer import gradient_check from chainer import testing from chainer.testing import attr from chainer.testing import condition class TestSoftmaxCrossEntropy(unittest.TestCase): def setUp(self): self.x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32) self.t = numpy.random.randint(0, 3, (4,)).astype(numpy.int32) def check_forward(self, x_data, t_data, use_cudnn=True): x = chainer.Variable(x_data) t = chainer.Variable(t_data) loss = functions.softmax_cross_entropy(x, t, use_cudnn) self.assertEqual(loss.data.shape, ()) self.assertEqual(loss.data.dtype, numpy.float32) loss_value = float(cuda.to_cpu(loss.data)) # Compute expected value y = numpy.exp(self.x) loss_expect = 0.0 for i in six.moves.range(y.shape[0]): loss_expect -= math.log(y[i, self.t[i]] / y[i].sum()) loss_expect /= y.shape[0] self.assertAlmostEqual(loss_expect, loss_value, places=5) @condition.retry(3) def test_forward_cpu(self): self.check_forward(self.x, self.t) @attr.cudnn @condition.retry(3) def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t)) @attr.gpu @condition.retry(3) def test_forward_gpu_no_cudnn(self): self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False) def check_backward(self, x_data, t_data, use_cudnn=True): x = chainer.Variable(x_data) t = chainer.Variable(t_data) loss = functions.softmax_cross_entropy(x, t, use_cudnn) loss.backward() self.assertEqual(None, t.grad) func = loss.creator f = lambda: func.forward((x.data, t.data)) gx, = gradient_check.numerical_grad(f, (x.data,), (1,), eps=0.02) gradient_check.assert_allclose(gx, x.grad, atol=1e-4) @condition.retry(3) def test_backward_cpu(self): self.check_backward(self.x, self.t) @attr.cudnn @condition.retry(3) def test_backward_gpu(self): self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t)) @attr.gpu @condition.retry(3) def 
test_backward_gpu_no_cudnn(self): self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False) class TestReplicatedSoftmaxCrossEntropy1(TestSoftmaxCrossEntropy): def setUp(self): self.x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32) self.t = numpy.random.randint(0, 3, (4, 2)).astype(numpy.int32) def check_forward(self, x_data, t_data, use_cudnn=True): x = chainer.Variable(x_data) t = chainer.Variable(t_data) loss = functions.softmax_cross_entropy( x, t, use_cudnn, normalize=True) self.assertEqual(loss.data.shape, ()) self.assertEqual(loss.data.dtype, numpy.float32) loss_value = float(cuda.to_cpu(loss.data)) # Compute expected value y = numpy.exp(self.x) loss_expect = 0.0 for i in six.moves.range(y.shape[0]): for k in six.moves.range(y.shape[2]): loss_expect -= math.log( y[i, self.t[i, k], k] / y[i, :, k].sum()) loss_expect /= y.shape[0] * y.shape[2] self.assertAlmostEqual(loss_expect, loss_value, places=4) class TestReplicatedSoftmaxCrossEntropy2(TestSoftmaxCrossEntropy): def setUp(self): self.x = numpy.random.uniform( -1, 1, (4, 3, 2, 5)).astype(numpy.float32) self.t = numpy.random.randint(0, 3, (4, 2, 5)).astype(numpy.int32) def check_forward(self, x_data, t_data, use_cudnn=True): x = chainer.Variable(x_data) t = chainer.Variable(t_data) loss = functions.softmax_cross_entropy( x, t, use_cudnn, normalize=False) self.assertEqual(loss.data.shape, ()) self.assertEqual(loss.data.dtype, numpy.float32) loss_value = float(cuda.to_cpu(loss.data)) # Compute expected value y = numpy.exp(self.x) loss_expect = 0.0 for i in six.moves.range(y.shape[0]): for k in six.moves.range(y.shape[2]): for l in six.moves.range(y.shape[3]): loss_expect -= math.log( y[i, self.t[i, k, l], k, l] / y[i, :, k, l].sum()) loss_expect /= y.shape[0] self.assertAlmostEqual(loss_expect, loss_value, places=4) testing.run_module(__name__, __file__)
unknown
codeparrot/codeparrot-clean
/* * Copyright (c) 2007 Mockito contributors * This program is made available under the terms of the MIT License. */ package org.mockitousage.bugs.creation; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.util.List; import java.util.Set; import org.junit.Test; import org.mockito.Mock; import org.mockitoutil.TestBase; // see issue 191 public class ShouldAllowInlineMockCreationTest extends TestBase { @Mock List list; @Test public void shouldAllowInlineMockCreation() { when(list.get(0)).thenReturn(mock(Set.class)); assertTrue(list.get(0) instanceof Set); } }
java
github
https://github.com/mockito/mockito
mockito-core/src/test/java/org/mockitousage/bugs/creation/ShouldAllowInlineMockCreationTest.java
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Wrapping / Unwrapping dataset variants.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import combinations from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_dataset_ops from tensorflow.python.platform import test class WrapDatasetVariantTest(test_base.DatasetTestBase, parameterized.TestCase): @combinations.generate(test_base.default_test_combinations()) def testBasic(self): ds = dataset_ops.Dataset.range(100) ds_variant = ds._variant_tensor # pylint: disable=protected-access wrapped_variant = gen_dataset_ops.wrap_dataset_variant(ds_variant) unwrapped_variant = gen_dataset_ops.unwrap_dataset_variant(wrapped_variant) variant_ds = dataset_ops._VariantDataset(unwrapped_variant, ds.element_spec) get_next = self.getNext(variant_ds, requires_initialization=True) for i in range(100): self.assertEqual(i, self.evaluate(get_next())) @combinations.generate(test_base.graph_only_combinations()) def testGPU(self): ds = dataset_ops.Dataset.range(100) ds_variant = 
ds._variant_tensor # pylint: disable=protected-access wrapped_variant = gen_dataset_ops.wrap_dataset_variant(ds_variant) with ops.device("/gpu:0"): gpu_wrapped_variant = array_ops.identity(wrapped_variant) unwrapped_variant = gen_dataset_ops.unwrap_dataset_variant( gpu_wrapped_variant) variant_ds = dataset_ops._VariantDataset(unwrapped_variant, ds.element_spec) iterator = dataset_ops.make_initializable_iterator(variant_ds) get_next = iterator.get_next() with self.cached_session(): self.evaluate(iterator.initializer) for i in range(100): self.assertEqual(i, self.evaluate(get_next)) if __name__ == "__main__": test.main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'WebHead' db.create_table('tinder_webhead', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=50)), )) db.send_create_signal('tinder', ['WebHead']) # Adding model 'MasterMap' db.create_table('tinder_mastermap', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('master', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mbdb.Master'])), ('webhead', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tinder.WebHead'])), ('logmount', self.gf('django.db.models.fields.CharField')(max_length=200)), )) db.send_create_signal('tinder', ['MasterMap']) def backwards(self, orm): # Deleting model 'WebHead' db.delete_table('tinder_webhead') # Deleting model 'MasterMap' db.delete_table('tinder_mastermap') models = { 'mbdb.master': { 'Meta': {'object_name': 'Master'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}) }, 'tinder.mastermap': { 'Meta': {'object_name': 'MasterMap'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'logmount': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'master': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mbdb.Master']"}), 'webhead': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tinder.WebHead']"}) }, 'tinder.webhead': { 'Meta': {'object_name': 'WebHead'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'masters': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['mbdb.Master']", 'through': "orm['tinder.MasterMap']", 'symmetrical': 'False'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '50'}) } } complete_apps = ['tinder']
unknown
codeparrot/codeparrot-clean
"""Tests for distutils.command.build_scripts.""" import os import unittest from distutils.command.build_scripts import build_scripts from distutils.core import Distribution from distutils import sysconfig from distutils.tests import support from test.support import run_unittest class BuildScriptsTestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase): def test_default_settings(self): cmd = self.get_build_scripts_cmd("/foo/bar", []) self.assertFalse(cmd.force) self.assertIsNone(cmd.build_dir) cmd.finalize_options() self.assertTrue(cmd.force) self.assertEqual(cmd.build_dir, "/foo/bar") def test_build(self): source = self.mkdtemp() target = self.mkdtemp() expected = self.write_sample_scripts(source) cmd = self.get_build_scripts_cmd(target, [os.path.join(source, fn) for fn in expected]) cmd.finalize_options() cmd.run() built = os.listdir(target) for name in expected: self.assertIn(name, built) def get_build_scripts_cmd(self, target, scripts): import sys dist = Distribution() dist.scripts = scripts dist.command_obj["build"] = support.DummyCommand( build_scripts=target, force=1, executable=sys.executable ) return build_scripts(dist) def write_sample_scripts(self, dir): expected = [] expected.append("script1.py") self.write_script(dir, "script1.py", ("#! 
/usr/bin/env python2.3\n" "# bogus script w/ Python sh-bang\n" "pass\n")) expected.append("script2.py") self.write_script(dir, "script2.py", ("#!/usr/bin/python\n" "# bogus script w/ Python sh-bang\n" "pass\n")) expected.append("shell.sh") self.write_script(dir, "shell.sh", ("#!/bin/sh\n" "# bogus shell script w/ sh-bang\n" "exit 0\n")) return expected def write_script(self, dir, name, text): f = open(os.path.join(dir, name), "w") try: f.write(text) finally: f.close() def test_version_int(self): source = self.mkdtemp() target = self.mkdtemp() expected = self.write_sample_scripts(source) cmd = self.get_build_scripts_cmd(target, [os.path.join(source, fn) for fn in expected]) cmd.finalize_options() # http://bugs.python.org/issue4524 # # On linux-g++-32 with command line `./configure --enable-ipv6 # --with-suffix=3`, python is compiled okay but the build scripts # failed when writing the name of the executable old = sysconfig.get_config_vars().get('VERSION') sysconfig._config_vars['VERSION'] = 4 try: cmd.run() finally: if old is not None: sysconfig._config_vars['VERSION'] = old built = os.listdir(target) for name in expected: self.assertIn(name, built) def test_suite(): return unittest.makeSuite(BuildScriptsTestCase) if __name__ == "__main__": run_unittest(test_suite())
unknown
codeparrot/codeparrot-clean