from __future__ import unicode_literals

import datetime

from django.core.urlresolvers import reverse

from tracpro.polls.models import Answer, PollRun, Response
from tracpro.test.cases import TracProDataTest

from ..models import BaselineTerm


class TestBaselineTermCRUDL(TracProDataTest):

    def setUp(self):
        """
        There will be a set of results for 3 contacts, in 2 regions
        self.contact1 and self.contact2 are in self.region1
        self.contact4 is in self.region2
        """
        super(TestBaselineTermCRUDL, self).setUp()
        self.org = self.unicef

        self.baselineterm = BaselineTerm.objects.create(
            name='Baseline Term SetUp',
            org=self.org,
            start_date=datetime.date(2015, 5, 1),
            end_date=datetime.date(2015, 5, 1),
            baseline_poll=self.poll1,
            baseline_question=self.poll1_question1,
            follow_up_poll=self.poll1,
            follow_up_question=self.poll1_question2
        )

        self.data = {
            'name': 'Test Baseline Term',
            'org': self.org.pk,
            'start_date': 'May 1, 2015',
            'end_date': 'May 1, 2015',
            'baseline_poll': self.poll1.pk,
            'baseline_question': self.poll1_question1.pk,
            'follow_up_poll': self.poll1.pk,
            'follow_up_question': self.poll1_question2.pk,
        }

    def test_list(self):
        url_name = "baseline.baselineterm_list"
        self.login(self.admin)
        response = self.url_get('unicef', reverse(url_name))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['object_list']), 1)

    def test_create(self):
        url = reverse('baseline.baselineterm_create')

        # Log in as an org administrator
        self.login(self.admin)
        response = self.url_get('unicef', url)
        self.assertEqual(response.status_code, 200)

        # Submit with no fields entered
        response = self.url_post('unicef', url, {})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'name', 'This field is required.')

        # Submit with form data
        response = self.url_post('unicef', url, self.data)
        self.assertEqual(response.status_code, 302)

        # Check new BaselineTerm created successfully
        baselineterm = BaselineTerm.objects.all().last()
        self.assertEqual(baselineterm.name, "Test Baseline Term")

    def test_delete(self):
        # Log in as an org administrator
        self.login(self.admin)

        # Delete baselineterm from setUp()
        response = self.url_post(
            'unicef',
            reverse('baseline.baselineterm_delete', args=[self.baselineterm.pk]))

        # This should delete the single BaselineTerm and redirect
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response,
            'http://unicef.testserver/indicators/',
            fetch_redirect_response=False)
        self.assertEqual(BaselineTerm.objects.all().count(), 0)

    def test_update(self):
        # Log in as an org administrator
        self.login(self.admin)

        url = reverse('baseline.baselineterm_update', args=[self.baselineterm.pk])
        response = self.url_get('unicef', url)
        self.assertEqual(response.status_code, 200)

        self.data["name"] = "Baseline Term Updated"
        response = self.url_post('unicef', url, self.data)
        self.assertEqual(response.status_code, 302)

        # Check updated BaselineTerm
        baselineterm_updated = BaselineTerm.objects.get(pk=self.baselineterm.pk)
        self.assertRedirects(
            response,
            'http://unicef.testserver/indicators/read/%d/' % self.baselineterm.pk,
            fetch_redirect_response=False)
        self.assertEqual(baselineterm_updated.name, "Baseline Term Updated")

    def test_read(self):
        # Log in as an org administrator
        self.login(self.admin)

        # Try to read the one BaselineTerm
        response = self.url_get(
            'unicef',
            reverse('baseline.baselineterm_read', args=[self.baselineterm.pk]))
        self.assertEqual(response.status_code, 200)

        # Try to view BaselineTerm that does not exist
        fake_baselineterm_pk = self.baselineterm.pk + 100
        response = self.url_get(
            'unicef',
            reverse('baseline.baselineterm_read', args=[fake_baselineterm_pk]))
        self.assertEqual(response.status_code, 404)

    def test_data_spoof(self):
        # Turn on show_spoof_data for this org
        self.org.show_spoof_data = True
        self.org.save()

        url = reverse('baseline.baselineterm_data_spoof')

        # Log in as an org administrator
        self.login(self.admin)
        response = self.url_get('unicef', url)
        self.assertEqual(response.status_code, 200)

        # Submit with no fields entered
        response = self.url_post('unicef', url, {})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'contacts', 'This field is required.')

        spoof_data = {
            'contacts': [self.contact1.pk],
            'start_date': "May 1, 2015",
            'end_date': "May 2, 2015",
            'baseline_question': self.poll1_question1.pk,
            'follow_up_question': self.poll1_question2.pk,
            'baseline_minimum': 30,
            'baseline_maximum': 40,
            'follow_up_minimum': 10,
            'follow_up_maximum': 20
        }

        # Submit with valid form data
        response = self.url_post('unicef', url, spoof_data)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response,
            'http://unicef.testserver/indicators/',
            fetch_redirect_response=False)

        # Check new spoofed data created successfully
        # 3 PollRuns, Responses, and Answers
        # for 1 Baseline Date and 2 Follow Up Dates
        self.assertEqual(PollRun.objects.all().count(), 3)
        self.assertEqual(Response.objects.all().count(), 3)
        self.assertEqual(Answer.objects.all().count(), 3)

    def test_data_spoof_hide(self):
        # Turn off show_spoof_data for this org
        self.org.show_spoof_data = False
        self.org.save()

        url = reverse('baseline.baselineterm_data_spoof')

        # Log in as an org administrator
        self.login(self.admin)
        response = self.url_get('unicef', url)

        # We should not be able to spoof data
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response,
            'http://unicef.testserver/indicators/',
            fetch_redirect_response=False)
nilq/baby-python
python
# Copyright (c) 2020 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

iso_map = {
    "C": "Coruña, A",
    "VI": "Araba/Álava",
    "AB": "Albacete",
    "A": "Alicante/Alacant",
    "AL": "Almería",
    "O": "Asturias",
    "AV": "Ávila",
    "BA": "Badajoz",
    "PM": "Balears, Illes",
    "B": "Barcelona",
    "BI": "Bizkaia",
    "BU": "Burgos",
    "CC": "Cáceres",
    "CA": "Cádiz",
    "S": "Cantabria",
    "CS": "Castellón/Castelló",
    "CE": "Ceuta",
    "CR": "Ciudad Real",
    "CO": "Córdoba",
    "CU": "Cuenca",
    "SS": "Gipuzkoa",
    "GI": "Girona",
    "GR": "Granada",
    "GU": "Guadalajara",
    "H": "Huelva",
    "HU": "Huesca",
    "J": "Jaén",
    "LO": "Rioja, La",
    "GC": "Palmas, Las",
    "LE": "León",
    "L": "Lleida",
    "LU": "Lugo",
    "M": "Madrid",
    "MA": "Málaga",
    "ML": "Melilla",
    "MU": "Murcia",
    "NA": "Navarra",
    "NC": "Navarra",  # this is region's iso code, which appears by error in raw data of provinces
    "OR": "Ourense",
    "P": "Palencia",
    "PO": "Pontevedra",
    "SA": "Salamanca",
    "TF": "Santa Cruz de Tenerife",
    "SG": "Segovia",
    "SE": "Sevilla",
    "SO": "Soria",
    "T": "Tarragona",
    "TE": "Teruel",
    "TO": "Toledo",
    "V": "Valencia/València",
    "VA": "Valladolid",
    "ZA": "Zamora",
    "Z": "Zaragoza",
}


def add_province_info(df_orig, df_prov):
    df_orig.insert(1, "province id", 0)
    df_orig.insert(2, "province", 0)
    df_orig.insert(3, "region id", 0)
    df_orig.insert(4, "region", 0)

    # Homogenize codes, names, etc. using INE data
    df_orig["province"] = df_orig["province iso"].apply(iso_map.get)
    for p in df_orig["province"].unique():
        # print("-", p)
        df_orig.loc[
            df_orig["province"] == p, ("province id", "region", "region id")
        ] = (
            df_prov.loc[df_prov["provincia"] == p][
                ["id provincia", "autonomia", "id auto"]
            ].values[0]
        )
    del df_orig['province iso']
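# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how add_province_info might be called, assuming pandas
# DataFrames with the column names the function expects ("province iso" in the
# case data; "provincia", "id provincia", "autonomia", "id auto" in the INE
# province table). All values below are made up for illustration only.
import pandas as pd

cases = pd.DataFrame({
    "date": ["2020-03-01", "2020-03-01"],
    "province iso": ["M", "B"],
    "cases": [10, 7],
})

provinces = pd.DataFrame({
    "provincia": ["Madrid", "Barcelona"],
    "id provincia": [28, 8],
    "autonomia": ["Madrid, Comunidad de", "Cataluña"],
    "id auto": [13, 9],
})

add_province_info(cases, provinces)
print(cases.columns.tolist())
# ['date', 'province id', 'province', 'region id', 'region', 'cases']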
nilq/baby-python
python
# flake8: noqa
# Copyright (c) 2015 - 2017 Holger Nahrstaedt
# Copyright (c) 2016-2017 The pyedflib Developers
#                         <https://github.com/holgern/pyedflib>
# See LICENSE for license details.

from __future__ import division, print_function, absolute_import

from ._extensions._pyedflib import *
from .edfwriter import *
from .edfreader import *
from . import highlevel
from . import data
from pyedflib.version import version as __version__
from numpy.testing import Tester

__all__ = [s for s in dir() if not s.startswith('_')]
try:
    # In Python 2.x the name of the tempvar leaks out of the list
    # comprehension. Delete it to not make it show up in the main namespace.
    del s
except NameError:
    pass

test = Tester().test
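# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, from outside the package, of reading an EDF file through
# the highlevel module that this __init__ re-exports. The file name
# "recording.edf" is a placeholder.
from pyedflib import highlevel

signals, signal_headers, header = highlevel.read_edf("recording.edf")
print(len(signals), "channels; first label:", signal_headers[0]["label"])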
nilq/baby-python
python
""" Holds functions responsible for objects validation across FAT-Forensics. """ # Author: Kacper Sokol <k.sokol@bristol.ac.uk> # License: new BSD import warnings from typing import Union import numpy as np import fatf.utils.tools as fut __all__ = ['is_numerical_dtype', 'is_textual_dtype', 'is_base_dtype', 'is_flat_dtype', 'are_similar_dtypes', 'are_similar_dtype_arrays', 'is_numerical_array', 'is_textual_array', 'is_base_array', 'is_1d_array', 'is_2d_array', 'is_structured_row', 'is_1d_like', 'is_structured_array'] # yapf: disable _NUMPY_VERSION = [int(i) for i in np.version.version.split('.')] _NUMPY_1_13 = fut.at_least_verion([1, 13], _NUMPY_VERSION) # Unsigned byte, Boolean, (signed) byte -- Boolean, unsigned integer, # (signed) integer, floating-point and complex-floating point. _NUMPY_NUMERICAL_KINDS = set('B?buifc') # Unicode string _NUMPY_TEXTUAL_KINDS = set('U') # Zero-terminated bytes _NUMPY_TEXTUAL_KINDS_UNSUPPORTED = set('Sa') # O, M, m and V are considered complex objects _NUMPY_BASE_KINDS = set('?buifcBSaU') def is_numerical_dtype(dtype: np.dtype) -> bool: """ Determines whether a numpy dtype object is of numerical type. Checks whether the ``dtype`` is of one of the following (numerical) types: unsigned byte, boolean, (signed) byte -- boolean, unsigned integer, (signed) integer, floating-point or complex-floating point. Parameters ---------- dtype : numpy.dtype The dtype to be checked. Raises ------ TypeError The input is not a numpy's dtype object. ValueError The dtype is structured -- this function only accepts plane dtypes. Returns ------- is_numerical : boolean True if the dtype is of a numerical type, False otherwise. """ if not isinstance(dtype, np.dtype): raise TypeError('The input should be a numpy dtype object.') # If the dtype is complex if dtype.names is not None: raise ValueError('The numpy dtype object is structured. ' 'Only base dtype are allowed.') is_numerical = dtype.kind in _NUMPY_NUMERICAL_KINDS return is_numerical def is_textual_dtype(dtype: np.dtype) -> bool: """ Determines whether a numpy dtype object is of textual type. Checks whether the ``dtype`` is a unicode string type (textual). The zero-terminated bytes type is unsupported and not considered a textual type. Parameters ---------- dtype : numpy.dtype The dtype to be checked. Raises ------ TypeError The input is not a numpy's dtype object. ValueError The dtype is structured -- this function only accepts plane dtypes. Returns ------- is_textual : boolean True if the dtype is of a textual type, False otherwise. """ if not isinstance(dtype, np.dtype): raise TypeError('The input should be a numpy dtype object.') # If the dtype is complex if dtype.names is not None: raise ValueError('The numpy dtype object is structured. ' 'Only base dtype are allowed.') if dtype.kind in _NUMPY_TEXTUAL_KINDS_UNSUPPORTED: warnings.warn( 'Zero-terminated bytes type is not supported and is not ' 'considered to be a textual type. Please use any other textual ' 'type.', category=UserWarning) is_textual = False else: is_textual = dtype.kind in _NUMPY_TEXTUAL_KINDS return is_textual def is_base_dtype(dtype: np.dtype) -> bool: """ Determines whether a numpy dtype object is one of base types. Checks whether the ``dtype`` is of any type but ``numpy.void`` -- this usually happens when a numpy array holds objects instead of base type entities. Parameters ---------- dtype : numpy.dtype The dtype to be checked. Raises ------ TypeError The input is not a numpy's dtype object. 
ValueError The dtype is structured -- this function only accepts plane dtypes. Returns ------- is_basic : boolean True if the dtype is of a base type, False otherwise. """ if not isinstance(dtype, np.dtype): raise TypeError('The input should be a numpy dtype object.') # If the dtype is complex if dtype.names is not None: raise ValueError('The numpy dtype object is structured. ' 'Only base dtype are allowed.') is_basic = dtype.kind in _NUMPY_BASE_KINDS return is_basic def is_flat_dtype(dtype: np.dtype) -> bool: """ Determines whether a numpy dtype object is flat. Checks whether the ``dtype`` just encodes one element or a shape. A dtype can characterise an array of other base types, which can then be embedded as an element of another array. Parameters ---------- dtype : numpy.dtype The dtype to be checked. Raises ------ TypeError The input is not a numpy's dtype object. ValueError The dtype is structured -- this function only accepts plane dtypes. Returns ------- is_flat : boolean True if the dtype is flat, False otherwise. """ if not isinstance(dtype, np.dtype): raise TypeError('The input should be a numpy dtype object.') # If the dtype is complex if dtype.names is not None: raise ValueError('The numpy dtype object is structured. ' 'Only base dtype are allowed.') # pylint: disable=len-as-condition if _NUMPY_1_13: # pragma: no cover is_flat = not bool(dtype.ndim) else: # pragma: no cover is_flat = len(dtype.shape) == 0 return is_flat def are_similar_dtypes(dtype_a: np.dtype, dtype_b: np.dtype, strict_comparison: bool = False) -> bool: """ Checks whether two numpy dtypes are similar. If ``strict_comparison`` is set to True the both dtypes have to be exactly the same. Otherwise, if both are either numerical or textual dtypes, they are considered similar. Parameters ---------- dtype_a : numpy.dtype The first dtype to be compared. dtype_b : numpy.dtype The second dtype to be compared. strict_comparison : boolean, optional (default=False) When set to True the dtypes have to match exactly. Otherwise, if both are either numerical or textual dtypes, they are considered similar. Raises ------ TypeError Either of the inputs is not a numpy's dtype object. ValueError Either of the input dtypes is structured -- this function only accepts plane dtypes. Returns ------- are_similar : boolean True if both dtypes are similar, False otherwise. """ if not isinstance(dtype_a, np.dtype): raise TypeError('dtype_a should be a numpy dtype object.') if not isinstance(dtype_b, np.dtype): raise TypeError('dtype_b should be a numpy dtype object.') if dtype_a.names is not None: raise ValueError('The dtype_a is a structured numpy dtype object. ' 'Only base dtype are allowed.') if dtype_b.names is not None: raise ValueError('The dtype_b is a structured numpy dtype object. ' 'Only base dtype are allowed.') are_similar = False if strict_comparison: are_similar = dtype_a == dtype_b else: if ((is_numerical_dtype(dtype_a) and is_numerical_dtype(dtype_b)) or (is_textual_dtype(dtype_a) and is_textual_dtype(dtype_b))): are_similar = True else: are_similar = dtype_a == dtype_b return are_similar def are_similar_dtype_arrays(array_a: np.ndarray, array_b: np.ndarray, strict_comparison: bool = False) -> bool: """ Determines whether two numpy array-like object have a similar data type. If ``strict_comparison`` is set to True the dtypes of both arrays have to be exactly the same. Otherwise, if both their dtypes are either numerical or textual dtypes, they are considered similar. 
If one of the arrays is a structured array and the other one is a classic numpy array the function returns False. Parameters ---------- array_a : numpy.ndarray The first array to be checked. array_b : numpy.ndarray The second array to be checked. strict_comparison : boolean, optional (default=False) When set to True the dtypes have to match exactly. Otherwise, if both are either numerical or textual dtypes, they are considered similar. Raises ------ TypeError Either of the inputs is not a numpy array-like object. Returns ------- are_similar : boolean True if both arrays have a similar dtype, False otherwise. """ if not isinstance(array_a, np.ndarray): raise TypeError('array_a should be a numpy array-like object.') if not isinstance(array_b, np.ndarray): raise TypeError('array_b should be a numpy array-like object.') is_a_structured = is_structured_array(array_a) is_b_structured = is_structured_array(array_b) if is_a_structured and is_b_structured: are_similar = True if len(array_a.dtype) != len(array_b.dtype): are_similar = False # Check names and types. if are_similar: for i in range(len(array_a.dtype)): are_similar = array_a.dtype.names[i] == array_b.dtype.names[i] if not are_similar: break are_similar = are_similar_dtypes( array_a.dtype[i], array_b.dtype[i], strict_comparison) if not are_similar: break elif not is_a_structured and not is_b_structured: are_similar = are_similar_dtypes(array_a.dtype, array_b.dtype, strict_comparison) else: are_similar = False return are_similar def is_numerical_array(array: np.ndarray) -> bool: """ Determines whether a numpy array-like object has a numerical data type. Checks whether the ``array`` is of one of the following (numerical) types: boolean, (signed) byte -- boolean, unsigned integer, (signed) integer, floating-point or complex-floating point. Parameters ---------- array : numpy.ndarray The array to be checked. Raises ------ TypeError The input array is not a numpy array-like object. Returns ------- is_numerical : boolean True if the array has a numerical data type, False otherwise. """ if not isinstance(array, np.ndarray): raise TypeError('The input should be a numpy array-like object.') if is_structured_array(array): is_numerical = True for i in range(len(array.dtype)): if not is_numerical_dtype(array.dtype[i]): is_numerical = False break else: is_numerical = is_numerical_dtype(array.dtype) return is_numerical def is_textual_array(array: np.ndarray) -> bool: """ Determines whether a numpy array-like object has a textual data type. Checks whether the ``array`` is a unicode string type (textual). The zero-terminated bytes type is unsupported and not considered a textual type. Parameters ---------- array : numpy.ndarray The array to be checked. Raises ------ TypeError The input array is not a numpy array-like object. Returns ------- is_textual : boolean True if the array has a textual data type, False otherwise. """ if not isinstance(array, np.ndarray): raise TypeError('The input should be a numpy array-like object.') if is_structured_array(array): is_textual = True for i in range(len(array.dtype)): if not is_textual_dtype(array.dtype[i]): is_textual = False break else: is_textual = is_textual_dtype(array.dtype) return is_textual def is_base_array(array: np.ndarray) -> bool: """ Determines whether a numpy array-like object holds base data types. Checks whether the ``array`` is of any type but ``numpy.void`` -- this usually happens when a numpy array holds objects instead of base type entities. 
Parameters ---------- array : numpy.ndarray The array to be checked. Raises ------ TypeError The input array is not a numpy array-like object. Returns ------- is_basic : boolean True if the array is of a base data type, False otherwise. """ if not isinstance(array, np.ndarray): raise TypeError('The input should be a numpy array-like object.') if is_structured_array(array): is_basic = True for i in range(len(array.dtype)): if not is_base_dtype(array.dtype[i]): is_basic = False break else: is_basic = is_base_dtype(array.dtype) return is_basic def is_1d_array(array: np.ndarray) -> bool: """ Determines whether a numpy array-like object is 1-dimensional. Parameters ---------- array : numpy.ndarray The array to be checked. Raises ------ TypeError The input array is not a numpy array-like object. Warns ----- UserWarning The input array is 1-dimensional but its components are 1D structured. Returns ------- is_1d : boolean True if the array is 1-dimensional, False otherwise. """ if not isinstance(array, np.ndarray): raise TypeError('The input should be a numpy array-like.') if is_structured_array(array): is_1d = False if len(array.dtype) == 1 and len(array.shape) == 1: message = ('Structured (pseudo) 1-dimensional arrays are not ' 'acceptable. A 1-dimensional structured numpy array ' 'can be expressed as a classic numpy array with a ' 'desired type.') warnings.warn(message, category=UserWarning) else: is_1d = len(array.shape) == 1 return is_1d def is_2d_array(array: np.ndarray) -> bool: """ Determines whether a numpy array-like object has 2 dimensions. Parameters ---------- array : numpy.ndarray The array to be checked. Raises ------ TypeError The input array is not a numpy array-like object. Warns ----- UserWarning The input array is 2-dimensional but its components are 1D structured. Returns ------- is_2d : boolean True if the array is 2-dimensional, False otherwise. """ if not isinstance(array, np.ndarray): raise TypeError('The input should be a numpy array-like.') if is_structured_array(array): # pylint: disable=len-as-condition if len(array.shape) == 2 and len(array.dtype) == 1: is_2d = False message = ('2-dimensional arrays with 1D structured elements are ' 'not acceptable. Such a numpy array can be expressed ' 'as a classic 2D numpy array with a desired type.') warnings.warn(message, category=UserWarning) elif len(array.shape) == 1 and len(array.dtype) > 0: is_2d = True for name in array.dtype.names: if not is_flat_dtype(array.dtype[name]): # This is a complex (multi-dimensional) embedded dtype is_2d = False break else: is_2d = False else: is_2d = len(array.shape) == 2 return is_2d def is_structured_row(structured_row: np.void) -> bool: """ Determines whether the input is a structured numpy array's row object. Parameters ---------- structured_row : numpy.void The object to be checked. Raises ------ TypeError The input is not a structured numpy array's row object. Returns ------- is_structured_row : boolean True if the input is array is a structured numpy array's row object, False otherwise. """ if not isinstance(structured_row, np.void): raise TypeError('The input should be a row of a structured numpy ' 'array (numpy.void type).') return len(structured_row.dtype) != 0 def is_1d_like(oned_like_object: Union[np.ndarray, np.void]) -> bool: """ Checks if the input is either a 1D numpy array or a structured numpy row. Parameters ---------- oned_like_object : Union[numpy.ndarray, numpy.void] The object to be checked. 
Raises ------ TypeError The input is neither a numpy ndarray -- array-like object -- nor a numpy void -- a row of a structured numpy array. Returns ------- is_1d_like_array : boolean True if the input is either a 1-dimensional numpy array or a row of a structured numpy array, False otherwise. """ is_1d_like_array = False if isinstance(oned_like_object, np.void): is_1d_like_array = is_structured_row(oned_like_object) elif isinstance(oned_like_object, np.ndarray): is_1d_like_array = is_1d_array(oned_like_object) else: raise TypeError('The input should either be a numpy array-like object ' '(numpy.ndarray) or a row of a structured numpy array ' '(numpy.void).') return is_1d_like_array def is_structured_array(array: np.ndarray) -> bool: """ Determines whether a numpy array-like object is a structured array. Parameters ---------- array : numpy.ndarray The array to be checked. Raises ------ TypeError The input array is not a numpy array-like object. Returns ------- is_structured : boolean True if the array is a structured array, False otherwise. """ if not isinstance(array, np.ndarray): raise TypeError('The input should be a numpy array-like.') return len(array.dtype) != 0
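# --- Illustrative usage (not part of the original file) ---
# A minimal sketch exercising a few of the validators defined above on plain
# and structured numpy arrays; the arrays here are made up for illustration.
import numpy as np

plain = np.array([[1.0, 2.0], [3.0, 4.0]])
structured = np.array([(1, 'a'), (2, 'b')], dtype=[('num', int), ('txt', 'U1')])

print(is_numerical_array(plain))        # True -- float kind 'f'
print(is_2d_array(plain))               # True -- classic 2D array
print(is_structured_array(plain))       # False
print(is_structured_array(structured))  # True
print(is_2d_array(structured))          # True -- 1D structured array with several flat fields
print(are_similar_dtype_arrays(plain, plain, strict_comparison=True))  # True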
nilq/baby-python
python
'''URL validation with OOP.

Points of observation in a URL:
standard characters → "?", "&", "https://", "http://", "www."
'''
import re


class ExtratorURL:
    def __init__(self, url):
        self.url = self.clear_url(url)
        self.url_validation()

    def clear_url(self, url):
        if type(url) == str:
            return url.strip()
        else:
            return ''

    def url_validation(self):
        if not self.url:  # Check whether the URL is empty or not
            raise ValueError('A URL está vazia')
        padraoURL = re.compile('(http(s)?://)(www.)?bytebank.com(.br)?(/cambio)?')
        match = padraoURL.match(self.url.lower().strip())
        if not match:
            raise ValueError('URL não é VÁLIDA')

    def get_url_base(self):
        interrogacaoLocal = self.url.find('?')
        urlBase = self.url[:interrogacaoLocal]
        return urlBase

    def get_url_parameter(self):
        interrogacaoLocal = self.url.find('?')
        urlParameter = self.url[interrogacaoLocal + 1:]
        return urlParameter

    def get_parameter_value(self, parameterName):
        localParameter = self.get_url_parameter().find(parameterName)
        parameterIndex = localParameter + len(parameterName) + 1  # Locate the start of the parameter value
        divParameter = self.get_url_parameter().find('&', parameterIndex)
        if divParameter == -1:
            return self.get_url_parameter()[parameterIndex:]
        else:
            return self.get_url_parameter()[parameterIndex:divParameter]

    def __len__(self):
        return len(self.url)

    def __str__(self):
        print()
        return f'A URL é: {self.url}\nBase: {self.get_url_base()}\nParâmetros: {self.get_url_parameter()}\n' \
               f'Tamnho URL: {len(self.url)} chars\n'

    def __eq__(self, other):
        return self.url == other.url


extratorURL = ExtratorURL(input('Copie ou digite a URL: ').lower().strip())
print(extratorURL)
parameterName = 'quantidade'
print(f'O parâmetro "{parameterName.upper()}" é igual à \033[1;33;40m{extratorURL.get_parameter_value(parameterName)}\033[m')
nilq/baby-python
python
#!/usr/bin/env python3

project = "stories"
copyright = "2018, Artem Malyshev"
author = "Artem Malyshev"
version = "0.9"
release = "0.9"
templates_path = ["templates"]
source_suffix = ".rst"
master_doc = "index"
language = None
exclude_patterns = ["_build"]
pygments_style = "sphinx"
html_theme = "alabaster"
html_static_path = ["static"]
html_sidebars = {
    "**": [
        "sidebarlogo.html",
        "stats.html",
        "globaltoc.html",
        "relations.html",
        "updates.html",
        "links.html",
        "searchbox.html",
        "image_popup.html",
        "gitter_sidecar.html",
    ]
}
html_theme_options = {
    "show_powered_by": False,
    "show_related": True,
    "show_relbars": True,
    "description": "Business transaction DSL. It provides a simple way to define a complex business transaction that includes processing by many different objects.",  # noqa: E501
    "github_user": "dry-python",
    "github_repo": "stories",
    "github_type": "star",
    "github_count": True,
    "github_banner": True,
}
nilq/baby-python
python
from transformers import AutoModelWithLMHead, AutoTokenizer


def run_gpt2(gpt2_input):
    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    model = AutoModelWithLMHead.from_pretrained('gpt2')

    sequence = gpt2_input
    input = tokenizer.encode(sequence, return_tensors='pt')
    generated = model.generate(input, max_length=250, do_sample=True)
    resulting_string = tokenizer.decode(generated.tolist()[0])
    return resulting_string.replace(sequence, '')
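# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of calling run_gpt2; the prompt text is arbitrary and the
# continuation varies between runs because generation uses do_sample=True.
if __name__ == '__main__':
    continuation = run_gpt2("The quick brown fox")
    print(continuation)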
nilq/baby-python
python
# coding=utf-8 # Copyright 2018 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A batched replay buffer of nests of Tensors which can be sampled uniformly. - Each add assumes tensors have batch_size as first dimension, and will store each element of the batch in an offset segment, so that each batch dimension has its own contiguous memory. Within batch segments, behaves as a circular buffer. The get_next function returns 'ids' in addition to the data. This is not really needed for the batched replay buffer, but is returned to be consistent with the API for a priority replay buffer, which needs the ids to update priorities. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import gin import numpy as np import tensorflow as tf from tf_agents.replay_buffers import replay_buffer from tf_agents.replay_buffers import table from tf_agents.specs import tensor_spec from tf_agents.utils import common BufferInfo = collections.namedtuple('BufferInfo', ['ids', 'probabilities']) @gin.configurable class TFUniformReplayBuffer(replay_buffer.ReplayBuffer): """A TFUniformReplayBuffer with batched adds and uniform sampling.""" def __init__(self, data_spec, batch_size, max_length=1000, scope='TFUniformReplayBuffer', device='cpu:*', table_fn=table.Table, dataset_drop_remainder=False, dataset_window_shift=None, stateful_dataset=False): """Creates a TFUniformReplayBuffer. The TFUniformReplayBuffer stores episodes in `B == batch_size` blocks of size `L == max_length`, with total frame capacity `C == L * B`. Storage looks like: ``` block1 ep1 frame1 frame2 ... ep2 frame1 frame2 ... <L frames total> block2 ep1 frame1 frame2 ... ep2 frame1 frame2 ... <L frames total> ... blockB ep1 frame1 frame2 ... ep2 frame1 frame2 ... <L frames total> ``` Multiple episodes may be stored within a given block, up to `max_length` frames total. In practice, new episodes will overwrite old ones as the block rolls over its `max_length`. Args: data_spec: A TensorSpec or a list/tuple/nest of TensorSpecs describing a single item that can be stored in this buffer. batch_size: Batch dimension of tensors when adding to buffer. max_length: The maximum number of items that can be stored in a single batch segment of the buffer. scope: Scope prefix for variables and ops created by this class. device: A TensorFlow device to place the Variables and ops. table_fn: Function to create tables `table_fn(data_spec, capacity)` that can read/write nested tensors. dataset_drop_remainder: If `True`, then when calling `as_dataset` with arguments `single_deterministic_pass=True` and `sample_batch_size is not None`, the final batch will be dropped if it does not contain exactly `sample_batch_size` items. This is helpful for static shape inference as the resulting tensors will always have leading dimension `sample_batch_size` instead of `None`. 
dataset_window_shift: Window shift used when calling `as_dataset` with arguments `single_deterministic_pass=True` and `num_steps is not None`. This determines how the resulting frames are windowed. If `None`, then there is no overlap created between frames and each frame is seen exactly once. For example, if `max_length=5`, `num_steps=2`, `sample_batch_size=None`, and `dataset_window_shift=None`, then the datasets returned will have frames `{[0, 1], [2, 3], [4]}`. If `num_steps is not None`, then windows are created with a window overlap of `dataset_window_shift` and you will see each frame up to `num_steps` times. For example, if `max_length=5`, `num_steps=2`, `sample_batch_size=None`, and `dataset_window_shift=1`, then the datasets returned will have windows of shifted repeated frames: `{[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]}`. For more details, see the documentation of `tf.data.Dataset.window`, specifically for the `shift` argument. The default behavior is to not overlap frames (`dataset_window_shift=None`) but users often want to see all combinations of frame sequences, in which case `dataset_window_shift=1` is the appropriate value. stateful_dataset: whether the dataset contains stateful ops or not. """ self._batch_size = batch_size self._max_length = max_length capacity = self._batch_size * self._max_length super(TFUniformReplayBuffer, self).__init__( data_spec, capacity, stateful_dataset) self._id_spec = tensor_spec.TensorSpec([], dtype=tf.int64, name='id') self._capacity_value = np.int64(self._capacity) self._batch_offsets = ( tf.range(self._batch_size, dtype=tf.int64) * self._max_length) self._scope = scope self._device = device self._table_fn = table_fn self._dataset_drop_remainder = dataset_drop_remainder self._dataset_window_shift = dataset_window_shift with tf.device(self._device), tf.compat.v1.variable_scope(self._scope): self._capacity = tf.constant(capacity, dtype=tf.int64) self._data_table = table_fn(self._data_spec, self._capacity_value) self._id_table = table_fn(self._id_spec, self._capacity_value) self._last_id = common.create_variable('last_id', -1) self._last_id_cs = tf.CriticalSection(name='last_id') def variables(self): return (self._data_table.variables() + self._id_table.variables() + [self._last_id]) @property def device(self): return self._device @property def table_fn(self): return self._table_fn @property def scope(self): return self._scope # Methods defined in ReplayBuffer base class def _num_frames(self): num_items_single_batch_segment = self._get_last_id() + 1 total_frames = num_items_single_batch_segment * self._batch_size return tf.minimum(total_frames, self._capacity) def _add_batch(self, items): """Adds a batch of items to the replay buffer. Args: items: A tensor or list/tuple/nest of tensors representing a batch of items to be added to the replay buffer. Each element of `items` must match the data_spec of this class. Should be shape [batch_size, data_spec, ...] Returns: An op that adds `items` to the replay buffer. Raises: ValueError: If called more than once. """ tf.nest.assert_same_structure(items, self._data_spec) with tf.device(self._device), tf.name_scope(self._scope): id_ = self._increment_last_id() write_rows = self._get_rows_for_id(id_) write_id_op = self._id_table.write(write_rows, id_) write_data_op = self._data_table.write(write_rows, items) return tf.group(write_id_op, write_data_op) def _get_next(self, sample_batch_size=None, num_steps=None, time_stacked=True): """Returns an item or batch of items sampled uniformly from the buffer. 
Sample transitions uniformly from replay buffer. When sub-episodes are desired, specify num_steps, although note that for the returned items to truly be sub-episodes also requires that experience collection be single-threaded. Args: sample_batch_size: (Optional.) An optional batch_size to specify the number of items to return. See get_next() documentation. num_steps: (Optional.) Optional way to specify that sub-episodes are desired. See get_next() documentation. time_stacked: Bool, when true and num_steps > 1 get_next on the buffer would return the items stack on the time dimension. The outputs would be [B, T, ..] if sample_batch_size is given or [T, ..] otherwise. Returns: A 2 tuple, containing: - An item, sequence of items, or batch thereof sampled uniformly from the buffer. - BufferInfo NamedTuple, containing: - The items' ids. - The sampling probability of each item. Raises: ValueError: if num_steps is bigger than the capacity. """ with tf.device(self._device), tf.name_scope(self._scope): with tf.name_scope('get_next'): min_val, max_val = _valid_range_ids( self._get_last_id(), self._max_length, num_steps) rows_shape = () if sample_batch_size is None else (sample_batch_size,) assert_nonempty = tf.compat.v1.assert_greater( max_val, min_val, message='TFUniformReplayBuffer is empty. Make sure to add items ' 'before sampling the buffer.') with tf.control_dependencies([assert_nonempty]): num_ids = max_val - min_val probability = tf.cond( pred=tf.equal(num_ids, 0), true_fn=lambda: 0., false_fn=lambda: 1. / tf.cast(num_ids * self._batch_size, # pylint: disable=g-long-lambda tf.float32)) ids = tf.random.uniform( rows_shape, minval=min_val, maxval=max_val, dtype=tf.int64) # Move each id sample to a random batch. batch_offsets = tf.random.uniform( rows_shape, minval=0, maxval=self._batch_size, dtype=tf.int64) batch_offsets *= self._max_length ids += batch_offsets if num_steps is None: rows_to_get = tf.math.mod(ids, self._capacity) data = self._data_table.read(rows_to_get) data_ids = self._id_table.read(rows_to_get) else: if time_stacked: step_range = tf.range(num_steps, dtype=tf.int64) if sample_batch_size: step_range = tf.reshape(step_range, [1, num_steps]) step_range = tf.tile(step_range, [sample_batch_size, 1]) ids = tf.tile(tf.expand_dims(ids, -1), [1, num_steps]) else: step_range = tf.reshape(step_range, [num_steps]) rows_to_get = tf.math.mod(step_range + ids, self._capacity) data = self._data_table.read(rows_to_get) data_ids = self._id_table.read(rows_to_get) else: data = [] data_ids = [] for step in range(num_steps): steps_to_get = tf.math.mod(ids + step, self._capacity) items = self._data_table.read(steps_to_get) data.append(items) data_ids.append(self._id_table.read(steps_to_get)) data = tuple(data) data_ids = tuple(data_ids) probabilities = tf.fill(rows_shape, probability) buffer_info = BufferInfo(ids=data_ids, probabilities=probabilities) return data, buffer_info @gin.configurable( 'tf_agents.tf_uniform_replay_buffer.TFUniformReplayBuffer.as_dataset') def as_dataset(self, sample_batch_size=None, num_steps=None, num_parallel_calls=None, single_deterministic_pass=False): return super(TFUniformReplayBuffer, self).as_dataset( sample_batch_size, num_steps, num_parallel_calls, single_deterministic_pass=single_deterministic_pass) def _as_dataset(self, sample_batch_size=None, num_steps=None, num_parallel_calls=None): """Creates a dataset that returns entries from the buffer in shuffled order. Args: sample_batch_size: (Optional.) An optional batch_size to specify the number of items to return. 
See as_dataset() documentation. num_steps: (Optional.) Optional way to specify that sub-episodes are desired. See as_dataset() documentation. num_parallel_calls: (Optional.) Number elements to process in parallel. See as_dataset() documentation. Returns: A dataset of type tf.data.Dataset, elements of which are 2-tuples of: - An item or sequence of items or batch thereof - Auxiliary info for the items (i.e. ids, probs). """ def get_next(_): return self.get_next(sample_batch_size, num_steps, time_stacked=True) dataset = tf.data.experimental.Counter().map( get_next, num_parallel_calls=num_parallel_calls) return dataset def _single_deterministic_pass_dataset(self, sample_batch_size=None, num_steps=None, num_parallel_calls=None): """Creates a dataset that returns entries from the buffer in fixed order. Args: sample_batch_size: (Optional.) An optional batch_size to specify the number of items to return. See as_dataset() documentation. num_steps: (Optional.) Optional way to specify that sub-episodes are desired. See as_dataset() documentation. num_parallel_calls: (Optional.) Number elements to process in parallel. See as_dataset() documentation. Returns: A dataset of type tf.data.Dataset, elements of which are 2-tuples of: - An item or sequence of items or batch thereof - Auxiliary info for the items (i.e. ids, probs). Raises: ValueError: If `dataset_drop_remainder` is set, and `sample_batch_size > self.batch_size`. In this case all data will be dropped. """ static_size = tf.get_static_value(sample_batch_size) static_num_steps = tf.get_static_value(num_steps) static_self_batch_size = tf.get_static_value(self._batch_size) static_self_max_length = tf.get_static_value(self._max_length) if (self._dataset_drop_remainder and static_size is not None and static_self_batch_size is not None and static_size > static_self_batch_size): raise ValueError( 'sample_batch_size ({}) > self.batch_size ({}) and ' 'dataset_drop_remainder is True. In ' 'this case, ALL data will be dropped by the deterministic dataset.' .format(static_size, static_self_batch_size)) if (self._dataset_drop_remainder and static_num_steps is not None and static_self_max_length is not None and static_num_steps > static_self_max_length): raise ValueError( 'num_steps_size ({}) > self.max_length ({}) and ' 'dataset_drop_remainder is True. In ' 'this case, ALL data will be dropped by the deterministic dataset.' .format(static_num_steps, static_self_max_length)) def get_row_ids(_): """Passed to Dataset.range(self._batch_size).flat_map(.), gets row ids.""" with tf.device(self._device), tf.name_scope(self._scope): with tf.name_scope('single_deterministic_pass_dataset'): # Here we pass num_steps=None because _valid_range_ids uses # num_steps to determine a hard stop when sampling num_steps starting # from the returned indices. But in our case, we want all the indices # and we'll use TF dataset's window() mechanism to get # num_steps-length blocks. The window mechanism handles this stuff # for us. min_frame_offset, max_frame_offset = _valid_range_ids( self._get_last_id(), self._max_length, num_steps=None) # With auto-deps the top-level return of assert_less is not touched, # even though the operation is executed. So we add a mark_used call. tf.compat.v1.assert_less( min_frame_offset, max_frame_offset, message='TFUniformReplayBuffer is empty. 
Make sure to add items ' 'before asking the buffer for data.').mark_used() min_max_frame_range = tf.range(min_frame_offset, max_frame_offset) drop_remainder = self._dataset_drop_remainder window_shift = self._dataset_window_shift def group_windows(ds_): return ds_.batch(num_steps, drop_remainder=drop_remainder) if sample_batch_size is None: def row_ids(b): # Create a vector of shape [num_frames] and slice it along each # frame. ids = tf.data.Dataset.from_tensor_slices( b * self._max_length + min_max_frame_range) if num_steps is not None: ids = (ids.window(num_steps, shift=window_shift) .flat_map(group_windows)) return ids return tf.data.Dataset.range(self._batch_size).flat_map(row_ids) else: def batched_row_ids(batch): # Create a matrix of indices shaped [num_frames, batch_size] # and slice it along each frame row to get groups of batches # for frame 0, frame 1, ... return tf.data.Dataset.from_tensor_slices( (min_max_frame_range[:, tf.newaxis] + batch * self._max_length)) indices_ds = ( tf.data.Dataset.range(self._batch_size) .batch(sample_batch_size, drop_remainder=drop_remainder) .flat_map(batched_row_ids)) if num_steps is not None: # We have sequences of num_frames rows shaped [sample_batch_size]. # Window and group these to rows of shape # [num_steps, sample_batch_size], then # transpose them to get index tensors of shape # [sample_batch_size, num_steps]. indices_ds = (indices_ds.window(num_steps, shift=window_shift) .flat_map(group_windows) .map(tf.transpose)) return indices_ds # Get our indices as a dataset; each time we reinitialize the iterator we # update our min/max id bounds from the state of the replay buffer. ds = tf.data.Dataset.range(1).flat_map(get_row_ids) def get_data(id_): with tf.device(self._device), tf.name_scope(self._scope): with tf.name_scope('single_deterministic_pass_dataset'): data = self._data_table.read(id_ % self._capacity) buffer_info = BufferInfo(ids=id_, probabilities=()) return (data, buffer_info) # Deterministic even though num_parallel_calls > 1. Operations are # run in parallel but then the results are returned in original stream # order. ds = ds.map(get_data, num_parallel_calls=num_parallel_calls) return ds def _gather_all(self): """Returns all the items in buffer, shape [batch_size, timestep, ...]. Returns: All the items currently in the buffer. """ with tf.device(self._device), tf.name_scope(self._scope): with tf.name_scope('gather_all'): # Make ids, repeated over batch_size. Shape [batch_size, num_ids, ...]. min_val, max_val = _valid_range_ids( self._get_last_id(), self._max_length) ids = tf.range(min_val, max_val) ids = tf.stack([ids] * self._batch_size) rows = tf.math.mod(ids, self._max_length) # Make batch_offsets, shape [batch_size, 1], then add to rows. batch_offsets = tf.expand_dims( tf.range(self._batch_size, dtype=tf.int64) * self._max_length, 1) rows += batch_offsets # Expected shape is [batch_size, max_length, ...]. data = self._data_table.read(rows) return data def _clear(self, clear_all_variables=False): """Return op that resets the contents of replay buffer. Args: clear_all_variables: boolean indicating if all variables should be cleared. By default, table contents will be unlinked from replay buffer, but values are unmodified for efficiency. Set `clear_all_variables=True` to reset all variables including Table contents. Returns: op that clears or unlinks the replay buffer contents. 
""" table_vars = self._data_table.variables() + self._id_table.variables() def _init_vars(): assignments = [self._last_id.assign(-1)] if clear_all_variables: assignments += [v.assign(tf.zeros_like(v)) for v in table_vars] return tf.group(*assignments, name='clear') return self._last_id_cs.execute(_init_vars) # Helper functions. def _increment_last_id(self, increment=1): """Increments the last_id in a thread safe manner. Args: increment: amount to increment last_id by. Returns: An op that increments the last_id. """ def _assign_add(): return self._last_id.assign_add(increment).value() return self._last_id_cs.execute(_assign_add) def _get_last_id(self): def last_id(): return self._last_id.value() return self._last_id_cs.execute(last_id) def _get_rows_for_id(self, id_): """Make a batch_size length list of tensors, with row ids for write.""" id_mod = tf.math.mod(id_, self._max_length) rows = self._batch_offsets + id_mod return rows def _valid_range_ids(last_id, max_length, num_steps=None): """Returns the [min_val, max_val) range of ids. When num_steps is provided, [min_val, max_val+num_steps) are also valid ids. Args: last_id: The last id added to the buffer. max_length: The max length of each batch segment in the buffer. num_steps: Optional way to specify that how many ids need to be valid. Returns: A tuple (min_id, max_id) for the range [min_id, max_id) of valid ids. """ if num_steps is None: num_steps = tf.constant(1, tf.int64) min_id_not_full = tf.constant(0, dtype=tf.int64) max_id_not_full = tf.maximum(last_id + 1 - num_steps + 1, 0) min_id_full = last_id + 1 - max_length max_id_full = last_id + 1 - num_steps + 1 return (tf.where(last_id < max_length, min_id_not_full, min_id_full), tf.where(last_id < max_length, max_id_not_full, max_id_full))
nilq/baby-python
python
import numpy as np

from protosc.model.utils import train_xvalidate, create_clusters, select_features
from protosc.model.filter import FilterModel
from protosc.simulation import create_correlated_data, create_independent_data
from protosc.feature_matrix import FeatureMatrix


def get_test_matrix(n_row=100, n_col=50):
    X = np.zeros((n_row, n_col))
    X = X + np.arange(n_row).reshape(n_row, 1)
    X = X + np.arange(n_col).reshape(1, n_col)/1000
    y = np.random.randint(2, size=n_row)
    return FeatureMatrix(X), y


def test_select_fold():
    n_fold = 5
    n_row = 100
    n_col = 50
    X, y = get_test_matrix(n_row, n_col)
    rng = np.random.default_rng()
    for X_train, y_train, X_val, y_val in X.kfold(y, n_fold, rng, balance=False):
        assert np.allclose(X_train.shape, ((n_fold-1)/n_fold*n_row, n_col))
        assert len(y_train) == X_train.shape[0]
        assert np.allclose(X_val.shape, (1/n_fold*n_row, n_col))
        assert len(y_val) == X_val.shape[0]
        assert len(np.unique(X_train[:])) == X_train.size
        assert len(np.unique(X_val[:])) == X_val.size

    for X_train, y_train, X_val, y_val in X.kfold(y, n_fold, rng, balance=True):
        assert np.sum(y_train) == len(y_train)/2
        assert np.sum(y_val) == len(y_val)/2
        assert len(np.unique(X_train[:])) == X_train.size
        assert len(np.unique(X_val[:])) == X_val.size
        assert isinstance(
            train_xvalidate(X_train[:], y_train, X_val[:], y_val), float)


def test_select_clusters():
    X, _, truth = create_correlated_data()
    X = FeatureMatrix.from_matrix(X)
    features_sorted = np.random.permutation(X.shape[1])
    cluster_groups = create_clusters(features_sorted, X)
    for cluster in cluster_groups:
        assert np.all(np.array(
            truth["clusters"][cluster]) == truth["clusters"][cluster][0])


def test_select_features():
    X, y, _ = create_independent_data()
    selected_features, clusters = select_features(X, y)
    assert isinstance(selected_features, list)
    assert isinstance(clusters, list)
nilq/baby-python
python
# -*- coding: utf-8 -*- import base64 import hashlib import math import time from datetime import datetime # from ccxt.base.errors import AuthenticationError, InvalidOrder from ccxt.base.errors import ExchangeError from ccxt.base.exchange import Exchange class qtrade (Exchange): def describe(self): return self.deep_extend(super(qtrade, self).describe(), { 'id': 'qtrade', 'name': 'qTrade', 'countries': ['US'], 'rateLimit': 100, # 'has': { # 'fetchCurrencies': True, # 'fetchTickers': True, # 'fetchOpenOrders': True, # 'fetchMyTrades': True, # 'fetchDepositAddress': True, # }, 'urls': { 'logo': 'hhttps://qtrade.io/images/logo.png', 'api': 'https://api.qtrade.io/v1', 'www': 'https://qtrade.io/', 'doc': 'https://qtrade-exchange.github.io/qtrade-docs/', 'fees': 'https://qtrade.io/fees', 'referral': 'https://qtrade.io/?ref=AZCXUQ6P5KCG', }, 'api': { 'public': { 'get': [ 'markets', 'market/{market_id}', 'currencies', 'tickers', # 'ticker/{market_string}', # NOTE: dont implement 'ticker_by_id/{market_id}', # 'orderbook/{market_string}', # NOTE: dont implement 'orderbook_by_id/{market_id}', # NOTE: dont implement 'market/{market_id}/ohlcv/{interval}', ], }, 'private': { 'get': [ # 'user/me', # NOTE: dont implement 'user/balances', 'user/market/{market_id}', 'user/orders', 'user/order/{order_id}', 'user/withdraws', 'user/withdraw/{withdraw_id}', 'user/deposits', # 'user/deposit/{deposit_id}', # NOTE: This endpoint currently non-functional 'user/transfers' # NOTE: Returns a list of the user's Transfers and metadata. ], 'post': [ 'user/cancel_order', # 'user/deposit_address/{currency}' # NOTE: dont implement 'user/sell_limit', 'user/buy_limit', ], }, }, # 'commonCurrencies': { # 'EPC': 'Epacoin', # }, 'fees': { 'trading': { 'maker': 0.005, 'taker': 0.005, }, }, 'precision': { 'amount': 6, 'price': 8, }, }) # def fetch_currencies(self, params={}): # currencies = self.publicGetCurrencies(params) # ids = list(currencies.keys()) # result = {} # for i in range(0, len(ids)): # id = ids[i] # currency = currencies[id] # precision = self.safe_integer(currency, 'decimal') # uppercase = id.upper() # code = self.common_currency_code(uppercase) # active = self.safe_integer(currency, 'active') == 1 # maintenance = self.safe_integer(currency, 'under_maintenance') # if maintenance != 0: # active = False # canWithdraw = self.safe_integer(currency, 'is_withdrawal_active') == 1 # canDeposit = self.safe_integer(currency, 'is_deposit_active') == 1 # if not canWithdraw or not canDeposit: # active = False # result[code] = { # 'id': id, # 'code': code, # 'name': currency['name'], # 'active': active, # 'precision': precision, # 'funding': { # 'withdraw': { # 'active': canWithdraw, # 'fee': self.safe_float(currency, 'txWithdrawalFee'), # }, # 'deposit': { # 'active': canDeposit, # 'fee': self.safe_float(currency, 'txDepositFee'), # }, # }, # 'limits': { # 'amount': { # 'min': self.safe_float(currency, 'minAmountTrade'), # 'max': math.pow(10, precision), # }, # 'price': { # 'min': math.pow(10, -precision), # 'max': math.pow(10, precision), # }, # 'cost': { # 'min': None, # 'max': None, # }, # 'withdraw': { # 'min': self.safe_float(currency, 'MinWithdrawal'), # 'max': math.pow(10, precision), # }, # 'deposit': { # 'min': self.safe_float(currency, 'minDeposit'), # 'max': None, # }, # }, # 'info': currency, # } # return result def fetch_markets(self, params={}): markets = self.publicGetMarkets()['data']['markets'] result = [] for market in markets: id = market['id'] baseId = market['market_currency'] quoteId = 
market['base_currency'] base = self.common_currency_code(baseId) quote = self.common_currency_code(quoteId) symbol = base + '/' + quote active = market['can_trade'] precision = self.precision result.append({ 'id': id, 'symbol': symbol, 'base': base, 'quote': quote, 'baseId': baseId, 'quoteId': quoteId, 'active': active, 'precision': precision, 'limits': { 'amount': { 'min': math.pow(10, -precision['amount']), 'max': math.pow(10, precision['amount']), }, 'price': { 'min': math.pow(10, -precision['price']), 'max': math.pow(10, precision['price']), }, 'cost': { 'min': None, 'max': None, }, }, 'info': market, }) return result def parse_ticker(self, ticker, market=None): symbol = market['symbol'] timestamp = ticker['date'] ticker = ticker['ticker'] last = self.safe_float(ticker, 'last') open_price = self.safe_float(ticker, 'open') if last and open_price: change = last - open_price else: change = None if self.safe_float(ticker, 'day_change'): percentage = self.safe_float(ticker, 'day_change') * 100 else: percentage = None return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': self.safe_float(ticker, 'day_high'), 'low': self.safe_float(ticker, 'day_low'), 'bid': self.safe_float(ticker, 'bid'), 'bidVolume': None, 'ask': self.safe_float(ticker, 'ask'), 'askVolume': None, 'vwap': None, 'open': self.safe_float(ticker, 'day_open'), 'close': last, 'last': last, 'previousClose': None, 'change': change, 'percentage': percentage, 'average': self.safe_float(ticker, 'day_avg_price'), 'baseVolume': self.safe_float(ticker, 'day_volume_market'), 'quoteVolume': self.safe_float(ticker, 'day_volume_base'), 'info': ticker, } def fetch_ticker(self, symbol, params={}): self.load_markets() market = self.market(symbol) ticker = self.publicGetTickerByIdMarketId(self.extend({ 'market_id': market['id'], }, params))['data'] ticker = { 'date': self.milliseconds(), 'ticker': ticker, } return self.parse_ticker(ticker, market) def fetch_tickers(self, symbols=None, params={}): self.load_markets() tickers = self.publicGetTickers(params)['data']['markets'] result = {} timestamp = self.milliseconds() for ticker in tickers: market = self.markets_by_id[ticker['id']] symbol = market['symbol'] ticker = { 'date': timestamp, 'ticker': ticker, } result[symbol] = self.parse_ticker(ticker, market) return result def fetch_order_book(self, symbol, limit=None, params={}): self.load_markets() timestamp = self.milliseconds() orderbook = self.publicGetOrderbookByIdMarketId(self.extend({ 'market_id': self.market_id(symbol), }, params))['data'] result = dict() buy_orders = list() for price, amount in orderbook['buy'].items(): buy_orders.append([float(price), float(amount)]) result['buy'] = sorted(buy_orders, key=lambda t: t[0], reverse=True) sell_orders = list() for price, amount in orderbook['sell'].items(): sell_orders.append([float(price), float(amount)]) result['sell'] = sorted(sell_orders, key=lambda t: t[0]) return self.parse_order_book(result, timestamp, 'buy', 'sell') def fetch_balance(self, params={}): self.load_markets() balances = self.privateGetUserBalances(params)['data']['balances'] result = {'info': balances} for balance in balances: amount = balance['balance'] currency = self.common_currency_code(balance['currency']) account = { 'free': float(amount), 'used': 0.0, 'total': float(amount), } account['used'] = account['total'] - account['free'] result[currency] = account return self.parse_balance(result) def parse_trade(self, trade, market=None): # Common fields created_at = 
datetime.strptime(trade['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ") timestamp = int(created_at.timestamp() * 1000) symbol = market['symbol'] price = self.safe_float(trade, 'price') amount = self.safe_float(trade, 'market_amount') # Result result = dict() result['datetime'] = self.iso8601(timestamp) result['timestamp'] = timestamp result['symbol'] = symbol result['price'] = price result['amount'] = amount result['info'] = trade['info'] # My trade info trade_id = self.safe_string(trade, 'id') if trade_id: result['id'] = trade_id result['order'] = self.safe_string(trade, 'order_id') result['type'] = trade['type'] result['side'] = trade['side'] if trade['taker']: result['takerOrMaker'] = 'taker' else: result['takerOrMaker'] = 'maker' result['cost'] = self.safe_float(trade, 'base_amount') fee = self.safe_float(trade, 'base_fee') fee_currency = market['quote'] result['fee'] = { 'cost': fee, 'currency': fee_currency } return result def fetch_trades(self, symbol, since=None, limit=None, params={}): self.load_markets() market = self.market(symbol) data = self.publicGetMarketMarketId(self.extend({ 'market_id': market['id'], }, params))['data'] trades = list() for trade in data['recent_trades']: trade_obj = trade.copy() trade_obj['info'] = trade trades.append(trade_obj) return self.parse_trades(trades, market, since, limit) def parse_order(self, order, market=None): order_id = self.safe_string(order, 'id') market = self.markets_by_id[order['market_id']] symbol = market['symbol'] created_at = datetime.strptime(order['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ") timestamp = int(created_at.timestamp() * 1000) side, order_type = order['order_type'].split('_') if order['open']: status = 'open' else: status = 'closed' price = self.safe_float(order, 'price') amount = self.safe_float(order, 'market_amount') remaining = self.safe_float(order, 'market_amount_remaining') filled = amount - remaining cost = filled * price trades = list() if order['trades']: for trade in order['trades']: trade_obj = trade.copy() trade_obj['order_id'] = order_id trade_obj['info'] = trade trades.append(self.parse_trade(trade_obj, market=market)) return { 'id': order_id, 'datetime': self.iso8601(timestamp), 'timestamp': timestamp, 'lastTradeTimestamp': None, 'status': status, 'symbol': symbol, 'type': order_type, 'side': side, 'price': price, 'amount': amount, 'filled': filled, 'remaining': remaining, 'cost': cost, 'trades': trades, 'info': order['info'], } def create_order(self, symbol, type, side, amount, price=None, params={}): self.load_markets() market = self.market(symbol) if side == 'buy': method = 'privatePostUserBuyLimit' else: method = 'privatePostUserSellLimit' data = getattr(self, method)(self.extend({ 'market_id': market['id'], 'price': str(self.price_to_precision(symbol, price)), 'amount': str(self.amount_to_precision(symbol, amount)), }, params))['data']['order'] # if not data: # raise InvalidOrder(self.id + ' ' + self.json(response)) order_obj = data.copy() order_obj['info'] = data order = self.parse_order(order_obj) id = order['id'] self.orders[id] = order return order def cancel_order(self, id, symbol=None, params={}): self.load_markets() result = self.privatePostUserCancelOrder(self.extend({ 'id': int(id) }, params)) return result def fetch_order(self, id, symbol=None, params={}): self.load_markets() data = self.privateGetUserOrderOrderId(self.extend({ 'order_id': id }, params))['data']['order'] order_obj = data.copy() order_obj['info'] = data order = self.parse_order(order_obj) return order def _parse_raw_orders(self, 
raw_orders, market, since, limit): order_objes = list() for order in raw_orders: order_obj = order.copy() order_obj['info'] = order order_objes.append(order_obj) return self.parse_orders(order_objes, market, since, limit) def fetch_orders(self, symbol=None, since=None, limit=None, params={}): self.load_markets() if symbol: market = self.market(symbol) data = self.privateGetUserMarketMarketId(self.extend({ 'market_id': int(market['id']) }, params))['data'] raw_orders = data['closed_orders'] + data['open_orders'] else: market = None raw_orders = self.privateGetUserOrders(self.extend({}, params))['data']['orders'] return self._parse_raw_orders(raw_orders, market, since, limit) def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): self.load_markets() if symbol: market = self.market(symbol) data = self.privateGetUserMarketMarketId(self.extend({ 'market_id': market['id'] }, params))['data'] raw_orders = data['open_orders'] orders = self._parse_raw_orders(raw_orders, market, since, limit) else: total_orders = self.fetch_orders(symbol, since, limit, params) orders = self.filter_by(total_orders, 'status', 'open') return orders def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): self.load_markets() if symbol: market = self.market(symbol) data = self.privateGetUserMarketMarketId(self.extend({ 'market_id': market['id'] }, params))['data'] raw_orders = data['closed_orders'] orders = self._parse_raw_orders(raw_orders, market, since, limit) else: total_orders = self.fetch_orders(symbol, since, limit, params) orders = self.filter_by(total_orders, 'status', 'closed') return orders # def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): # self.load_markets() # market = self.market(symbol) # trades = self.privatePostTradeHistory(self.extend({ # 'market': market['id'], # }, params)) # return self.parse_trades(trades['trade_history'], market, since, limit) # def fetch_deposit_address(self, code, params={}): # self.load_markets() # currency = self.currency(code) # response = self.privatePostDepositAddress(self.extend({ # 'currency': currency['id'], # }, params)) # address = self.safe_string(response, 'deposit_address') # self.check_address(address) # tag = self.safe_string(response, 'payment_id') # return { # 'currency': code, # 'address': address, # 'tag': tag, # 'info': response, # } def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): url = self.urls['api'] query = self.omit(params, self.extract_params(path)) url += '/' + self.implode_params(path, params) if api == 'public': if query: url += '?' + self.urlencode(query) else: self.check_required_credentials() if method == 'GET': if query: url += '?' 
+ self.urlencode(query) elif query: body = self.json(query) timestamp = str(int(time.time())) request_details = method + "\n" request_details += '/v1/' + self.implode_params(path, params) + "\n" request_details += timestamp + "\n" if body: request_details += body + "\n" else: request_details += "\n" request_details += self.secret hsh = hashlib.sha256(request_details.encode("utf8")).digest() signature = base64.b64encode(hsh) headers = { "Authorization": "HMAC-SHA256 {}:{}".format(self.apiKey, signature.decode("utf8")), "HMAC-Timestamp": timestamp, } return {'url': url, 'method': method, 'body': body, 'headers': headers} def request(self, path, api='public', method='GET', params={}, headers=None, body=None): try: response = self.fetch2(path, api, method, params, headers, body) except Exception as e: raise ExchangeError(e) # if response: # success = self.safe_integer(response, 'success') # if success == 0: # message = self.safe_string(response, 'message') # if message == 'Invalid APIKey': # raise AuthenticationError(message) # raise ExchangeError(message) return response
nilq/baby-python
python
import os
from typing import List


#
# Get the next report filename under the given directory. If the directory for
# the filename does not exist yet, it is created.
#
def get_next_report_filename(dir, filename_mask):
    filename_mask2 = filename_mask % (dir, 0)
    directory = os.path.dirname(filename_mask2)
    try:
        os.stat(directory)
    except OSError:
        os.mkdir(directory)
        print("New directory created:", directory)

    deals_id = 0
    while os.path.exists(filename_mask % (directory, deals_id)):
        deals_id += 1
    return deals_id


# Get the next filename in an indexed way: if file.txt exists, file_0.txt will
# be created, then file_1.txt, and so on.
def get_next_filename_index(path):
    path = os.path.expanduser(path)
    # if not os.path.exists(path):
    #     return path
    root, ext = os.path.splitext(os.path.expanduser(path))
    directory = os.path.dirname(root)
    fname = os.path.basename(root)
    candidate = fname + ext
    index = 0
    ls = set(os.listdir(directory))
    while candidate in ls:
        candidate = "{}_{}{}".format(fname, index, ext)
        index += 1
    return os.path.join(directory, candidate)


def dict_value_from_path(src_dict: dict, path: List[str], case_sensitive: bool = False):
    """
    Returns the value of the dict field specified via "path", given as a list
    of keys. By default the keys are matched case-insensitively.

    Example:
        src_dict = {"level1": {"level2": {"level3": value}}}
        list_of_keys = ["level1", "level2", "level3"]

    :param src_dict: dict from which to extract data
    :param path: list of keys specifying the needed data
    :param case_sensitive: case-sensitivity flag for matching dict keys against path entries
    :return: value of a dict branch, or None if the path does not exist
    """
    s = src_dict.copy()
    key = ""
    for p in path:
        if not case_sensitive:
            key_upper_key = {key.upper(): key for key in s.keys()}
            key = key_upper_key[p.upper()] if p.upper() in key_upper_key else None
        else:
            key = p
        try:
            s = s[key]
        except Exception:
            s = None
            break
    return s
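# Usage sketch: a minimal, illustrative example of dict_value_from_path only;
# the nested dict and key paths below are invented for the example.
if __name__ == '__main__':
    sample = {"Level1": {"level2": {"LEVEL3": 42}}}
    print(dict_value_from_path(sample, ["level1", "Level2", "level3"]))        # 42 (case-insensitive match)
    print(dict_value_from_path(sample, ["level1", "Level2", "level3"], True))  # None (exact keys required)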
nilq/baby-python
python
#!/usr/bin/env python # coding: utf-8 # This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # # Challenge Notebook # ## Problem: Implement Fizz Buzz. # # * [Constraints](#Constraints) # * [Test Cases](#Test-Cases) # * [Algorithm](#Algorithm) # * [Code](#Code) # * [Unit Test](#Unit-Test) # * [Solution Notebook](#Solution-Notebook) # ## Constraints # # * What is fizz buzz? # * Return the string representation of numbers from 1 to n # * Multiples of 3 -> 'Fizz' # * Multiples of 5 -> 'Buzz' # * Multiples of 3 and 5 -> 'FizzBuzz' # * Can we assume the inputs are valid? # * No # * Can we assume this fits memory? # * Yes # ## Test Cases # # <pre> # * None -> Exception # * < 1 -> Exception # * 15 -> # [ # '1', # '2', # 'Fizz', # '4', # 'Buzz', # 'Fizz', # '7', # '8', # 'Fizz', # 'Buzz', # '11', # 'Fizz', # '13', # '14', # 'FizzBuzz' # ] # </pre> # ## Algorithm # # Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/arrays_strings/fizz_buzz/fizz_buzz_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. # ## Code # In[ ]: class Solution(object): def fizz_buzz(self, num): # TODO: Implement me pass # ## Unit Test # **The following unit test is expected to fail until you solve the challenge.** # In[ ]: # %load test_fizz_buzz.py import unittest class TestFizzBuzz(unittest.TestCase): def test_fizz_buzz(self): solution = Solution() self.assertRaises(TypeError, solution.fizz_buzz, None) self.assertRaises(ValueError, solution.fizz_buzz, 0) expected = [ '1', '2', 'Fizz', '4', 'Buzz', 'Fizz', '7', '8', 'Fizz', 'Buzz', '11', 'Fizz', '13', '14', 'FizzBuzz' ] self.assertEqual(solution.fizz_buzz(15), expected) print('Success: test_fizz_buzz') def main(): test = TestFizzBuzz() test.test_fizz_buzz() if __name__ == '__main__': main() # ## Solution Notebook # # Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/arrays_strings/fizz_buzz/fizz_buzz_solution.ipynb) for a discussion on algorithms and code solutions.
nilq/baby-python
python
#IP Address of the SQL server host = "157.230.209.171" #MySql username user = "easley_1267" #MySQL password password = "ROY7iOUUQAt18r8qnsXf5jO3foUHgAbp"
nilq/baby-python
python
import pandas as pd


def convert_jh_global_time_series_to_long(df, name):
    """Converts JH global time series data from wide to long format"""
    df = df.melt(id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'],
                 var_name='date', value_name=name)

    # Convert to datetime
    df['date'] = pd.to_datetime(df['date'], format="%m/%d/%y").dt.normalize()

    # Rename columns
    df.columns = ['province/state', 'country/region', 'latitude', 'longitude',
                  'date', name]

    return df


def merge_dataframes(df1, df2, df3=None):
    """Merges JH global time series dataframes that are already in long format"""
    # Merge on the long-format column names produced by
    # convert_jh_global_time_series_to_long (not the original wide-format names).
    merge_keys = ['province/state', 'country/region', 'latitude', 'longitude', 'date']
    merged_df = pd.merge(df1, df2, on=merge_keys, how='inner')

    if df3 is not None:
        merged_df = pd.merge(merged_df, df3, on=merge_keys, how='inner')

    return merged_df


def consolidate_country_regions(df):
    """Selects the rows with overall country stats and drops region column"""
    rtn_df = (df.loc[df['province/state'].isnull()]
              .drop(columns=['province/state']))

    return rtn_df


def get_top_n_countries(df, n, response):
    """
    Returns a list of the top countries by response

    :param df: pandas dataframe
    :param n {int}: number of countries to select
    :param response {string}: deaths, confirmed, or recovered
    """
    top_df = df.loc[df['date'] == df['date'].max()]
    top_df = top_df.sort_values(by=[response], ascending=False)

    return list(top_df['country/region'].iloc[0:n])


def clean_country_names(df):
    """
    Given a dataframe with a 'country/region' column, cleans country names
    """
    cleaned_df = df.replace({'country/region': {'US': 'United States',
                                                'Taiwan*': 'Taiwan',
                                                'Korea, South': 'South Korea'}
                             })
    return cleaned_df


# Calculate Incidence, Prevalence, Morbidity, Mortality
# https://www.health.ny.gov/diseases/chronic/basicstat.htm

# Join Political Leanings
# https://www.cpds-data.org/

# Freedom Index
# https://rsf.org/en/ranking_table
# https://www.cato.org/sites/cato.org/files/human-freedom-index-files/human-freedom-index-2019.pdf
# - https://www.reddit.com/r/IntellectualDarkWeb/comments/b07on4/political_compass_of_countries_data_from_the/

# Air Pollutions
# https://projects.iq.harvard.edu/files/covid-pm/files/pm_and_covid_mortality.pdf
# https://ourworldindata.org/air-pollution
# https://ourworldindata.org/outdoor-air-pollution
# https://ourworldindata.org/indoor-air-pollution
# - https://github.com/owid/covid-19-data/tree/master/public/data
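# Usage sketch: one plausible way these helpers compose; the CSV path below is
# a hypothetical local copy of the Johns Hopkins confirmed-cases time series.
if __name__ == '__main__':
    raw = pd.read_csv('data/time_series_covid19_confirmed_global.csv')
    confirmed = convert_jh_global_time_series_to_long(raw, 'confirmed')
    confirmed = consolidate_country_regions(confirmed)
    confirmed = clean_country_names(confirmed)
    print(get_top_n_countries(confirmed, n=5, response='confirmed'))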
nilq/baby-python
python
import time def example(seconds): print('Starting task') for i in range(seconds): print(i) time.sleep(1) print('Task completed') if __name__ == '__main__': example(10)
nilq/baby-python
python
"""The wireless version of a connection""" from Connection import Connection class Wireless_Connection(Connection): type = "Wireless_Connection" def __init__(self, source, dest): """ Create a connection between wireless devices. """ Connection.__init__(self, source, dest)
nilq/baby-python
python
from celery import shared_task @shared_task def add(a, b): return (a+b)
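# Usage sketch: how this shared task is normally queued, assuming a Celery app
# and broker are configured elsewhere in the project; the import path "tasks"
# and the result backend are assumptions, so this is kept as a comment.
#
#     from tasks import add
#     async_result = add.delay(4, 6)   # send the task to a worker
#     async_result.get(timeout=10)     # -> 10, requires a result backend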
nilq/baby-python
python
# This file is part of the History Store (histore). # # Copyright (C) 2018-2021 New York University. # # The History Store (histore) is released under the Revised BSD License. See # file LICENSE for full license details. """Writer for archives that are materialized as Json files on the file system. """ from typing import Optional import json from histore.archive.serialize.base import ArchiveSerializer from histore.archive.serialize.default import DefaultSerializer from histore.archive.row import ArchiveRow from histore.archive.writer import ArchiveWriter from histore.document.json.writer import JsonWriter class ArchiveFileWriter(ArchiveWriter): """Archive writer that outputs rows in an archive as Json serialized rows in a text file. Each row is stored in a separate line in the text file. The output file is a Json array. The first and the last row of the file open and close the array. """ def __init__( self, filename: str, row_counter: Optional[int] = 0, serializer: Optional[ArchiveSerializer] = None, compression: Optional[str] = None, encoder: Optional[json.JSONEncoder] = None ): """Initialize the output file, row counter, and the serializer that is being used. Parameters ---------- filename: string Path to the output file. row_counter: int, default=0 Counter that is used to generate unique internal row identifier. The current value of the counter is the value for the next unique identifier. serializer: histore.archive.serialize.base.ArchiveSerializer, default=None Implementation of the archive serializer interface that is used to serialize rows that are written to file. compression: string, default=None String representing the compression mode for the output file. encoder: json.JSONEncoder, default=None Encoder used when writing archive rows as JSON objects to file. """ super(ArchiveFileWriter, self).__init__(row_counter) # Use the default serializer if no serializer was given self.serializer = serializer if serializer else DefaultSerializer() # Create Json writer for the archive rows. self.writer = JsonWriter( filename=filename, compression=compression, encoder=encoder ) def close(self): """Write the last row to the output file and close the output array and the output file. """ self.writer.close() def write_archive_row(self, row: ArchiveRow): """Add the given row to the output file. Parameters ---------- row: histore.archive.row.ArchiveRow Row in a new version of a dataset archive. """ self.write_buffer(row) def write_buffer(self, row: Optional[ArchiveRow] = None): """Write the archive row in the internal buffer to the output file. Replace the buffer with the given (next output row). Parameters ---------- row: histore.archive.row.ArchiveRow, default=None Next row in the output stream. This row will be kept in the internal buffer and the previous row is being written to the output file. """ self.writer.write(self.serializer.serialize_row(row))
nilq/baby-python
python
#!/usr/bin/env python from ALU import * import numpy as np import pandas as pd import pickle class Dataset(): def __init__(self, data_bits, path, label_bit_msk=None): if label_bit_msk is None: label_bit_msk = [True for _ in range(data_bits)] elif(len(label_bit_msk) > data_bits): raise Exception("unsupported label bit mask length") self.path = path self.data_bits = data_bits self.label_bit_msk = [i!=0 for i in label_bit_msk] self.alu = ALU(self.data_bits, ['x']) self.data_dim = self.alu.data_dim self.label_dim = min(self.alu.label_dim, sum(self.label_bit_msk)) self.filename = str() def __iter__(self): """ only support generating the whole table now If use this on tf.dataset.from_generator, plz at least suffle something use ds.shuffle(cache = 1000) """ number, ops = self.alu.gen_range() arr = lambda x : np.array(x, dtype = "uint8") for op in ops: for B in number: for A in number: data, label = self._get_data_label(A, B, op) yield arr(data), arr(label) def __call__(self, form = "csv", batch_size = 1000, shuffle = True): if form is "csv": self.path = self.path + "dataset_csv/3ops/" self.filename = "xor_{}.csv".format(self.data_bits) self._csv(shuffle) elif form is "batch": self.path = self.path + "dataset{}".format(self.data_bits) number, ops = self.alu.gen_range() datas = [] labels = [] operations = [] data_dim = self.data_dim label_dim = self.label_dim total_size = len(ops) * len(number)**2 i = 0 for op in ops: for B in number: for A in number: data, label = self._get_data_label(A, B, op) datas.append(data) labels.append(label) operations.append(op) i = i + 1 if i%batch_size is 0 or i is total_size: name = self.filename + "_"+ str(i//batch_size) actual_size = batch_size if i % batch_size is 0 else i % batch_size data_arr = np.array(datas, dtype= 'uint8').reshape((actual_size, data_dim)) label_arr = np.array(labels, dtype = 'uint8').reshape((actual_size, label_dim)) dataset = dict() dataset["data"] = data_arr dataset["label"] = label_arr dataset["operations"] = operations with open(self.path + name + '.batch', 'wb+') as f: pickle.dump(dataset, f, protocol=pickle.HIGHEST_PROTOCOL) datas = [] labels = [] operations = [] else: raise Exception("Illegal format type") def _csv(self, shuffle = False): number, ops = self.alu.gen_range() datas = [] labels = [] data_dim = self.alu.data_dim-1 label_dim = self.label_dim total_size = len(ops) * len(number)**2 i = 0 for op in ops: for B in number: for A in number: data, label = self._get_data_label(A, B, op) datas.append(data) labels.append(label) data_arr = np.array(datas, dtype='uint8').reshape((total_size, data_dim)) label_arr = np.array(labels, dtype = 'uint8').reshape((total_size, label_dim)) df = pd.DataFrame(np.hstack((data_arr, label_arr))) if shuffle: df = df.sample(frac=1).reset_index(drop=True) df.to_csv(self.path + self.filename, header=False, index=False) def _get_data_label(self, A, B, op): """ return the list of data and label """ in1, in2, opc, out = self.alu(A, B, op) data = list(in1) + list(in2)# + list(opc) label = list(out) label = [i for i,j in zip(label, self.label_bit_msk) if j] return data, label if __name__ == '__main__': import os script_path = os.path.abspath(__file__) project_dir = script_path[:script_path.rfind("src")] output_path = project_dir + "dataset/" # import pathlib # project_path = pathlib.Path(__file__).parent.parent.parent # output_path = project_path / "dataset" # ds = Dataset(6, "ALU-6-14_batch", output_path) ds = Dataset(6, output_path, [True for i in range(6)]) ds() # for data, label in iter(ds): # print(data) # 
print(label)
nilq/baby-python
python
''' Regrid the GBT data to match the VLA HI data. ''' from spectral_cube import SpectralCube from astropy.utils.console import ProgressBar import numpy as np import os from cube_analysis.io_utils import create_huge_fits from paths import fourteenB_HI_data_path, data_path # Load the non-pb masked cube vla_cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits")) gbt_path = os.path.join(data_path, "GBT") cube = SpectralCube.read(os.path.join(gbt_path, "m33_gbt_vlsr_highres.fits")) # Ta* to T_mb as per @low-sky Tmb_conv = 1.052 save_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088_spectralregrid.fits") # Spectral interpolation, followed by reprojection. if not os.path.exists(save_name): cube = cube.spectral_interpolate(vla_cube.spectral_axis) if cube._is_huge: output_fits = create_huge_fits(save_name, cube.header, return_hdu=True) for chan in ProgressBar(cube.shape[0]): output_fits[0].data[chan] = cube[chan].value * Tmb_conv output_fits.flush() output_fits.close() else: (cube * Tmb_conv).write(save_name, overwrite=True) else: cube = SpectralCube.read(save_name) # Make the reprojected header new_header = cube.header.copy() new_header["NAXIS"] = 3 new_header["NAXIS1"] = vla_cube.shape[2] new_header["NAXIS2"] = vla_cube.shape[1] new_header["NAXIS3"] = vla_cube.shape[0] kwarg_skip = ['TELESCOP', 'BUNIT', 'INSTRUME'] for key in cube.header: if key == 'HISTORY': continue if key in vla_cube.header: if "NAXIS" in key: continue if key in kwarg_skip: continue new_header[key] = vla_cube.header[key] new_header.update(cube.beam.to_header_keywords()) new_header["BITPIX"] = -32 # We're going to convert to Tmb below new_header.comments['BUNIT'] = 'Tmb' # Build up the reprojected cube per channel save_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088.fits") output_fits = create_huge_fits(save_name, new_header, return_hdu=True) targ_header = vla_cube[0].header for chan in ProgressBar(cube.shape[0]): reproj_chan = \ cube[chan].reproject(targ_header).value.astype(np.float32) output_fits[0].data[chan] = reproj_chan if chan % 200 == 0: output_fits.flush() output_fits.close() # Now do it again from the native gridding size cube = SpectralCube.read(os.path.join(gbt_path, "m33_gbt_vlsr.fits")) # Ta* to T_mb as per @low-sky Tmb_conv = 1.052 save_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_Tmb_14B088_spectralregrid.fits") # Spectral interpolation, followed by reprojection. 
if not os.path.exists(save_name): cube = cube.spectral_interpolate(vla_cube.spectral_axis) if cube._is_huge: output_fits = create_huge_fits(save_name, cube.header, return_hdu=True) for chan in ProgressBar(cube.shape[0]): output_fits[0].data[chan] = cube[chan].value * Tmb_conv output_fits.flush() output_fits.close() else: (cube * Tmb_conv).write(save_name, overwrite=True) else: cube = SpectralCube.read(save_name) # Make the reprojected header new_header = cube.header.copy() new_header["NAXIS"] = 3 new_header["NAXIS1"] = vla_cube.shape[2] new_header["NAXIS2"] = vla_cube.shape[1] new_header["NAXIS3"] = vla_cube.shape[0] kwarg_skip = ['TELESCOP', 'BUNIT', 'INSTRUME'] for key in cube.header: if key == 'HISTORY': continue if key in vla_cube.header: if "NAXIS" in key: continue if key in kwarg_skip: continue new_header[key] = vla_cube.header[key] new_header.update(cube.beam.to_header_keywords()) new_header["BITPIX"] = -32 # We're going to convert to Tmb below new_header.comments['BUNIT'] = 'Tmb' # Build up the reprojected cube per channel save_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_Tmb_14B088.fits") output_fits = create_huge_fits(save_name, new_header, return_hdu=True) targ_header = vla_cube[0].header for chan in ProgressBar(cube.shape[0]): reproj_chan = \ cube[chan].reproject(targ_header).value.astype(np.float32) output_fits[0].data[chan] = reproj_chan if chan % 200 == 0: output_fits.flush() output_fits.close()
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- class TX(): def __init__(self): self.txid = '' self.inputs = [] self.outputs = [] self.block_height = 0 self.confirmations = 0 def print_tx(self): print '\nblock ', str(self.block_height), "(" + str(self.confirmations) + " confirmations)", self.txid print 'IN:', self.inputs print 'OUT:', self.outputs print 'primeInput:', self.prime_input_address() def prime_input_address(self): addresses = [] for tx_input in self.inputs: addresses.append(tx_input['address']) return sorted(addresses)[0] def received_value(self, address): value = 0 for output in self.outputs: if output['address'] == address: value += output['value'] return value def is_receiving_tx(self, address): received = True for tx_input in self.inputs: if tx_input['address'] == address: received = False return received def sent_value(self, address): value = 0 for tx_input in self.inputs: if tx_input['address'] == address: value += tx_input['value'] change = 0 for tx_output in self.outputs: if tx_output['address'] == address: change += tx_output['value'] return value-change def is_sending_tx(self, address): sending = False for tx_input in self.inputs: if tx_input['address'] == address: sending = True return sending def to_dict(self, address): tx_dict = {"txid": self.txid, "prime_input_address": self.prime_input_address(), "inputs": self.inputs, "outputs": self.outputs, "block_height": self.block_height, "confirmations": self.confirmations, "receiving": self.is_receiving_tx(address)} if tx_dict["receiving"] is True: tx_dict["receivedValue"] = self.received_value(address) else: tx_dict["sentValue"] = self.sent_value(address) return tx_dict
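# Usage sketch: invented transaction data, purely to illustrate how the helper
# methods read a TX from the point of view of a single address.
def _example_tx_usage():
    tx = TX()
    tx.txid = 'abc123'
    tx.block_height = 500000
    tx.confirmations = 6
    tx.inputs = [{'address': '1SenderAddress', 'value': 100000}]
    tx.outputs = [{'address': '1ReceiverAddress', 'value': 90000},
                  {'address': '1SenderAddress', 'value': 9000}]
    # For the receiver this is a receiving tx worth 90000
    return tx.to_dict('1ReceiverAddress')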
nilq/baby-python
python
from series import fibonacci, lucas, sum_series # Fibonacci tests" # Expected Outcome def test_zero(): expected = 0 actual = fibonacci(0) assert actual == expected def test_one(): expected = 1 actual = fibonacci(1) assert actual == expected def test_15n(): expected = 610 actual = fibonacci(15) assert actual == expected # Edge Case def test_negative(): expected = 0 actual = fibonacci(-4) assert actual == expected # Expected Failure def test_letter(): expected = "Input should be a one integer" actual = fibonacci("a") assert actual == expected def test_float(): expected = "Input should be a one integer" actual = fibonacci(1.354) assert actual == expected # LUCAS TESTS # Expected Outcome def test_zero_lucas(): expected = 2 actual = lucas(0) assert actual == expected def test_one_lucas(): expected = 1 actual = lucas(1) assert actual == expected def test_three_lucas(): expected = 4 actual = lucas(3) assert actual == expected def test_four_lucas(): expected = 7 actual = lucas(4) assert actual == expected def test_15n_lucas(): expected = 1364 actual = lucas(15) assert actual == expected # Edge case def test_negative_lucas(): expected = 2 actual = lucas(-4) assert actual == expected # expected failure def test_15n_lucas(): expected = "Input should be a one integer" actual = lucas("a") assert actual == expected # SUM_SERIES TESTS # Expected Outcome def test_zero_sum_series_fibonacci(): expected = 0 actual = sum_series(0) assert actual == expected def test_zero_sum_series_fibonacci_params(): expected = 0 actual = sum_series(0, 0, 1) assert actual == expected def test_zero_sum_series_lucas(): expected = 2 actual = sum_series(0, 2, 1) assert actual == expected def test_sum_series_new_sequence(): expected = 123 actual = sum_series(8, 3, 4) assert actual == expected # Edge Cases def test_sum_series_new_sequence_negative(): expected = 3 actual = sum_series(-4, 3, 4) assert actual == expected def test_sum_series_new_sequence_negative_params(): expected = 6 actual = sum_series(4, -3, 4) assert actual == expected # expected failure def test_sum_series_letters(): expected = "Input allows only integers" actual = sum_series('a', 3, 4) assert actual == expected def test_sum_series_letters_in_params(): expected = "Input allows only integers" actual = sum_series(5, 'a', 4) assert actual == expected
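# Reference sketch: the "series" module under test is not shown in this file.
# The commented code below is one possible implementation consistent with the
# expectations above (fibonacci(15) == 610, lucas(15) == 1364, the custom-start
# sum_series cases, negative n returning the first element, and the exact error
# strings); it is not necessarily the original implementation.
#
# def sum_series(n, first=0, second=1):
#     if not all(isinstance(v, int) for v in (n, first, second)):
#         return "Input allows only integers"
#     if n <= 0:
#         return first
#     a, b = first, second
#     for _ in range(n - 1):
#         a, b = b, a + b
#     return b
#
# def fibonacci(n):
#     if not isinstance(n, int):
#         return "Input should be a one integer"
#     return sum_series(n, 0, 1)
#
# def lucas(n):
#     if not isinstance(n, int):
#         return "Input should be a one integer"
#     return sum_series(n, 2, 1)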
nilq/baby-python
python
from django.urls import path

from . import views

app_name = 'orders'

urlpatterns = [
    path('create/', views.order_create, name='order_create'),
    path(
        'order_list/<str:username>/',
        views.orderlist,
        name='order_list'
    ),
    path(
        'order_list/<int:id>/detail/',
        views.orderlistdetail,
        name='order_list_detail'
    ),
    path(
        'my_sales/<str:username>/',
        views.ordersales,
        name='ordersales'
    ),
]
nilq/baby-python
python
''' Python 3.6 This script contains functions to clean the text in the tweets. Methods here are not called directly. Instead, they are called from either "NLTK_clean_tweet_testing.py" or "TextBlob_clean_tweet_testing.py" ''' print("Importing tweetCleaner...") from bs4 import BeautifulSoup import re from nltk.stem import WordNetLemmatizer wordnet_lemmatizer = WordNetLemmatizer() """ Returns a list of stopwords called StopWordList. The file containing the stopwords is titled "stopwords.txt". """ def StopWordListCreator(): StopWordList = [] with open("stopwords.txt","r",encoding="utf-8") as stopwords: for stopword in stopwords.readlines(): StopWordList.append(stopword[:-1]) return StopWordList def StopWordRemover(tweet): ''' Removes all stopwords in the tweet, w.r.t. the StopWordList created above. ''' tweet_words = tweet.split() new_tweet = [] for word in tweet_words: if word in StopWordListCreator(): pass else: new_tweet.append(word) return (" ").join(new_tweet) def lowercase(tweet): ''' Returns the tweet in lowercase. ''' return tweet.lower() def removeSpecialChars(tweet): ''' Removes special characters which are specifically found in tweets. ''' #Converts HTML tags to the characters they represent soup = BeautifulSoup(tweet, "html.parser") tweet = soup.get_text() #Convert www.* or https?://* to empty strings tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))','',tweet) #Convert @username to empty strings tweet = re.sub('@[^\s]+','',tweet) #Remove additional white spaces tweet = re.sub('[\s]+', ' ', tweet) #Replace #word with word tweet = re.sub(r'#([^\s]+)', r'\1', tweet) #Trims the tweet tweet = tweet.strip('\'"') return tweet def removeAllNonAlpha(tweet): ''' Remove all characters which are not alphabets, numbers or whitespaces. ''' tweet = re.sub('[^A-Za-z0-9 ]+','', tweet) return tweet def lemmatizer(tweet): ''' Attempts to replace every individual word with it's root word. ''' word_list = [] for word in tweet.split(): word_list.append(wordnet_lemmatizer.lemmatize(word)) return (" ".join(word_list)) print("Finished importing tweetCleaner.")
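# Usage sketch: one plausible cleaning order for a raw tweet; the sample text
# is invented, and "stopwords.txt" must exist as the module already expects.
if __name__ == '__main__':
    raw_tweet = "@user Check this out!! https://example.com #Python &amp; NLTK rocks"
    cleaned = removeSpecialChars(raw_tweet)
    cleaned = lowercase(cleaned)
    cleaned = removeAllNonAlpha(cleaned)
    cleaned = StopWordRemover(cleaned)
    cleaned = lemmatizer(cleaned)
    print(cleaned)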
nilq/baby-python
python
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import json import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion MAX_FLOAT = 1e30 MIN_FLOAT = -1e30 @register_criterion("coqa") class CoqaCriterion(FairseqCriterion): def __init__(self, task, ranking_head_name, save_predictions): super().__init__(task) self.ranking_head_name = ranking_head_name self.start_n_top = 5 ################################## self.end_n_top = 5 if save_predictions is not None: self.prediction_h = True else: self.prediction_h = None def __del__(self): pass #if self.prediction_h is not None: # self.prediction_h.close() @staticmethod def add_args(parser): # fmt: off parser.add_argument('--save-predictions', metavar='FILE', help='file to save predictions to') parser.add_argument('--ranking-head-name', default='coqa', help='name of the classification head to use') parser.add_argument('--n-best-size', default=5, help='n best size for predictions') parser.add_argument('--start-n-top', default=5, help='Beam size for span start') parser.add_argument('--end-n-top', default=5, help='Beam size for span end') # fmt: on def get_masked_data(self, data, mask): return data * mask+MIN_FLOAT * (1-mask) def tile(self, data, size): for dim in range(-1, -1*len(size)-1, -1): multiple_num = size[dim] ori_data = data for _ in range(multiple_num-1): data = torch.cat([data, ori_data], dim=dim) return data def forward(self, model, sample, reduce=True): ####fairseq_task.py 430줄 """Compute ranking loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ def compute_loss(label, predict, predict_mask, label_smoothing=0.0): masked_predict = self.get_masked_data(predict, predict_mask) masked_predict = predict #[b,l] if label_smoothing > 1e-10: onehot_label = F.one_hot(label, masked_predict.size(-1)) onehot_label = (onehot_label * (1-label_smoothing) + label_smoothing / masked_predict.size(-1).FloatTensor()) * predict_mask log_likelihood = F.log_softmax(masked_predict, dim=-1) loss = - (onehot_label*log_likelihood).sum(-1) else: CEL = torch.nn.CrossEntropyLoss() loss = CEL(masked_predict, label) return loss assert ( hasattr(model, "classification_heads") and self.ranking_head_name in model.classification_heads ), "model must provide sentence ranking head for --criterion=coqa" logits, _ = model( sample["net_input"], classification_head_name=self.ranking_head_name, ) p_mask = sample["net_input"]["p_mask"] preds = {} target_exist = sample["start_position"]!=None ##start start_result = logits["start_result"] sample_size = start_result.size()[0] start_result_mask = 1-p_mask start_result = torch.squeeze(start_result, dim=-1) start_result = self.get_masked_data(start_result, start_result_mask) start_prob = F.softmax(start_result, dim=-1) if not self.training: start_top_prob, start_top_index = torch.topk(start_prob, k=self.start_n_top) preds["start_prob"] = start_top_prob preds["start_index"] = start_top_index ##end end_result = logits["end_result"] if self.training: end_result_mask = 1-p_mask end_result = torch.squeeze(end_result, dim=-1) end_result = self.get_masked_data(end_result, end_result_mask) end_prob = F.softmax(end_result, dim=-1) else: end_result_mask = torch.unsqueeze(1-p_mask, 
dim=1) end_result_mask = self.tile(end_result_mask, (1, self.start_n_top, 1)) end_result = torch.transpose(torch.squeeze(end_result, dim=-1), 1, 2) end_result = self.get_masked_data(end_result, end_result_mask) end_prob = F.softmax(end_result, dim=-1) end_top_prob, end_top_index = torch.topk(end_prob, k=self.start_n_top) preds["end_prob"] = end_top_prob preds["end_index"] = end_top_index ##unk unk_result = logits["unk_result"] unk_result_mask = torch.max(1-p_mask, dim=-1).values unk_result = torch.squeeze(unk_result, dim=-1) unk_result = self.get_masked_data(unk_result, unk_result_mask) unk_prob = F.sigmoid(unk_result) preds["unk_prob"] = unk_prob ##yes yes_result = logits["yes_result"] yes_result_mask = torch.max(1-p_mask, dim=-1).values yes_result = torch.squeeze(yes_result, dim=-1) yes_result = self.get_masked_data(yes_result, yes_result_mask) yes_prob = F.sigmoid(yes_result) preds["yes_prob"] = yes_prob ##no no_result = logits["no_result"] no_result_mask = torch.max(1-p_mask, dim=-1).values no_result = torch.squeeze(no_result, dim=-1) no_result = self.get_masked_data(no_result, no_result_mask) no_prob = F.sigmoid(no_result) preds["no_prob"] = no_prob ##num num_result = logits["num_result"] num_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values num_result = self.get_masked_data(num_result, num_result_mask) num_probs = F.softmax(num_result, dim=-1) preds["num_probs"] = num_probs ##opt opt_result = logits["opt_result"] opt_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values opt_result = self.get_masked_data(opt_result, opt_result_mask) opt_probs = F.softmax(opt_result, dim=-1) preds["opt_probs"] = opt_probs if target_exist and self.training: start_label = sample["start_position"] start_loss = compute_loss(start_label, start_result, 1-p_mask) # [b],[b,l],[b,l] end_label = sample["end_position"] end_loss = compute_loss(end_label, end_result, 1-p_mask) # [b], [b,l], [b,l] loss = torch.mean(start_loss + end_loss) unk_label = sample["is_unk"] unk_loss = F.binary_cross_entropy_with_logits(unk_result, unk_label.half()) loss += torch.mean(unk_loss) yes_label = sample["is_yes"] yes_loss = F.binary_cross_entropy_with_logits(yes_result, yes_label.half()) loss += torch.mean(yes_loss) no_label = sample["is_no"] no_loss = F.binary_cross_entropy_with_logits(no_result, no_label.half()) loss += torch.mean(no_loss) num_label = sample["number"] num_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values num_loss = compute_loss(num_label, num_result, num_result_mask) loss += torch.mean(num_loss) opt_label = sample["option"] opt_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values opt_loss = compute_loss(opt_label, opt_result, opt_result_mask) loss += torch.mean(opt_loss) targets = sample elif target_exist: start_label = sample["start_position"] start_loss = compute_loss(start_label, start_result, 1-p_mask) # [b],[b,l],[b,l] end_label = sample["end_position"] end_result = end_result[:,0,:] end_loss = compute_loss(end_label, end_result, 1-p_mask) # [b],[b,k,l],[b,l] loss = torch.mean(start_loss + end_loss) unk_label = sample["is_unk"] unk_loss = F.binary_cross_entropy_with_logits(unk_result, unk_label.half()) loss += torch.mean(unk_loss) yes_label = sample["is_yes"] yes_loss = F.binary_cross_entropy_with_logits(yes_result, yes_label.half()) loss += torch.mean(yes_loss) no_label = sample["is_no"] no_loss = F.binary_cross_entropy_with_logits(no_result, no_label.half()) loss += torch.mean(no_loss) num_label = sample["number"] num_result_mask = torch.max(1-p_mask, dim=-1, 
keepdim=True).values num_loss = compute_loss(num_label, num_result, num_result_mask) loss += torch.mean(num_loss) opt_label = sample["option"] opt_result_mask = torch.max(1-p_mask, dim=-1, keepdim=True).values opt_loss = compute_loss(opt_label, opt_result, opt_result_mask) loss += torch.mean(opt_loss) targets = sample else: loss = torch.tensor(0.0, requires_grad=True) targets = None logging_output = { "loss": loss.data, "ntokens": sample["ntokens"], "nsentences": sample_size, "sample_size": sample_size, } if self.prediction_h is not None and not self.training: predictions = [] for i in range(sample["nsentences"]): pred = {} pred["unique_id"] = sample["id"].tolist()[i] pred["qas_id"] = sample["qas_id"].tolist()[i] pred["start_prob"] = preds["start_prob"].tolist()[i] pred["start_index"] = preds["start_index"].tolist()[i] pred["end_prob"] = preds["end_prob"].tolist()[i] pred["end_index"] = preds["end_index"].tolist()[i] pred["unk_prob"] = preds["unk_prob"].tolist()[i] pred["yes_prob"] = preds["yes_prob"].tolist()[i] pred["no_prob"] = preds["no_prob"].tolist()[i] pred["num_probs"] = preds["num_probs"].tolist()[i] pred["opt_probs"] = preds["opt_probs"].tolist()[i] prediction = json.dumps(pred) predictions.append(prediction) #self.prediction_h.write(prediction) #self.prediction_h.write("\n") return loss, predictions, sample_size, logging_output return loss, sample_size, logging_output ###한번의 batch마다 불러짐 @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
nilq/baby-python
python
# -*- coding: UTF-8 -*- from __future__ import unicode_literals from django.shortcuts import render, get_object_or_404 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from .models import MyModel def mymodel_list(request): paginate_by = 24 qs = MyModel.objects.all() paginator = Paginator(qs, paginate_by) page_number = request.GET.get("page") try: page = paginator.page(page_number) except PageNotAnInteger: # If page parameter is not an integer, show first page. page = paginator.page(1) except EmptyPage: # If page parameter is out of range, show last existing page. page = paginator.page(paginator.num_pages) context = { 'object_list': page, } return render(request, "{{ app_name }}/mymodel_list.html", context) def mymodel_details(request, object_id): instance = get_object_or_404(MyModel, pk=object_id) context = { 'object': instance, } return render(request, "{{ app_name }}/mymodel_details.html", context)
nilq/baby-python
python
import tkinter as tk from sudokuUI import SudokuUI root = tk.Tk() #p = [ [0,i,i+1] for i in range(9) ] + [ [1,(i+3)% 9, i + 1] for i in range(9)] + [ [2,(i+6) % 9, i+1] for i in range(9)] + [[3,(i+1)%9,i+1] for i in range(9)] + [[4,(i+4)%9,i+1] for i in range(9)] + [[5, (i+7)% 9, i + 1] for i in range(9)] + [[6,(i+2)%9,i+1] for i in range(9)] + [[7,(i+5)%9,i+1] for i in range(9)] + [[8, (i+8)% 9, i + 1] for i in range(9)] p = [ [0,i,i+1] for i in range(9) ] + [ [1,(i+3)% 9, i + 1] for i in range(9)] + [ [2,(i+6) % 9, i+1] for i in range(9)] + [[3,(i+1)%9,i+1] for i in range(9)] + [[4,(i+4)%9,i+1] for i in range(9)] + [[5, (i+7)% 9, i + 1] for i in range(9)] + [[6,(i+2)%9,i+1] for i in range(9)] + [[7,(i+5)%9,i+1] for i in range(9)] s = SudokuUI(root, 60, p) root.geometry("800x800") root.mainloop()
nilq/baby-python
python
# SPDX-License-Identifier: MIT # Copyright (c) 2021 scmanjarrez. All rights reserved. # This work is licensed under the terms of the MIT license. from contextlib import closing import sqlite3 as sql DB = 'diptico.db' def setup_db(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.executescript( """ CREATE TABLE IF NOT EXISTS parts ( part INTEGER, volume INTEGER, title TEXT, url TEXT, finished INTEGER DEFAULT 0, PRIMARY KEY (part, volume) ); CREATE TABLE IF NOT EXISTS chapters ( part INTEGER, volume INTEGER, title TEXT, url TEXT, new INTEGER DEFAULT 1, FOREIGN KEY (part, volume) REFERENCES parts(part, volume), PRIMARY KEY (part, volume, title) ); CREATE TABLE IF NOT EXISTS mestionora ( title TEXT PRIMARY KEY ); CREATE TABLE IF NOT EXISTS users ( uid INTEGER PRIMARY KEY, notifications INTEGER DEFAULT 1 ); """ ) def parts(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT part, volume, title, url FROM parts') return cur.fetchall() def name_part(part): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT DISTINCT(title) FROM parts WHERE part = ?', [part]) return cur.fetchone()[0] def total_parts(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT part, title ' 'FROM parts ' 'ORDER BY rowid') ret = cur.fetchall() group = [[(p, t) for p, t in ret if p == r] for r in range(1, ret[-1][0] + 1)] return [max(set(g), key=g.count) for g in group] def n_parts(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT count(DISTINCT part) ' 'FROM parts') return cur.fetchone()[0] def n_volumes(part): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT count(volume) ' 'FROM parts ' 'WHERE part = ?', [part]) return cur.fetchone()[0] def total_volumes(part): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT volume ' 'FROM parts ' 'WHERE part = ? ' 'ORDER BY rowid', [part]) return cur.fetchall() def unfinished_part(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT part, volume, title, url ' 'FROM parts ' 'WHERE finished = 0') return cur.fetchone() def add_part(part, volume, title, url): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('INSERT INTO parts ' '(part, volume, title, url) ' 'VALUES (?, ?, ?, ?)', [part, volume, title, url]) db.commit() def part_cached(part, volume): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute( 'SELECT EXISTS (' 'SELECT 1 FROM parts ' 'WHERE part = ? AND volume = ?' ')', [part, volume]) return cur.fetchone()[0] def set_finished(part, volume): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('UPDATE parts ' 'SET finished = 1 ' 'WHERE part = ? AND volume = ?', [part, volume]) db.commit() def chapters(part, volume): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT title, url ' 'FROM chapters ' 'WHERE part = ? AND volume = ? ' 'ORDER BY rowid', [part, volume]) return cur.fetchall() def n_chapters(part, volume): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT count(title) ' 'FROM chapters ' 'WHERE part = ? 
AND volume = ?', [part, volume]) return cur.fetchall() def new_chapters(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT part, volume, title ' 'FROM chapters ' 'WHERE new = 1') return cur.fetchall() def add_chapter(part, volume, title, url): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('INSERT INTO chapters ' '(part, volume, title, url) ' 'VALUES (?, ?, ?, ?)', [part, volume, title, url]) db.commit() def chapter_cached(part, volume, title): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute( 'SELECT EXISTS (' 'SELECT 1 FROM chapters ' 'WHERE part = ? AND volume = ? AND title = ?)', [part, volume, title]) return cur.fetchone()[0] def unset_new(part, volume, title): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('UPDATE chapters ' 'SET new = 0 ' 'WHERE part = ? AND volume = ? ' 'AND title = ?', [part, volume, title]) db.commit() def add_mestionora(titles): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('DELETE FROM mestionora') cur.executemany('INSERT INTO mestionora ' 'VALUES (?)', [(tit,) for tit in titles]) db.commit() def mestionora_chapters(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT * FROM mestionora') return [ch[0] for ch in cur.fetchall()] def users(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT uid FROM users') return cur.fetchall() def cached(uid): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute( 'SELECT EXISTS (' 'SELECT 1 FROM users WHERE uid = ?' ')', [uid]) return cur.fetchone()[0] def add_user(uid): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('INSERT INTO users (uid) VALUES (?)', [uid]) db.commit() def del_user(uid): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('DELETE FROM users ' 'WHERE uid = ?', [uid]) db.commit() def notifications(uid): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT notifications FROM users ' 'WHERE uid = ?', [uid]) return cur.fetchone()[0] # (x,) def toggle_notifications(uid): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('UPDATE users SET notifications = -notifications ' 'WHERE uid = ?', [uid]) db.commit() def all_users_notify(): with closing(sql.connect(DB)) as db: with closing(db.cursor()) as cur: cur.execute('SELECT uid FROM users ' 'WHERE notifications = 1') return cur.fetchall()
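# Usage sketch: a minimal illustration of the user helpers above; the uid is an
# invented chat id, and diptico.db is created in the working directory.
if __name__ == '__main__':
    setup_db()
    if not cached(12345):
        add_user(12345)
    toggle_notifications(12345)
    print(users(), notifications(12345))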
nilq/baby-python
python
######## # Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############ import os import yaml import logging from distutils.version import StrictVersion from cloudify.manager import get_rest_client from cloudify.utils import get_admin_api_token from cloudify.constants import EVENTS_EXCHANGE_NAME from cloudify_agent.worker import ( CloudifyOperationConsumer, ) logger = logging.getLogger('mgmtworker') class HookConsumer(CloudifyOperationConsumer): routing_key = 'events.hooks' HOOKS_CONFIG_PATH = '/opt/mgmtworker/config/hooks.conf' def __init__(self, queue_name, registry, max_workers=5): super(HookConsumer, self).__init__(queue_name, exchange_type='topic', registry=registry, threadpool_size=max_workers) self.queue = queue_name self.exchange = EVENTS_EXCHANGE_NAME def handle_task(self, full_task): event_type = full_task['event_type'] hook = self._get_hook(event_type) if not hook: return logger.info( 'The hook consumer received `{0}` event and the hook ' 'implementation is: `{1}`'.format(event_type, hook.get('implementation')) ) try: task = self._get_task(full_task, hook) result = super(HookConsumer, self).handle_task(task) except Exception as e: result = {'ok': False, 'error': e.message} logger.error('{0!r}, while running the hook triggered by the ' 'event: {1}'.format(e, event_type)) return result def _get_hook(self, event_type): if not os.path.exists(self.HOOKS_CONFIG_PATH): logger.warn("The hook consumer received `{0}` event but the " "hooks config file doesn't exist".format(event_type)) return None with open(self.HOOKS_CONFIG_PATH) as hooks_conf_file: try: hooks_yaml = yaml.safe_load(hooks_conf_file) hooks_conf = hooks_yaml.get('hooks', {}) if hooks_yaml else {} except yaml.YAMLError: logger.error( "The hook consumer received `{0}` event but the hook " "config file is invalid yaml".format(event_type) ) return None for hook in hooks_conf: if hook.get('event_type') == event_type: return hook logger.info("The hook consumer received `{0}` event but didn't find a " "compatible hook in the configuration".format(event_type)) return None def _get_task(self, full_task, hook): hook_context, operation_context = self._get_contexts( full_task, hook['implementation'] ) task = { 'cloudify_task': { 'kwargs': { '__cloudify_context': operation_context }, 'args': [hook_context] } } kwargs = hook.get('inputs') or {} task['cloudify_task']['kwargs'].update(kwargs) return task def _get_contexts(self, full_task, implementation): hook_context = full_task['context'] tenant = hook_context.pop('tenant') tenant_name = tenant.get('name') hook_context['tenant_name'] = tenant.get('name') hook_context['event_type'] = full_task['event_type'] hook_context['timestamp'] = full_task['timestamp'] hook_context['arguments'] = full_task['message']['arguments'] operation_context = dict( type='hook', tenant=tenant, no_ctx_kwarg=True, task_target=self.queue, tenant_name=tenant_name, rest_token=hook_context.get('rest_token'), plugin=self._get_plugin(tenant_name, implementation) ) if 
operation_context['plugin']: split_task_name = implementation.split('.')[1:] operation_context['task_name'] = '.'.join(split_task_name) else: operation_context['task_name'] = implementation return hook_context, operation_context def _get_plugin(self, tenant_name, implementation): package_name = implementation.split('.')[0] filter_plugin = {'package_name': package_name} admin_api_token = get_admin_api_token() rest_client = get_rest_client(tenant=tenant_name, api_token=admin_api_token) plugins = rest_client.plugins.list(**filter_plugin) if not plugins: return {} plugins.sort(key=lambda p: StrictVersion(p.package_version), reverse=True) return { 'package_name': package_name, 'package_version': plugins[0]['package_version'], 'visibility': plugins[0]['visibility'] }
nilq/baby-python
python
# Generated by Django 2.2.3 on 2019-07-30 13:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('events', '0002_auto_20190730_0034'), ('profiles', '0002_profile_follows'), ] operations = [ migrations.AddField( model_name='profile', name='favorites', field=models.ManyToManyField(related_name='favorited_by', to='events.Event'), ), ]
nilq/baby-python
python
import numpy as np import warnings from time import sleep from main import get_prediction from example_data_base import save_historical_data, get_historical_data prediction_rates = {} def get_random_array(n): return np.random.randint(0, 10, n).tolist() def convert_to_db_format(predictions): cars = predictions.get('cars') features = predictions.get('features') hist_array = [] for c in cars: car_rate = c['rate'] if car_rate in prediction_rates: prediction_rates[car_rate] += 1 else: prediction_rates[car_rate] = 1 car_record = [] car_record.extend(features) car_record.append(c['name']) car_record.append(get_rate(c['doubleRate'])) hist_array.append(car_record) return hist_array def get_rate(predict_rate): if predict_rate > 0.39: return 5.0 elif predict_rate > 0.29: return 4.0 elif predict_rate > 0.19: return 3.0 elif predict_rate > 0.09: return 2.0 else: return 1.0 def generate_data(n): for i in range(n): engine = get_random_array(5) car_body = get_random_array(4) costs = get_random_array(3) car_details = get_random_array(3) equipment = get_random_array(3) driving_features = get_random_array(4) arguments = {"engine": engine, "car_body": car_body, "costs": costs, "car_details": car_details, "equipment": equipment, "driving_features": driving_features} predictions = get_prediction(arguments) db_records = convert_to_db_format(predictions) save_historical_data(db_records, python_call=True) print("Finished for [%d/%d]" % (i + 1, n)) sleep(1) if __name__ == "__main__": warnings.filterwarnings("ignore") generate_data(150) history = get_historical_data(python_call=True) print(len(history)) for p in prediction_rates: print(p, prediction_rates[p])
nilq/baby-python
python
# Ivan Carvalho # Solution to https://www.urionlinejudge.com.br/judge/problems/view/2057 #!/usr/bin/env python2.7 # encoding : utf-8 numero = sum([int(i) for i in raw_input().split(" ")]) if numero < 0: print numero + 24 elif numero < 24: print numero else: print numero-24
nilq/baby-python
python
"""Centralized setup of logging for the service.""" import logging.config import sys from os import path def setup_logging(conf): """Create the services logger.""" if conf and path.isfile(conf): logging.config.fileConfig(conf) print("Configure logging, from conf:{}".format(conf), file=sys.stdout) return logging.getLogger(__name__) else: print( "Unable to configure logging, attempted conf:{}".format(conf), file=sys.stderr, ) def log_error(msg): """Log error.""" logging.error(msg) def log_bpm_error(msg): """Log error.""" logging.error(msg) logging.error( "The connection with Python and Camunda API is not proper. Ensure you have passed env variables properly and have set listener in Keycloak(camunda-rest-api)" ) def log_info(msg): """Log info.""" logging.info(msg)
nilq/baby-python
python
import setuptools setuptools.setup( name='pytorch-nce2', version='0.0.1', author='Kaiyu Shi', author_email='skyisno.1@gmail.com', description='An NCE implementation in pytorch', long_description=open('README.md').read(), long_description_content_type='text/markdown', url='https://github.com/Stonesjtu/Pytorch-NCE', packages=['nce'], classifiers=[ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], )
nilq/baby-python
python
import os
import errno
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split

DATA_PATH = 'raw_data'
SAMPLE_RATE = 16000
DURATION = 2.5
OFFSET = 0.5
HOP_LENGTH = 512
# MFCC -> (n_mfcc, t)
# t = sample_rate * time / hop_length
MAX_LENGTH = int((SAMPLE_RATE * DURATION // HOP_LENGTH) + 1)


def preprocess_data():
    dir_lists = os.listdir(DATA_PATH)
    mfcc_vectors = []
    labels = []
    for dir_list in dir_lists:
        if dir_list == '.DS_Store':
            continue
        file_path = os.path.join(DATA_PATH, dir_list)
        files = os.listdir(file_path)
        print("==================== {} ====================".format(dir_list))
        for file in files:
            if file == '.DS_Store':
                continue
            # os.path.splitext drops the extension; the original used
            # file.strip('.wav'), which strips those *characters* from both
            # ends of the name rather than removing the suffix
            label = get_label(os.path.splitext(file)[0])
            mfcc = wav2mfcc(os.path.join(file_path, file),
                            duration=DURATION, offset=OFFSET)
            print(file, mfcc.shape, label.shape)
            mfcc_vectors.append(mfcc)
            labels.append(label)
    mfcc_vectors = np.array(mfcc_vectors)
    labels = np.array(labels)
    np.savez('train_data.npz', x_train=mfcc_vectors, y_train=labels)
    print(mfcc_vectors.shape, labels.shape)


def get_label(file_name):
    '''
    Filename identifiers

        Modality (01 = full-AV, 02 = video-only, 03 = audio-only).
        Vocal channel (01 = speech, 02 = song).
        Emotion (01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry,
                 06 = fearful, 07 = disgust, 08 = surprised).
        Emotional intensity (01 = normal, 02 = strong).
            NOTE: There is no strong intensity for the 'neutral' emotion.
        Statement (01 = "Kids are talking by the door",
                   02 = "Dogs are sitting by the door").
        Repetition (01 = 1st repetition, 02 = 2nd repetition).
        Actor (01 to 24. Odd numbered actors are male,
               even numbered actors are female).
    '''
    parts = file_name.split('-')
    # The original enumerated each emotion code in an if/elif chain; for the
    # valid codes '01'-'08' this arithmetic is equivalent: emotions map to
    # labels 0-7 for male (odd-numbered) actors and 8-15 for female actors.
    label = [int(parts[2]) - 1]
    if int(parts[6]) % 2 == 0:  # female
        label[0] += 8
    return np.array(label)


def wav2mfcc(file_path, sr=None, offset=0.0, duration=None, n_mfcc=13,
             max_length=MAX_LENGTH):
    data, sr = librosa.load(file_path, mono=True, sr=sr, offset=offset,
                            duration=duration)
    # crude decimation: keeping every 3rd sample appears intended to turn the
    # 48 kHz RAVDESS recordings into ~16 kHz audio, matching the hard-coded
    # sr in the MFCC call below
    data = data[::3]
    mfcc = librosa.feature.mfcc(y=data, sr=16000, n_mfcc=n_mfcc)
    if max_length > mfcc.shape[1]:
        #print(max_length, mfcc.shape[1])
        pad_width = max_length - mfcc.shape[1]
        mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')
    else:
        mfcc = mfcc[:, :max_length]

    '''
    # plot
    plt.figure()
    plt.subplot(2, 1, 1)
    librosa.display.waveplot(data, sr=sr)
    plt.subplot(2, 1, 2)
    librosa.display.specshow(mfcc, x_axis='time')
    #plt.colorbar()
    plt.title('MFCC')
    plt.tight_layout()
    plt.show()
    '''
    return mfcc


def load_dataset(split_ratio=0.8, random_state=42):
    data = np.load('train_data.npz')
    x_train, y_train = data['x_train'], data['y_train']
    data.close()
    #y_train = np_utils.to_categorical(y_train, 16)
    return train_test_split(x_train, y_train,
                            test_size=(1 - split_ratio),
                            random_state=random_state,
                            shuffle=True)


def save_model(model, model_name):
    file_path = 'model/{}.h5'.format(model_name)
    if not os.path.exists(os.path.dirname(file_path)):
        try:
            os.makedirs(os.path.dirname(file_path))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    model.save(file_path)


def plot_loss(history, file_name):
    file_path = 'images/{}.png'.format(file_name)
    if not os.path.exists(os.path.dirname(file_path)):
        try:
            os.makedirs(os.path.dirname(file_path))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model train vs validation loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper right')
    plt.savefig(file_path)
    plt.show()


def plot_accuracy(history, file_name):
    file_path = 'images/{}.png'.format(file_name)
    if not os.path.exists(os.path.dirname(file_path)):
        try:
            os.makedirs(os.path.dirname(file_path))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    plt.figure()
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model train vs validation accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig(file_path)
    plt.show()


if __name__ == "__main__":
    preprocess_data()
    #file_path = 'raw_data/Actor_08/03-01-08-01-02-01-08.wav'
    #file_name = '03-01-08-01-02-01-08'
    #mfcc = wav2mfcc(file_path, sr=None, offset=0.5, duration=2.5, n_mfcc=13)
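A quick sanity check of the frame-count arithmetic behind MAX_LENGTH above, worked with the constants defined in this file:

# 16000 * 2.5 = 40000 samples per clip; 40000 // 512 = 78 full hops;
# plus one frame for the remainder gives 79 MFCC frames
assert int((16000 * 2.5 // 512) + 1) == 79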
nilq/baby-python
python
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.

import asynctest
from pathlib import Path
from irmacl_async.apiclient import AAPI

SAMPLES_DIR = Path(__file__).parent / "samples"
ZIP_SAMPLE = "zipbomb.zip"


class TestZipBomb(asynctest.TestCase):

    async def test_zipbomb(self):
        async with AAPI() as api:
            probelist = await api.probes.list()
            probe = 'Unarchive'
            if probe not in probelist:
                raise asynctest.SkipTest(
                    "Skipping {} not present".format(probe))
            sample = SAMPLES_DIR / ZIP_SAMPLE
            # scan() is presumably a coroutine like the other AAPI calls
            # here, so it must be awaited before its results are inspected
            scan = await api.scans.scan(
                [sample], linger=True, probes=[probe], force=True)
            self.assertEqual(len(scan.results), 1)
            self.assertEqual(scan.probes_finished, 1)
            result = await api.scans.result(scan.results[0])
            self.assertEqual(len(result.probe_results), 1)
            probe_result = result.probe_results[0]
            self.assertEqual(probe_result.status, -1)
            self.assertIsNotNone(probe_result.error)
            # unittest has no assertNone; assertIsNone is the correct method
            self.assertIsNone(probe_result.results)


if __name__ == "__main__":
    asynctest.main()
nilq/baby-python
python
from django.db import models


class Category(models.Model):
    name = models.CharField(max_length=128, unique=True)

    def __str__(self):
        return self.name


class Page(models.Model):
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    title = models.CharField(max_length=128)
    url = models.URLField()
    views = models.IntegerField(default=0)

    def __str__(self):
        return self.title
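A brief usage sketch of these models from a Django shell. The import path is an assumption (the app name is not visible in this file), and migrations are assumed to have been applied:

from myapp.models import Category, Page  # 'myapp' is a placeholder app name

python_cat = Category.objects.create(name="Python")
Page.objects.create(category=python_cat, title="Official Docs",
                    url="https://docs.python.org/")
# The ForeignKey enables reverse lookups such as:
Page.objects.filter(category__name="Python")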
nilq/baby-python
python
#!/usr/bin/env python

# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# mock, just outputs empty .h/.cpp files

import os
import sys

if len(sys.argv) == 2:
    basename, ext = os.path.splitext(sys.argv[1])
    with open('%s.h' % basename, 'w') as f:
        f.write('// %s.h\n' % basename)
    with open('%s.cpp' % basename, 'w') as f:
        f.write('// %s.cpp\n' % basename)
nilq/baby-python
python
from .unigram import UniGramModel
nilq/baby-python
python
import os

import pandas as pd

jaea_fns_175 = pd.read_csv(
    os.path.join(__path__[0], "JAEA_FNS_175.csv")).set_index("E")
nilq/baby-python
python
import torch
import torch.nn as nn
from utils import split_data, read_json_file, get_text
from dataset import my_dataset, my_collate_fn
from model import my_model, weights_init
from engine import train_fn, eval_fn
import cv2
from sklearn import model_selection
import pandas as pd

vocab = "- !#$%&'()*+,./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`lr{|}~\""
num_cha = len(vocab)
print(num_cha)

data = read_json_file(path='../data/For_task_2/data.json')
img_paths = list(data.keys())
txt_paths = list(data.values())

batch_size = 32
X_train, X_val, y_train, y_val = model_selection.train_test_split(
    img_paths, txt_paths, test_size=0.2, random_state=1)

train_dataset = my_dataset(X_train, y_train, vocab)
val_dataset = my_dataset(X_val, y_val, vocab)
#test_dataset = my_dataset(X_test, y_test)
print(len(train_dataset))

train_dataloader = torch.utils.data.DataLoader(
    train_dataset, batch_size, shuffle=True, collate_fn=my_collate_fn,)
val_dataloader = torch.utils.data.DataLoader(
    val_dataset, batch_size, shuffle=False, collate_fn=my_collate_fn,)
#test_dataloader = torch.utils.data.DataLoader(
#    test_dataset, batch_size, shuffle=False, collate_fn=my_collate_fn,)

model = my_model(num_cha)
model.apply(weights_init)

NUM_EPOCHS = 50
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Using ", device)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)

MODEL_SAVE_PATH = './weights/my_model.pth'
# model.load_state_dict(torch.load(MODEL_SAVE_PATH))


def train(model, MODEL_SAVE_PATH, NUM_EPOCHS, optimizer):
    best_val_loss = 999
    print("Training...")
    log = []
    for epoch in range(1, NUM_EPOCHS + 1):
        train_loss = train_fn(model, train_dataloader, optimizer, device)
        val_loss = eval_fn(model, val_dataloader, device)
        log_epoch = {"epoch": epoch, "train_loss": train_loss, "val_loss": val_loss}
        log.append(log_epoch)
        df = pd.DataFrame(log)
        df.to_csv("./weights/logs2.csv")
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), MODEL_SAVE_PATH)
        # the loop already starts at 1, so print epoch rather than
        # epoch + 1 (the original was off by one in its log line)
        print("Epoch {} || epoch_train_loss: {:.4f} || Epoch_val_loss: {:.4f}".format(
            epoch, train_loss, val_loss))


train(model, MODEL_SAVE_PATH, NUM_EPOCHS, optimizer)
nilq/baby-python
python
# creating tuples

# empty tuple
s1 = ()
print('s1 : ', s1)

# tuple with multiple elements and accessing it
s2 = (2782, 'thakur', 99)
print('s2 : ', s2)

# another way to create tuples and access them
S3 = (82, 85, 96, 56, 70, 99)
print('S3 : ', S3)

s4 = 74, 'sandeep', 90
print('s4 : ', s4)

# note: parentheses alone do not make a tuple; this is just the int 82
s3 = (82)
print('s3=(82): ', s3)

# creating a new tuple and including previous tuple values in it
s5 = s1, (27, 'thakur')
print('s5=s1,(27,\'thakur\') : ', s5)

# 1 slicing
print(S3[0])
print('s3[0] : ', S3[0])
print('s3[::-1] : ', S3[::-1])
print('s3[0:2] : ', S3[0:2])

# 2 add, i.e. concatenation
print('s3+s2 : ', S3 + s2)

# 3 replication
print('s3*5 : ', S3 * 5)

# some functions of tuples
print('min(s3) : ', min(S3))
print('max(s3) : ', max(S3))
print('len(s3) : ', len(S3))
nilq/baby-python
python
P = 10
objects = [(5, 18), (2, 9), (4, 12), (6, 25)]
print("Items available: ", objects)
print("***********************************")

objects = filter(lambda x: x[0] <= P, objects)
objects = sorted(objects, key=lambda x: x[1] / x[0], reverse=True)

weight, value, subset = 0, 0, []
print("Items filtered and sorted: ", objects)
print("***********************************")

for item in objects:
    if weight + item[0] <= P:
        weight = weight + item[0]
        value = value + item[1]
        subset.append(item)

print("Subset selected: ", subset)
print("Total value: ", value)
print("Total weight: ", weight)
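Tracing the greedy by value/weight ratio on the items above, as a hand check: the ratios are 18/5 = 3.6, 9/2 = 4.5, 12/4 = 3.0 and 25/6 ≈ 4.17 (no item exceeds P = 10, so the filter removes nothing), giving the scan order (2, 9), (6, 25), (5, 18), (4, 12). The loop takes (2, 9) and (6, 25) for weight 8 and value 34; the remaining items no longer fit. Note this greedy is exact for the fractional knapsack but only a heuristic for the 0/1 variant implemented here.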
nilq/baby-python
python
from setuptools import setup, find_packages

setup(
    name="JsonDataManager",
    license="MIT",
    version="1.0",
    author="PieSignal",
    author_email="leeon@insiro.me",
    url="https://github.com/PieSignal/JsonDataManager",
    # pip resolves dependencies from install_requires; the original used
    # 'requires', a distutils keyword that pip ignores
    install_requires=["typing >= 3.7.4.1, <4"],
    packages=find_packages(),
)
nilq/baby-python
python
import json
import re
import sys
from math import sin, cos, sqrt, atan2, radians


def main():
    LAT_ORIGIN = radians(39.103119)   # YOUR LOCATION LATITUDE IN ( )
    LON_ORIGIN = radians(-84.512016)  # YOUR LOCATION LONGITUDE IN ( )
    radius_of_earth = 6378.0

    results = []
    with open("list.txt") as airports:
        with open('airports.json') as json_file:
            data = json.load(json_file)
            for line in airports:
                if line.strip():
                    regex = r"\((.*)\)"
                    matches = re.search(regex, line)
                    if matches:
                        DEST = "K" + matches.group(1)
                        #for airport in data:
                        airport = data[DEST]
                        #if DEST == airport:
                        lat2 = radians(airport["lat"])
                        lon2 = radians(airport["lon"])
                        dlon = lon2 - LON_ORIGIN
                        dlat = lat2 - LAT_ORIGIN
                        a = sin(dlat / 2)**2 + cos(LAT_ORIGIN) * \
                            cos(lat2) * sin(dlon / 2)**2
                        c = 2 * atan2(sqrt(a), sqrt(1 - a))
                        if len(sys.argv) > 1:
                            if sys.argv[1] == "-km":
                                distance = radius_of_earth * c
                            else:
                                distance = radius_of_earth * c * .621371
                        else:
                            distance = radius_of_earth * c * .621371
                        result = {
                            "name": airport["name"],
                            "distance": distance
                        }
                        results.append(result)

    # drop duplicate entries, then sort by distance
    results = [dict(t) for t in {tuple(d.items()) for d in results}]
    results = sorted(results, key=lambda k: k['distance'])
    for result in results:
        print(result)


if __name__ == "__main__":
    import time
    start = time.time()
    main()
    end = time.time()
    print(end - start)
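The distance computation above is the haversine formula. Written out, with phi for latitude and lambda for longitude:

a = \sin^2\left(\frac{\Delta\varphi}{2}\right) + \cos\varphi_1 \, \cos\varphi_2 \, \sin^2\left(\frac{\Delta\lambda}{2}\right),
\qquad d = 2R \, \operatorname{atan2}\left(\sqrt{a}, \sqrt{1-a}\right)

Here R = 6378 km is the equatorial radius used by the script, and the 0.621371 factor converts kilometers to miles.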
nilq/baby-python
python
import vcf
import argparse
from record import Record, PhaseSet, ChromosomoHaplotype
from stats import PhaseSetStats, HapStats


def get_phase_set_stats(template_phase_set: PhaseSet, phase_set: PhaseSet):
    prev_record: Record
    record: Record
    t_record: Record
    t_prev_record: Record
    record_count = 0
    switch_error_count = 0
    mismatch_error_count = 0
    total_record = len(phase_set.records_idx)
    prev_switch_error = False
    last_record_pos = 0
    last_record_idx = 0
    first_record_idx = 0
    for record_pos in phase_set.records.keys():
        record = phase_set.records[record_pos]
        record_count += 1
        t_record = template_phase_set.records[record_pos]
        if record_count == total_record:
            last_record_idx = record.idx
            last_record_pos = record.pos
        if record_count == 1:
            prev_record = record
            first_record_idx = record.idx
            t_prev_record = t_record
        else:
            switched = record.switched(prev_record)
            t_switched = t_record.switched(t_prev_record)
            if switched != t_switched:  # switch error
                if record_count > 2 and record_count < total_record:
                    switch_error_count += 1
                    if prev_switch_error:  # mismatch error
                        mismatch_error_count += 1
                        switch_error_count -= 2
                        prev_switch_error = False
                    else:
                        prev_switch_error = True
            else:  # no switch error for adjacent pos, reset
                prev_switch_error = False
            prev_record = record
            t_prev_record = t_record
    S50 = total_record
    N50 = last_record_pos - phase_set.starting_pos
    spaned_record = last_record_idx - first_record_idx + 1
    AN50 = N50 / spaned_record * S50
    return AN50, S50, N50, switch_error_count, mismatch_error_count, spaned_record


def get_haplotype_stats_chromo(template_chromo: ChromosomoHaplotype,
                               in_chromo: ChromosomoHaplotype, out, contig):
    template_phase_set: PhaseSet
    phase_set: PhaseSet
    template_phase_set = list(template_chromo.chromo_phase_set.values())[0]
    chromo_snp_count = len(template_phase_set.records_idx)
    chromo_span = max(template_phase_set.records_idx) - min(template_phase_set.records_idx)
    hap_stats = HapStats(chromo_snp_count, chromo_span)
    index = 0
    for phase_set in in_chromo.chromo_phase_set.values():
        AN50, S50, N50, switch_error_count, mismatch_error_count, spanned_snp = \
            get_phase_set_stats(template_phase_set, phase_set)
        phase_set_stats = PhaseSetStats(switch_error_count, mismatch_error_count,
                                        S50, N50, AN50, spanned_snp)
        if S50 < 2:
            continue
        hap_stats.insert_phase_set_stats(0, phase_set_stats)
        index += 1
        out.write("%s\t%d\t%d\t%d\t%d\t%.8f\t%.8f\n" % (
            contig, phase_set_stats.get_AN50(), phase_set_stats.get_N50(),
            phase_set_stats.get_phased_snp(), spanned_snp,
            phase_set_stats.get_switch_error(), phase_set_stats.get_mismatch_error()))
    out.write("%s\t%d\t%d\t%d\t%d\t%.8f\t%.8f\n" % (
        contig + "_total", hap_stats.get_AN50(), hap_stats.get_N50(),
        hap_stats.get_total_phased(), hap_stats.get_total_spanned(),
        hap_stats.get_switch_error(), hap_stats.get_mismatch_error()))
    return hap_stats


def get_haplotype_stats(template_vcf: vcf.Reader, in_vcf: vcf.Reader, out):
    contigs = in_vcf.contigs.keys()
    hap_stats = HapStats()
    for contig in contigs:
        try:
            template_vcf.fetch(contig)
            template_chromo = ChromosomoHaplotype(template_vcf, contig)
            in_chromo = ChromosomoHaplotype(in_vcf, contig)
            chromo_hap_stats = get_haplotype_stats_chromo(template_chromo, in_chromo, out, contig)
            hap_stats.insert_hap_stats(chromo_hap_stats)
        except Exception:
            # skip contigs that are missing from the template VCF (the
            # original used a bare except, which also swallows SystemExit)
            continue
    out.write("%s\t%d\t%d\t%d\t%d\t%.8f\t%.8f\n" % (
        "total", hap_stats.get_AN50(), hap_stats.get_N50(),
        hap_stats.get_total_phased(), hap_stats.get_total_spanned(),
        hap_stats.get_switch_error(), hap_stats.get_mismatch_error()))


def main():
    parser = argparse.ArgumentParser('phaseset_to_vcf.py')
    parser.add_argument('-t', '--template', help='template vcf, indexed', required=True)
    parser.add_argument('-v', '--vcf', help='input vcf, indexed', required=True)
    parser.add_argument('-o', '--out', help='output stats', required=True)
    options = parser.parse_args()
    in_vcf = vcf.Reader(filename=options.vcf)
    template_vcf = vcf.Reader(filename=options.template)
    outf = open(options.out, 'w')
    outf.write("Chromosome\tAN50\tN50\tphased_snp\ttotal_snp\tswitch_error_rate\tmismatch_error_rate\n")
    get_haplotype_stats(template_vcf, in_vcf, outf)
    outf.close()
    return


if __name__ == '__main__':
    main()
nilq/baby-python
python
# File that prepares the transcripts into CSV for insertion into the database
# Created by Thomas Orth
import pandas as pd
import sys

# CHANGE THESE VALUES DEPENDING ON THE TRANSCRIPT
name = "Charles Terry"
summary = "Charles Terry is interviewed about his life in old trenton and other aspects such as working for the Board of Education."
audio_path = "https://archive.org/download/CharlesTerryInterview415115/Charles%20Terry%20Interview%204%EF%80%A215%EF%80%A215.MP3"
text_path = "charles.pdf"
title = "Charles Terry Interview Transcription"
content = ""

# Read raw transcript data
with open(sys.argv[1]) as f:
    content = ''.join(f.readlines())

# Prepare the transcript csv
x = pd.DataFrame(columns=['title', 'text_file_path', 'audio_file_path', 'summary', 'text_content'],
                 data=[[title, text_path, audio_path, summary, content.replace('"', '')]])
x.to_csv("insert_data_transcript.csv", sep="|", index=False)

# Prepare the participants csv
participants = [[name]]
p = pd.DataFrame(columns=['name'], data=participants)
p.to_csv("insert_data_participants.csv", sep="|", index=False)

# Prepare the locations CSV
locations = [["Mercer Street"]]
l = pd.DataFrame(columns=['street_name'], data=locations)
l.to_csv("insert_data_locations.csv", sep="|", index=False)

# Prepare the keywords CSV
keywords = [["charles"], ["neighborhood"]]
k = pd.DataFrame(columns=['keyword'], data=keywords)
k.to_csv('insert_data_keywords.csv', sep="|", index=False)
nilq/baby-python
python
from __future__ import print_function
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
import matplotlib.patches as patches


class PQTNode:
    """PQT Node class"""
    def __init__(self, bounds=[[0., 1.], [0., 1.]]):
        self.children = []
        self.bounds = bounds
        self.content = defaultdict(list)
        self.p = 0.

    def __str__(self):
        return "[{:.3},{:.3}]x[{:.3},{:.3}] ".format(self.bounds[0][0], self.bounds[0][1],
                                                     self.bounds[1][0], self.bounds[1][1]) \
               + "{} chldrn {:.3} prb".format(len(self.children), self.p)

    def __repr__(self):
        return "PQTNode({}, {})".format(self.bounds[0], self.bounds[1])

    def split(self):
        """ Adds children to the current node """
        x0, x1 = self.bounds[0]
        y0, y1 = self.bounds[1]
        xc, yc = 0.5 * (x0 + x1), 0.5 * (y0 + y1)
        # Add subcoordinates
        self.children = [
            PQTNode([[x0, xc], [y0, yc]]),
            PQTNode([[xc, x1], [y0, yc]]),
            PQTNode([[xc, x1], [yc, y1]]),
            PQTNode([[x0, xc], [yc, y1]])
        ]
        return self.children

    def encloses(self, coord):
        """ Checks if point passed is bounded

            Parameters:
                coord - tuple of point

            Returns:
                whether or not enclosing
        """
        x0, x1 = self.bounds[0]
        y0, y1 = self.bounds[1]
        return x0 <= coord[0] < x1 \
            and y0 <= coord[1] < y1

    def draw(self, ax, show_prob=False, p_hat=0.01):
        """ Draws a rectangle corresponding to the cell"""
        x0, x1 = self.bounds[0]
        y0, y1 = self.bounds[1]
        ax.add_patch(patches.Rectangle((x0, y0), x1 - x0, y1 - y0,
                                       fill=None, linewidth=0.5))
        if show_prob:
            ax.add_patch(patches.Rectangle((x0, y0), x1 - x0, y1 - y0,
                                           linewidth=0.5, alpha=self.p / p_hat,
                                           facecolor="red"))

    def center(self):
        return [0.5 * sum(self.bounds[0]), 0.5 * sum(self.bounds[1])]


class PQTDecomposition:
    """PQT Decomposition data structure class"""
    def __init__(self):
        self.root = PQTNode()
        self.leaves = []

    def from_points(self, points=[], p_hat=0.01):
        """ Initialize from points

            Parameters:
                points - list of sample point tuples,
                p_hat - maximum probability of a leaf,
        """
        n_pts = float(len(points))
        # Check that atoms do not have probability higher than p_hat, if they
        # are then we set p_hat to the probability of an atom.
        atom_p = 1. / n_pts
        self.p_hat = atom_p if (atom_p > p_hat) else p_hat

        def gen_pqt(node, pts):
            node.p = len(pts) / n_pts
            # The first condition is the subpartitioning rule for a pqt.
            if node.p >= p_hat and len(pts) > 1:
                # Add children to the current node
                node.split()
                # For each new node, generate from all points that fall inside
                # the cell
                for child in node.children:
                    gen_pqt(child, [pt for pt in pts if child.encloses(pt)])
            else:
                # Otherwise the node is a leaf, so add it
                self.leaves.append(node)

        # Start recursion through the root node
        gen_pqt(self.root, points)
        return self

    def from_pdf(self, pdf, p_hat=0.01):
        """ Initialize from pdf

            Parameters:
                pdf - function f(x,y) with compact support contained in the
                      bounding square
                p_hat - maximum probability of a leaf
        """
        from scipy.integrate import nquad
        self.p_hat = p_hat

        def gen_pqt(node):
            # Compute the probability over the cell
            node.p, _ = nquad(pdf, node.bounds)
            # If the probability is too high then split the cell and generate
            # sub-trees
            if node.p >= p_hat:
                node.split()
                for child in node.children:
                    gen_pqt(child)
            else:
                # Otherwise the node is a leaf
                self.leaves.append(node)

        gen_pqt(self.root)
        return self

    # the original defined __ref__, which Python never calls; __repr__ is
    # the intended special method
    def __repr__(self):
        return "PQTDecomposition()"

    def __str__(self):
        print_str = ""
        # Store node, depth data on stack. Work through tree depth first
        node_stack = [(self.root, 0)]
        # If there are things on the stack
        while node_stack:
            node, depth = node_stack.pop()
            i = None
            # 'xrange' in the original is Python 2 only; range keeps the
            # same behavior on Python 3
            for i in range(depth):
                print_str += "  "
            else:
                if i is not None:
                    print_str += "- "
            print_str += str(node) + "\n"
            # If the node has children then process them next on the stack
            for child in node.children:
                node_stack.append((child, depth + 1))
        return print_str

    def enclosing_leaf(self, coords):
        def _get_leaf(node):
            # Check all children (if any)
            for child in node.children:
                # Search down branch if contains coord
                if child.encloses(coords):
                    return _get_leaf(child)
            return node

        # Check if the point is enclosed by the pqt
        if self.root.encloses(coords):
            return _get_leaf(self.root)
        return None

    def add_point(self, coord, attr='pts'):
        leaf = self.enclosing_leaf(coord)
        if not leaf:
            return False
        leaf.content[attr].append(coord)
        return True

    def add_points(self, coords, attr='pts'):
        all_suc = True
        for coord in coords:
            all_suc &= self.add_point(coord, attr=attr)

    def draw(self, show_prob=False):
        """ Draws the pqt using matplotlib

            Parameters:
                show_prob - whether or not probability should be displayed
                            as a shade
        """
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        for leaf in self.leaves:
            leaf.draw(ax, show_prob=show_prob, p_hat=self.p_hat)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.plot()
        plt.show()


if __name__ == "__main__":
    from random import random
    #n_pts = 1000
    #pts = [(random(), random()) for i in xrange(n_pts)]
    #decomp = PQTDecomposition().from_points(pts, p_hat=0.001, store=True)

    def pdf(x, y):
        return 3. / 4. * (2 - x**2 - y**2)

    decomp = PQTDecomposition().from_pdf(pdf, p_hat=0.001)
    empt_leaf = decomp.enclosing_leaf([0.9, 0.9])
    decomp.draw(show_prob=True)
nilq/baby-python
python
''' '''
'''
 ISC License

 Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder

 Permission to use, copy, modify, and/or distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
 copyright notice and this permission notice appear in all copies.

 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''

#
# Basilisk Scenario Script and Integrated Test
#
# Purpose:  Integrated test of the spacecraftPlus(), extForceTorque, simpleNav(),
#           MRP_Feedback() with attitude navigation modules.  Illustrates how
#           attitude guidance behavior can be changed in a very modular manner.
# Author:   Hanspeter Schaub
# Creation Date:  Dec. 2, 2016
#

import pytest
import os
import numpy as np

# import general simulation support files
from Basilisk.utilities import SimulationBaseClass
from Basilisk.utilities import unitTestSupport  # general support file with common unit test functions
import matplotlib.pyplot as plt
from Basilisk.utilities import macros
from Basilisk.utilities import orbitalMotion
from Basilisk.utilities import RigidBodyKinematics

# import simulation related support
from Basilisk.simulation import spacecraftPlus
from Basilisk.simulation import extForceTorque
from Basilisk.utilities import simIncludeGravBody
from Basilisk.simulation import simple_nav

# import FSW Algorithm related support
from Basilisk.fswAlgorithms import MRP_Feedback
from Basilisk.fswAlgorithms import hillPoint
from Basilisk.fswAlgorithms import attTrackingError

# import message declarations
from Basilisk.fswAlgorithms import fswMessages


# Plotting functions
def plot_attitude_error(timeLineSet, dataSigmaBR):
    plt.figure(1)
    fig = plt.gcf()
    ax = fig.gca()
    vectorData = unitTestSupport.pullVectorSetFromData(dataSigmaBR)
    sNorm = np.array([np.linalg.norm(v) for v in vectorData])
    plt.plot(timeLineSet, sNorm,
             color=unitTestSupport.getLineColor(1, 3),
             )
    plt.xlabel('Time [min]')
    plt.ylabel('Attitude Error Norm $|\sigma_{B/R}|$')
    ax.set_yscale('log')


def plot_control_torque(timeLineSet, dataLr):
    plt.figure(2)
    for idx in range(1, 4):
        plt.plot(timeLineSet, dataLr[:, idx],
                 color=unitTestSupport.getLineColor(idx, 3),
                 label='$L_{r,' + str(idx) + '}$')
    plt.legend(loc='lower right')
    plt.xlabel('Time [min]')
    plt.ylabel('Control Torque $L_r$ [Nm]')


def plot_rate_error(timeLineSet, dataOmegaBR):
    plt.figure(3)
    for idx in range(1, 4):
        plt.plot(timeLineSet, dataOmegaBR[:, idx],
                 color=unitTestSupport.getLineColor(idx, 3),
                 label='$\omega_{BR,' + str(idx) + '}$')
    plt.legend(loc='lower right')
    plt.xlabel('Time [min]')
    plt.ylabel('Rate Tracking Error [rad/s] ')
    return


def plot_orientation(timeLineSet, dataPos, dataVel, dataSigmaBN):
    vectorPosData = unitTestSupport.pullVectorSetFromData(dataPos)
    vectorVelData = unitTestSupport.pullVectorSetFromData(dataVel)
    vectorMRPData = unitTestSupport.pullVectorSetFromData(dataSigmaBN)
    data = np.empty([len(vectorPosData), 3])
    for idx in range(0, len(vectorPosData)):
        ir = vectorPosData[idx] / np.linalg.norm(vectorPosData[idx])
        hv = np.cross(vectorPosData[idx], vectorVelData[idx])
        ih = hv / np.linalg.norm(hv)
        itheta = np.cross(ih, ir)
        dcmBN = RigidBodyKinematics.MRP2C(vectorMRPData[idx])
        data[idx] = [np.dot(ir, dcmBN[0]), np.dot(itheta, dcmBN[1]), np.dot(ih, dcmBN[2])]
    plt.figure(4)
    labelStrings = (r'$\hat\imath_r\cdot \hat b_1$',
                    r'${\hat\imath}_{\theta}\cdot \hat b_2$',
                    r'$\hat\imath_h\cdot \hat b_3$')
    for idx in range(0, 3):
        plt.plot(timeLineSet, data[:, idx],
                 color=unitTestSupport.getLineColor(idx + 1, 3),
                 label=labelStrings[idx])
    plt.legend(loc='lower right')
    plt.xlabel('Time [min]')
    plt.ylabel('Orientation Illustration')


## \defgroup Tutorials_2_1
## @{
## How to use guidance modules to align the spacecraft frame to the orbit or Hill frame.
#
# Attitude Alignment with Hill Orbit Frame {#scenarioAttitudeGuidance}
# ====
#
# Scenario Description
# -----
# This script sets up a 6-DOF spacecraft which is orbiting the Earth. The scenario is
# setup to be run in two different setups:
# Setup | useAltBodyFrame
# ----- | -------------------
# 1     | False
# 2     | True
#
# To run the default scenario 1., call the python script through
#
#       python scenarioAttitudeGuidance.py
#
# The simulation layout is shown in the following illustration. A single simulation process is created
# which contains both the spacecraft simulation modules, as well as the Flight Software (FSW) algorithm
# modules.
# ![Simulation Flow Diagram](Images/doc/test_scenarioAttitudeGuidance.svg "Illustration")
#
# When the simulation completes 4 plots are shown for the MRP attitude history, the rate
# tracking errors, the control torque vector, as well as the projection of the body-frame B
# axes \f$\hat b_1\f$, b2 and b3 onto the respective Hill or Orbit frame axes \f$\hat\imath_r\f$,
# \f$\hat\imath_{\theta}\f$ and \f$\hat\imath_h\f$.  This latter plot illustrates how the body
# is being aligned with respect to this Hill frame.
#
# The basic simulation setup is the same as the one used in
# [scenarioAttitudeFeedback.py](@ref scenarioAttitudeFeedback).
# The dynamics simulation is setup using a SpacecraftPlus() module to which a gravity
# effector is attached.  Note that both the rotational and translational degrees of
# freedom of the spacecraft hub are turned on here to get a 6-DOF simulation.  For more
# information on how to setup an orbit, see [scenarioBasicOrbit.py](@ref scenarioBasicOrbit)
#
# However, instead of doing an inertial pointing maneuver, here the hillFrame() attitude guidance module
# is used:
# ~~~~~~~~~~~~~{.py}
#     attGuidanceConfig = hillPoint.hillPointConfig()
#     attGuidanceWrap = scSim.setModelDataWrap(attGuidanceConfig)
#     attGuidanceWrap.ModelTag = "hillPoint"
#     attGuidanceConfig.inputNavDataName = sNavObject.outputTransName
#     attGuidanceConfig.inputCelMessName = earth.bodyInMsgName
#     attGuidanceConfig.outputDataName = "guidanceOut"
#     scSim.AddModelToTask(simTaskName, attGuidanceWrap, attGuidanceConfig)
# ~~~~~~~~~~~~~
#
# In contrast to the simple inertial pointing guidance module, this module also requires the
# spacecraft's position and velocity information.  The planet ephemeris message relative to which the Hill pointing
# is being achieved is set through the `inputCelMessName` message.
# This is useful, for example, if orbiting the sun, and wanting to point the spacecraft back at the
# Earth which is also orbiting the sun.  In this scenario, however, the spacecraft is to point at the
# Earth while already orbiting the Earth.  Thus, this planet ephemeris input message is not set, which
# in return zeros the planets position and velocity vector states in the guidance module.
#
# Setup 1
# -----
#
# Which scenario is run is controlled at the bottom of the file in the code
# ~~~~~~~~~~~~~{.py}
# if __name__ == "__main__":
#     run(
#         True,   # show_plots
#         False   # useAltBodyFrame
#     )
# ~~~~~~~~~~~~~
# The first 2 arguments can be left as is.  The remaining argument(s) control the
# simulation scenario flags to turn on or off certain simulation conditions.  The
# default scenario shown has the `useAltBodyFrame` flag turned off.  This means that we seek
# to align the body frame *B* with the Hill reference frame *R*.  The
# resulting attitude and control torque histories are shown below.  Note that the projections
# of the body frame axes onto the Hill frame axes all converge to +1, indicating that B becomes
# asymptotically aligned with R as desired.
# ![MRP Attitude History](Images/Scenarios/scenarioAttitudeGuidance10.svg "MRP history")
# ![Control Torque History](Images/Scenarios/scenarioAttitudeGuidance20.svg "Torque history")
# ![Body/Hill Frame Axis Projections](Images/Scenarios/scenarioAttitudeGuidance40.svg "Axes Projection")
#
# Setup 2
# -----
#
# To run the second scenario, change the main routine at the bottom of the file to read:
# ~~~~~~~~~~~~~{.py}
# if __name__ == "__main__":
#     run(
#         True,   # show_plots
#         True    # useAltBodyFrame
#     )
# ~~~~~~~~~~~~~
# Here the control should not align the principal body frame *B* with *R*, but rather an alternate,
# corrected body frame *Bc*.  For example, consider the Earth observing sensors to be mounted pointing in the
# positive \f$\hat b_1\f$ direction.  In scenario 1 this sensor platform is actually pointing away from
# the Earth.  Thus, we define the corrected body frame orientation as a 180 deg rotation about
# \f$\hat b_2\f$.  This flips the orientation of the final first and third body axis.  This is achieved
# through:
# ~~~~~~~~~~~~~{.py}
#   attErrorConfig.sigma_R0R = [0,1,0]
# ~~~~~~~~~~~~~
# The DCM \f$[R_0R]\f$ is the same as the body to corrected body DCM \f$[B_cB]\f$.
# The resulting attitude and control torque histories are shown below.  Note that the projections
# of the 2nd body frame axis onto the 2nd Hill frame axes converges to +1, while the other
# projections converge to -1.  This indicates that the desired asymptotic Earth observing attitude
# is achieved.
# ![MRP Attitude History](Images/Scenarios/scenarioAttitudeGuidance11.svg "MRP history")
# ![Control Torque History](Images/Scenarios/scenarioAttitudeGuidance21.svg "Torque history")
# ![Body/Hill Frame Axis Projections](Images/Scenarios/scenarioAttitudeGuidance41.svg "Axes Projection")
#
## @}
def run(show_plots, useAltBodyFrame):
    '''Call this routine directly to run the tutorial scenario.'''

    # Create simulation variable names
    simTaskName = "simTask"
    simProcessName = "simProcess"

    #  Create a sim module as an empty container
    scSim = SimulationBaseClass.SimBaseClass()
    scSim.TotalSim.terminateSimulation()

    # set the simulation time variable used later on
    simulationTime = macros.min2nano(10.)

    #
    #  create the simulation process
    #
    dynProcess = scSim.CreateNewProcess(simProcessName)

    # create the dynamics task and specify the integration update time
    simulationTimeStep = macros.sec2nano(0.1)
    dynProcess.addTask(scSim.CreateNewTask(simTaskName, simulationTimeStep))

    # if this scenario is to interface with the BSK Viz, uncomment the following lines
    # unitTestSupport.enableVisualization(scSim, dynProcess, simProcessName, 'earth')
    # The Viz only support 'earth', 'mars', or 'sun'

    #
    #   setup the simulation tasks/objects
    #

    # initialize spacecraftPlus object and set properties
    scObject = spacecraftPlus.SpacecraftPlus()
    scObject.ModelTag = "spacecraftBody"
    # define the simulation inertia
    I = [900., 0., 0.,
         0., 800., 0.,
         0., 0., 600.]
    scObject.hub.mHub = 750.0  # kg - spacecraft mass
    scObject.hub.r_BcB_B = [[0.0], [0.0], [0.0]]  # m - position vector of body-fixed point B relative to CM
    scObject.hub.IHubPntBc_B = unitTestSupport.np2EigenMatrix3d(I)

    # add spacecraftPlus object to the simulation process
    scSim.AddModelToTask(simTaskName, scObject)

    # clear prior gravitational body and SPICE setup definitions
    gravFactory = simIncludeGravBody.gravBodyFactory()

    # setup Earth Gravity Body
    earth = gravFactory.createEarth()
    earth.isCentralBody = True  # ensure this is the central gravitational body
    mu = earth.mu

    # attach gravity model to spaceCraftPlus
    scObject.gravField.gravBodies = spacecraftPlus.GravBodyVector(gravFactory.gravBodies.values())

    #
    #   initialize Spacecraft States with initialization variables
    #
    # setup the orbit using classical orbit elements
    oe = orbitalMotion.ClassicElements()
    oe.a = 10000000.0  # meters
    oe.e = 0.1
    oe.i = 33.3 * macros.D2R
    oe.Omega = 48.2 * macros.D2R
    oe.omega = 347.8 * macros.D2R
    oe.f = 85.3 * macros.D2R
    rN, vN = orbitalMotion.elem2rv(mu, oe)
    scObject.hub.r_CN_NInit = unitTestSupport.np2EigenVectorXd(rN)  # m   - r_CN_N
    scObject.hub.v_CN_NInit = unitTestSupport.np2EigenVectorXd(vN)  # m/s - v_CN_N
    scObject.hub.sigma_BNInit = [[0.1], [0.2], [-0.3]]        # sigma_BN_B
    scObject.hub.omega_BN_BInit = [[0.001], [-0.01], [0.03]]  # rad/s - omega_BN_B

    # setup extForceTorque module
    # the control torque is read in through the messaging system
    extFTObject = extForceTorque.ExtForceTorque()
    extFTObject.ModelTag = "externalDisturbance"
    # use the input flag to determine which external torque should be applied
    # Note that all variables are initialized to zero.  Thus, not setting this
    # vector would leave its components all zero for the simulation.
    scObject.addDynamicEffector(extFTObject)
    scSim.AddModelToTask(simTaskName, extFTObject)

    # add the simple Navigation sensor module.  This sets the SC attitude, rate, position
    # velocity navigation message
    sNavObject = simple_nav.SimpleNav()
    sNavObject.ModelTag = "SimpleNavigation"
    scSim.AddModelToTask(simTaskName, sNavObject)

    #
    #   setup the FSW algorithm tasks
    #

    # setup hillPoint guidance module
    attGuidanceConfig = hillPoint.hillPointConfig()
    attGuidanceWrap = scSim.setModelDataWrap(attGuidanceConfig)
    attGuidanceWrap.ModelTag = "hillPoint"
    attGuidanceConfig.inputNavDataName = sNavObject.outputTransName
    attGuidanceConfig.inputCelMessName = earth.bodyInMsgName
    attGuidanceConfig.outputDataName = "guidanceOut"
    scSim.AddModelToTask(simTaskName, attGuidanceWrap, attGuidanceConfig)

    # setup the attitude tracking error evaluation module
    attErrorConfig = attTrackingError.attTrackingErrorConfig()
    attErrorWrap = scSim.setModelDataWrap(attErrorConfig)
    attErrorWrap.ModelTag = "attErrorInertial3D"
    scSim.AddModelToTask(simTaskName, attErrorWrap, attErrorConfig)
    attErrorConfig.outputDataName = "attErrorMsg"
    if useAltBodyFrame:
        attErrorConfig.sigma_R0R = [0, 1, 0]
    attErrorConfig.inputRefName = attGuidanceConfig.outputDataName
    attErrorConfig.inputNavName = sNavObject.outputAttName

    # setup the MRP Feedback control module
    mrpControlConfig = MRP_Feedback.MRP_FeedbackConfig()
    mrpControlWrap = scSim.setModelDataWrap(mrpControlConfig)
    mrpControlWrap.ModelTag = "MRP_Feedback"
    scSim.AddModelToTask(simTaskName, mrpControlWrap, mrpControlConfig)
    mrpControlConfig.inputGuidName = attErrorConfig.outputDataName
    mrpControlConfig.vehConfigInMsgName = "vehicleConfigName"
    mrpControlConfig.outputDataName = extFTObject.cmdTorqueInMsgName
    mrpControlConfig.K = 3.5
    mrpControlConfig.Ki = -1.0  # make value negative to turn off integral feedback
    mrpControlConfig.P = 30.0
    mrpControlConfig.integralLimit = 2. / mrpControlConfig.Ki * 0.1
    mrpControlConfig.domega0 = [0.0, 0.0, 0.0]

    #
    #   Setup data logging before the simulation is initialized
    #
    numDataPoints = 100
    samplingTime = simulationTime / (numDataPoints - 1)
    scSim.TotalSim.logThisMessage(mrpControlConfig.outputDataName, samplingTime)
    scSim.TotalSim.logThisMessage(attErrorConfig.outputDataName, samplingTime)
    scSim.TotalSim.logThisMessage(sNavObject.outputTransName, samplingTime)
    scSim.TotalSim.logThisMessage(sNavObject.outputAttName, samplingTime)

    #
    # create simulation messages
    #

    # create the FSW vehicle configuration message
    vehicleConfigOut = fswMessages.VehicleConfigFswMsg()
    vehicleConfigOut.ISCPntB_B = I  # use the same inertia in the FSW algorithm as in the simulation
    unitTestSupport.setMessage(scSim.TotalSim,
                               simProcessName,
                               mrpControlConfig.vehConfigInMsgName,
                               vehicleConfigOut)

    #
    #   initialize Simulation
    #
    scSim.InitializeSimulationAndDiscover()

    #
    #   configure a simulation stop time and execute the simulation run
    #
    scSim.ConfigureStopTime(simulationTime)
    scSim.ExecuteSimulation()

    #
    #   retrieve the logged data
    #
    dataLr = scSim.pullMessageLogData(mrpControlConfig.outputDataName + ".torqueRequestBody", range(3))
    dataSigmaBR = scSim.pullMessageLogData(attErrorConfig.outputDataName + ".sigma_BR", range(3))
    dataOmegaBR = scSim.pullMessageLogData(attErrorConfig.outputDataName + ".omega_BR_B", range(3))
    dataPos = scSim.pullMessageLogData(sNavObject.outputTransName + ".r_BN_N", range(3))
    dataVel = scSim.pullMessageLogData(sNavObject.outputTransName + ".v_BN_N", range(3))
    dataSigmaBN = scSim.pullMessageLogData(sNavObject.outputAttName + ".sigma_BN", range(3))
    np.set_printoptions(precision=16)

    #
    #   plot the results
    #
    fileName = os.path.basename(os.path.splitext(__file__)[0])
    timeLineSet = dataSigmaBR[:, 0] * macros.NANO2MIN
    plt.close("all")  # clears out plots from earlier test runs

    plot_attitude_error(timeLineSet, dataSigmaBR)
    figureList = {}
    pltName = fileName + "1" + str(int(useAltBodyFrame))
    figureList[pltName] = plt.figure(1)

    plot_control_torque(timeLineSet, dataLr)
    pltName = fileName + "2" + str(int(useAltBodyFrame))
    figureList[pltName] = plt.figure(2)

    plot_rate_error(timeLineSet, dataOmegaBR)

    plot_orientation(timeLineSet, dataPos, dataVel, dataSigmaBN)
    pltName = fileName + "4" + str(int(useAltBodyFrame))
    figureList[pltName] = plt.figure(4)

    if show_plots:
        plt.show()

    # close the plots being saved off to avoid over-writing old and new figures
    plt.close("all")

    return dataPos, dataSigmaBN, numDataPoints, figureList


#
# This statement below ensures that the unit test script can be run as a
# stand-alone python script
#
if __name__ == "__main__":
    run(
        True,   # show_plots
        False   # useAltBodyFrame
    )
nilq/baby-python
python
class Matrix(object):
    def __init__(self, matrix_string):
        self.__matrix = [[int(el) for el in line.split()]
                         for line in matrix_string.splitlines()]

    def row(self, index):
        return self.__matrix[index - 1].copy()

    def column(self, index):
        return [el[index - 1] for el in self.__matrix]
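A minimal usage sketch of the class above; the expected input is whitespace-separated rows, one per line, and indices are 1-based:

m = Matrix("1 2 3\n4 5 6\n7 8 9")
assert m.row(1) == [1, 2, 3]
assert m.column(3) == [3, 6, 9]
m.row(1).append(0)              # .copy() protects the internal state
assert m.row(1) == [1, 2, 3]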
nilq/baby-python
python
def texto(num):
    # Print a colored error message; the Portuguese text reads:
    # 'ERROR! "<num>" is not a valid value!'
    cores = {'Vermelho': '\033[31;1m', 'Azul': '\033[1;34m', 'Limpa': '\033[m'}
    print(f'{cores["Vermelho"]}ERRO! "{cores["Azul"]}{num}{cores["Vermelho"]}" não é um valor válido!{cores["Limpa"]}')


def leiadinheiro(msg):
    # "read money": keep prompting until the input parses as a monetary value
    while True:
        resp = str(input(msg)).strip()
        resp1 = resp.replace(' ', '')
        resp1 = resp1.replace(',', '.')
        if '.' in resp1:
            cont = 0
            val = True
            # scan character by character; the trailing space acts as a sentinel
            for pos, info in enumerate(resp1 + ' '):
                if cont > 1:
                    texto(resp)
                    break
                if str(info).isalpha():
                    val = False
                    texto(resp)
                    break
                if info in '.':
                    cont += 1
                if info == ' ':
                    if len(resp1) == 1:
                        texto(resp)
                        break
            if cont == 1 and len(resp1) != 1 and val:
                break
        elif resp1.isnumeric():
            break
        else:
            texto(resp)
    return float(resp1)
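An illustrative summary of how the validation above behaves (traced by hand from the code):

# leiadinheiro('Price: ') accepts inputs such as:
#   '42'      -> 42.0   (purely numeric)
#   '3,50'    -> 3.5    (comma is normalized to a dot)
#   '1 234.5' -> 1234.5 (spaces are stripped first)
# and re-prompts, via texto(), on inputs such as:
#   'abc' (letters), '1.2.3' (more than one dot), '.' (a lone separator)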
nilq/baby-python
python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Static data and helper functions."""

import math
import re
import sys
import time

import boto

from third_party.retry_decorator.decorators import retry

# We don't use the oauth2 authentication plugin directly; importing it here
# ensures that it's loaded and available by default. Note: we made this static
# state instead of Command instance state because the top-level gsutil code
# needs to check it.
HAVE_OAUTH2 = False
try:
    from oauth2_plugin import oauth2_helper
    HAVE_OAUTH2 = True
except ImportError:
    pass

TWO_MB = 2 * 1024 * 1024

NO_MAX = sys.maxint

# Binary exponentiation strings.
_EXP_STRINGS = [
    (0, 'B', 'bit'),
    (10, 'KB', 'kbit'),
    (20, 'MB', 'Mbit'),
    (30, 'GB', 'Gbit'),
    (40, 'TB', 'Tbit'),
    (50, 'PB', 'Pbit'),
]

# Detect platform types.
IS_WINDOWS = 'win32' in str(sys.platform).lower()
IS_LINUX = 'linux' in str(sys.platform).lower()
IS_OSX = 'darwin' in str(sys.platform).lower()

Retry = retry


# Enum class for specifying listing style.
class ListingStyle(object):
    SHORT = 'SHORT'
    LONG = 'LONG'
    LONG_LONG = 'LONG_LONG'


def HasConfiguredCredentials():
    """Determines if boto credential/config file exists."""
    config = boto.config
    has_goog_creds = (config.has_option('Credentials', 'gs_access_key_id')
                      and config.has_option('Credentials', 'gs_secret_access_key'))
    has_amzn_creds = (config.has_option('Credentials', 'aws_access_key_id')
                      and config.has_option('Credentials', 'aws_secret_access_key'))
    has_oauth_creds = (HAVE_OAUTH2 and
                       config.has_option('Credentials', 'gs_oauth2_refresh_token'))
    has_auth_plugins = config.has_option('Plugin', 'plugin_directory')
    return (has_goog_creds or has_amzn_creds or has_oauth_creds
            or has_auth_plugins)


def _RoundToNearestExponent(num):
    i = 0
    while i + 1 < len(_EXP_STRINGS) and num >= (2 ** _EXP_STRINGS[i + 1][0]):
        i += 1
    return i, round(float(num) / 2 ** _EXP_STRINGS[i][0], 2)


def MakeHumanReadable(num):
    """Generates human readable string for a number of bytes.

    Args:
        num: The number, in bytes.

    Returns:
        A string form of the number using size abbreviations (KB, MB, etc.).
    """
    i, rounded_val = _RoundToNearestExponent(num)
    return '%s %s' % (rounded_val, _EXP_STRINGS[i][1])


def MakeBitsHumanReadable(num):
    """Generates human readable string for a number of bits.

    Args:
        num: The number, in bits.

    Returns:
        A string form of the number using bit size abbreviations (kbit, Mbit, etc.)
    """
    i, rounded_val = _RoundToNearestExponent(num)
    return '%s %s' % (rounded_val, _EXP_STRINGS[i][2])


def Percentile(values, percent, key=lambda x: x):
    """Find the percentile of a list of values.

    Taken from: http://code.activestate.com/recipes/511478/

    Args:
        values: a list of numeric values. Note that the values MUST BE already
                sorted.
        percent: a float value from 0.0 to 1.0.
        key: optional key function to compute value from each element of the
             list of values.

    Returns:
        The percentile of the values.
    """
    if not values:
        return None
    k = (len(values) - 1) * percent
    f = math.floor(k)
    c = math.ceil(k)
    if f == c:
        return key(values[int(k)])
    d0 = key(values[int(f)]) * (c - k)
    d1 = key(values[int(c)]) * (k - f)
    return d0 + d1


def ExtractErrorDetail(e):
    """Extract <Details> text from XML content.

    Args:
        e: The GSResponseError that includes XML to be parsed.

    Returns:
        (exception_name, d), where d is <Details> text or None if not found.
    """
    exc_name_parts = re.split("[\.']", str(type(e)))
    if len(exc_name_parts) < 2:
        # Shouldn't happen, but have fallback in case.
        exc_name = str(type(e))
    else:
        exc_name = exc_name_parts[-2]
    if not hasattr(e, 'body'):
        return (exc_name, None)
    detail_start = e.body.find('<Details>')
    detail_end = e.body.find('</Details>')
    if detail_start != -1 and detail_end != -1:
        return (exc_name, e.body[detail_start + 9:detail_end])
    return (exc_name, None)
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.

Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.

BK-BASE 蓝鲸基础平台 is licensed under the MIT License.

License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os

QUERYENGINE_API_ROOT = "http://{host}:{port}/v3/queryengine".format(
    host=os.environ["QUERYENGINE_API_HOST"], port=os.environ["QUERYENGINE_API_PORT"]
)

AUTH_API_ROOT = "http://{host}:{port}/v3/auth".format(
    host=os.environ["AUTH_API_HOST"], port=os.environ["AUTH_API_PORT"]
)

META_API_ROOT = "http://{host}:{port}/v3/meta".format(
    host=os.environ["META_API_HOST"], port=os.environ["META_API_PORT"]
)

DATALAB_API_ROOT = "http://{host}:{port}/v3/datalab".format(
    host=os.environ["DATALAB_API_HOST"], port=os.environ["DATALAB_API_PORT"]
)

DATAFLOW_API_ROOT = "http://{host}:{port}/v3/dataflow".format(
    host=os.environ["DATAFLOW_API_HOST"], port=os.environ["DATAFLOW_API_PORT"]
)

DATAHUB_API_ROOT = "http://{host}:{port}/v3".format(
    host=os.environ["DATAHUB_API_HOST"], port=os.environ["DATAHUB_API_PORT"]
)

JUPYTERHUB_USER = os.environ["JUPYTERHUB_USER"]
nilq/baby-python
python
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

"""Cycle Task Entry RBAC Factory."""

from ggrc.models import all_models

from integration.ggrc import Api
from integration.ggrc.access_control.rbac_factories import base
from integration.ggrc.models import factories


class CycleTaskEntryRBACFactory(base.BaseRBACFactory):
    """Cycle Task Entry RBAC factory class."""

    def __init__(self, user_id, acr, parent=None):
        """Set up objects for Cycle Task Entry permission tests.

        Args:
            user_id: Id of user under which all operations will be run.
            acr: Instance of ACR that should be assigned for tested user.
            parent: Model name in scope of which objects should be set up.
        """
        # pylint: disable=unused-argument
        self.setup_workflow_scope(user_id, acr)
        self.api = Api()
        self.create()
        if user_id:
            user = all_models.Person.query.get(user_id)
            self.api.set_user(user)

    def create(self):
        """Create new Cycle Task Entry object."""
        cycle_task = all_models.CycleTaskGroupObjectTask.query.first()
        return self.api.post(all_models.CycleTaskEntry, {
            "cycle_task_entry": {
                "description": "New Comment",
                "is_declining_review": "",
                "context": None,
                "cycle_task_group_object_task": {
                    "id": cycle_task.id,
                    "type": "CycleTaskGroupObjectTask",
                },
                "cycle": {
                    "id": cycle_task.cycle.id,
                    "type": "Cycle",
                },
            }
        })

    def read(self):
        """Read existing Cycle Task Entry object."""
        cycle_task_entry = all_models.CycleTaskEntry.query.first()
        return self.api.get(cycle_task_entry, cycle_task_entry.id)

    def update(self):
        """Update title of existing Cycle Task Entry object."""
        cycle_task_entry = all_models.CycleTaskEntry.query.first()
        return self.api.put(
            cycle_task_entry, {"description": factories.random_str()}
        )

    def delete(self):
        """Delete Cycle Task Entry object."""
        cycle_task_entry = all_models.CycleTaskEntry.query.first()
        return self.api.delete(cycle_task_entry)
nilq/baby-python
python
from simplecv.data import test_transforms as ttas
from albumentations import Compose, OneOf, Normalize
from albumentations import HorizontalFlip, VerticalFlip, RandomRotate90, RandomCrop
from simplecv.api.preprocess import albu
from albumentations.pytorch import ToTensorV2
import torch.nn as nn

config = dict(
    model=dict(
        type='GSiameseResNet',
        params=dict(
            backbone=dict(
                resnet_type='resnext101_32x4d',
                include_conv5=True,
                batchnorm_trainable=True,
                pretrained=True,
                freeze_at=0,
                # 16 or 32
                output_stride=32,
                with_cp=(False, False, False, False),
                norm_layer=nn.BatchNorm2d,
            ),
            neck=dict(
                in_channels_list=(256, 512, 1024, 2048),
                out_channels=256,
            ),
            head=dict(
                in_channels=256,
                out_channels=256,
                num_classes=5,
                upsample_scale=4.0,
                num_blocks=1,
                bottleneck_channels=128
            ),
            loss=dict(
                cls_weight=1.0,
                ignore_index=255,
                dam=dict(
                    ohem=dict(
                        ratio=0.8
                    )
                ),
                loc=dict(
                    tversky_loss=dict(alpha=0.7, beta=0.3),
                    bce_loss=dict(),
                )
            )
        ),
    ),
    data=dict(
        train=dict(
            type='Xview2PairwiseDataLoader',
            params=dict(
                image_dir=('./xview2/train/images', './xview2/tier3/images'),
                label_dir=('./xview2/train/labels', './xview2/tier3/labels'),
                mode='segm',
                include=('pre', 'post'),
                CV=dict(
                    on=True,
                    cur_k=0,
                    k_fold=5,
                ),
                transforms=Compose([
                    OneOf([
                        HorizontalFlip(True),
                        VerticalFlip(True),
                        RandomRotate90(True)
                    ], p=0.75),
                    albu.RandomDiscreteScale([0.75, 1.25, 1.5], p=0.5),
                    RandomCrop(640, 640, True),
                    Normalize(mean=(0.485, 0.456, 0.406, 0.485, 0.456, 0.406),
                              std=(0.229, 0.224, 0.225, 0.229, 0.224, 0.225),
                              max_pixel_value=255),
                    ToTensorV2(True),
                ]),
                batch_size=4,
                num_workers=4,
                training=True
            ),
        ),
        test=dict(
            type='Xview2PairwiseDataLoader',
            params=dict(
                image_dir=('./xview2/train/images', './xview2/tier3/images'),
                label_dir=('./xview2/train/labels', './xview2/tier3/labels'),
                mode='segm',
                include=('pre', 'post'),
                CV=dict(
                    on=True,
                    cur_k=0,
                    k_fold=5,
                ),
                transforms=Compose([
                    Normalize(mean=(0.485, 0.456, 0.406, 0.485, 0.456, 0.406),
                              std=(0.229, 0.224, 0.225, 0.229, 0.224, 0.225),
                              max_pixel_value=255),
                    ToTensorV2(True),
                ]),
                batch_size=1,
                num_workers=0,
                training=False
            ),
        ),
    ),
    optimizer=dict(
        type='sgd',
        params=dict(
            momentum=0.9,
            weight_decay=0.0001
        ),
        grad_clip=dict(
            max_norm=35,
            norm_type=2,
        )
    ),
    learning_rate=dict(
        type='poly',
        params=dict(
            base_lr=0.03,
            power=0.9,
            max_iters=30000,
        )),
    train=dict(
        forward_times=1,
        num_iters=30000,
        eval_per_epoch=False,
        summary_grads=False,
        summary_weights=False,
        distributed=True,
        apex_sync_bn=True,
        sync_bn=False,
        eval_after_train=True,
        log_interval_step=50,
        save_ckpt_interval_epoch=40,
        eval_interval_epoch=40,
    ),
    test=dict(
        tta=[
            ttas.Rotate90k(1),
            ttas.Rotate90k(2),
            ttas.Rotate90k(3),
            ttas.HorizontalFlip(),
            ttas.VerticalFlip(),
            ttas.Transpose(),
            ttas.Scale(scale_factor=0.75),
            ttas.Scale(scale_factor=1.0),
            ttas.Scale(scale_factor=1.25),
            ttas.Scale(scale_factor=1.5),
        ]
    ),
)
nilq/baby-python
python
from .logit_lens import LogitLens
nilq/baby-python
python
""" @author Huaze Shen @date 2019-07-19 """ def combination_sum_2(candidates, target): results = [] if candidates is None or len(candidates) == 0: return results candidates = sorted(candidates) combination = [] helper(results, combination, candidates, 0, target) return results def helper(results, combination, candidates, start_index, remain_target): if remain_target == 0: results.append(combination[:]) return for i in range(start_index, len(candidates)): if candidates[i] > remain_target: return if i > start_index and candidates[i] == candidates[i - 1]: continue combination.append(candidates[i]) helper(results, combination, candidates, i + 1, remain_target - candidates[i]) combination.pop() if __name__ == '__main__': candidates_ = [10, 1, 2, 7, 6, 1, 5] target_ = 8 print(combination_sum_2(candidates_, target_))
nilq/baby-python
python
from django.test import TestCase
from django.urls import reverse

from user.forms import (AssociatedEmailChoiceForm, AddEmailForm, LoginForm,
                        ProfileForm, RegistrationForm)
from user.models import User


class TestForms(TestCase):

    def create_test_forms(self, FormClass, valid_dict, invalid_dict, user=None):
        """
        Helper method to create a valid and invalid form of a certain form
        class. Some forms require the user object
        """
        if user:
            self.valid_form = FormClass(user=user, data=valid_dict)
            self.invalid_form = FormClass(user=user, data=invalid_dict)
        else:
            self.valid_form = FormClass(data=valid_dict)
            self.invalid_form = FormClass(data=invalid_dict)

    def run_test_forms(self, invalid_form_errors):
        """
        Helper method to test the valid form and an invalid form. Input the
        expected form error of the invalid form.

        Remember, this method name cannot begin with 'test'
        """
        self.assertTrue(self.valid_form.is_valid())
        self.assertFalse(self.invalid_form.is_valid())
        self.assertEqual(self.invalid_form.errors, invalid_form_errors)

    def test_associated_email_choice_form(self):
        """
        Choice field in form, cannot use create helper function
        """
        user = User.objects.get(email='admin@mit.edu')
        self.valid_form = AssociatedEmailChoiceForm(
            user=user, selection_type='primary',
            data={'associated_email': 'admin2@mit.edu'})
        self.invalid_form = AssociatedEmailChoiceForm(
            user=user, selection_type='public',
            data={'associated_email': 'nonexistent@mit.edu'})
        self.run_test_forms({'associated_email': [
            'Select a valid choice. That choice is not one of the available choices.']})

    def test_associated_email_form(self):
        self.create_test_forms(AddEmailForm,
                               {'email': 'tester0@mit.edu'},
                               {'email': 'nonexistent'})
        self.run_test_forms({'email': ['Enter a valid email address.']})

    def test_login_form(self):
        self.create_test_forms(LoginForm,
                               {'username': 'admin', 'password': 'Tester11!'},
                               {'username': 'admin', 'password': 'wrong'})
        self.run_test_forms({'__all__': [
            'Please enter a correct username/email and password. Note that the password field is case-sensitive.']})

    def test_profile_form(self):
        self.create_test_forms(ProfileForm,
                               {'first_names': 'Tester Mid', 'last_name': 'Bot',
                                'url': 'http://physionet.org'},
                               {'first_names': 'Tester Mid', 'last_name': '',
                                'phone': '0'})
        self.run_test_forms({'last_name': ['This field is required.']})

    def test_user_creation_form(self):
        self.create_test_forms(RegistrationForm,
                               {'email': 'tester0@mit.edu', 'username': 'The-Tester',
                                'first_names': 'Tester Mid', 'last_name': 'Bot'},
                               {'email': 'tester0@mit.edu', 'username': 'bot-net',
                                'first_names': '', 'last_name': 'Bot'})
        self.run_test_forms({'first_names': ['This field is required.']})
nilq/baby-python
python
# Copyright (c) 2015-2017 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.

"""
This file should only include the version. Do not import any packages or
modules here because this file needs to be executed before SWITCH is
installed and executed in environments that don't have any dependencies
installed.
"""

__version__ = '2.0.1'
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- from .dependency import Dependency from ..config import Configuration from ..util.process import Process from ..util.color import Color import os class Hashcat(Dependency): dependency_required = False dependency_name = 'hashcat' dependency_url = 'https://hashcat.net/hashcat/' @staticmethod def should_use_force(): command = ['hashcat', '-I'] stderr = Process(command).stderr() return 'No devices found/left' in stderr @staticmethod def crack_handshake(handshake, show_command=False): # Generate hccapx hccapx_file = HcxPcapTool.generate_hccapx_file( handshake, show_command=show_command) key = None # Crack hccapx for additional_arg in ([], ['--show']): command = [ 'hashcat', '--quiet', '-m', '2500', hccapx_file, Configuration.wordlist ] if Hashcat.should_use_force(): command.append('--force') command.extend(additional_arg) if show_command: Color.pl('{+} {D}Running: {W}{P}%s{W}' % ' '.join(command)) process = Process(command) stdout, stderr = process.get_output() if ':' not in stdout: continue else: key = stdout.split(':', 5)[-1].strip() break if os.path.exists(hccapx_file): os.remove(hccapx_file) return key @staticmethod def crack_pmkid(pmkid_file, verbose=False): ''' Cracks a given pmkid_file using the PMKID/WPA2 attack (-m 16800) Returns: Key (str) if found; `None` if not found. ''' # Run hashcat once normally, then with --show if it failed # To catch cases where the password is already in the pot file. for additional_arg in ([], ['--show']): command = [ 'hashcat', '--quiet', # Only output the password if found. '-m', '16800', # WPA-PMKID-PBKDF2 '-a', '0', # Wordlist attack-mode pmkid_file, Configuration.wordlist ] if Hashcat.should_use_force(): command.append('--force') command.extend(additional_arg) if verbose and additional_arg == []: Color.pl('{+} {D}Running: {W}{P}%s{W}' % ' '.join(command)) # TODO: Check status of hashcat (%); it's impossible with --quiet hashcat_proc = Process(command) hashcat_proc.wait() stdout = hashcat_proc.stdout() if ':' not in stdout: # Failed continue else: # Cracked key = stdout.strip().split(':', 1)[1] return key class HcxDumpTool(Dependency): dependency_required = False dependency_name = 'hcxdumptool' dependency_url = 'https://github.com/ZerBea/hcxdumptool' def __init__(self, target, pcapng_file): # Create filterlist filterlist = Configuration.temp('pmkid.filterlist') with open(filterlist, 'w') as filter_handle: filter_handle.write(target.bssid.replace(':', '')) if os.path.exists(pcapng_file): os.remove(pcapng_file) command = [ 'hcxdumptool', '-i', Configuration.interface, '--filterlist', filterlist, '--filtermode', '2', '-c', str(target.channel), '-o', pcapng_file ] self.proc = Process(command) def poll(self): return self.proc.poll() def interrupt(self): self.proc.interrupt() class HcxPcapTool(Dependency): dependency_required = False dependency_name = 'hcxpcaptool' dependency_url = 'https://github.com/ZerBea/hcxtools' def __init__(self, target): self.target = target self.bssid = self.target.bssid.lower().replace(':', '') self.pmkid_file = Configuration.temp('pmkid-%s.16800' % self.bssid) @staticmethod def generate_hccapx_file(handshake, show_command=False): hccapx_file = Configuration.temp('generated.hccapx') if os.path.exists(hccapx_file): os.remove(hccapx_file) command = [ 'hcxpcaptool', '-o', hccapx_file, handshake.capfile ] if show_command: Color.pl('{+} {D}Running: {W}{P}%s{W}' % ' '.join(command)) process = Process(command) stdout, stderr = process.get_output() if not os.path.exists(hccapx_file): raise 
ValueError('Failed to generate .hccapx file, output: \n%s\n%s' % ( stdout, stderr)) return hccapx_file @staticmethod def generate_john_file(handshake, show_command=False): john_file = Configuration.temp('generated.john') if os.path.exists(john_file): os.remove(john_file) command = [ 'hcxpcaptool', '-j', john_file, handshake.capfile ] if show_command: Color.pl('{+} {D}Running: {W}{P}%s{W}' % ' '.join(command)) process = Process(command) stdout, stderr = process.get_output() if not os.path.exists(john_file): raise ValueError('Failed to generate .john file, output: \n%s\n%s' % ( stdout, stderr)) return john_file def get_pmkid_hash(self, pcapng_file): if os.path.exists(self.pmkid_file): os.remove(self.pmkid_file) command = [ 'hcxpcaptool', '-z', self.pmkid_file, pcapng_file ] hcxpcap_proc = Process(command) hcxpcap_proc.wait() if not os.path.exists(self.pmkid_file): return None with open(self.pmkid_file, 'r') as f: output = f.read() # Each line looks like: # hash*bssid*station*essid # Note: The dumptool will record *anything* it finds, ignoring the filterlist. # Check that we got the right target (filter by BSSID) matching_pmkid_hash = None for line in output.split('\n'): fields = line.split('*') if len(fields) >= 3 and fields[1].lower() == self.bssid: # Found it matching_pmkid_hash = line break os.remove(self.pmkid_file) return matching_pmkid_hash
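# The Hashcat helper above drives the PMKID attack through the project's own
# Process/Configuration wrappers. Below is a minimal standalone sketch of the
# same hashcat "-m 16800" invocation using plain subprocess; it is an
# illustration only, and the hash-file and wordlist paths are placeholder
# assumptions, not values from the code above.
import subprocess

def crack_pmkid_standalone(pmkid_file, wordlist):
    # Run once normally, then with --show to catch passwords already in the potfile.
    for extra in ([], ['--show']):
        cmd = ['hashcat', '--quiet', '-m', '16800', '-a', '0',
               pmkid_file, wordlist] + extra
        result = subprocess.run(cmd, capture_output=True, text=True)
        stdout = result.stdout
        if ':' in stdout:
            # Cracked lines look like "<pmkid hash line>:<password>".
            return stdout.strip().split(':', 1)[1]
    return None

# Example (placeholder paths):
# key = crack_pmkid_standalone('/tmp/pmkid-abcdef.16800',
#                              '/usr/share/wordlists/rockyou.txt')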
nilq/baby-python
python
from PyQt5.QtWidgets import QWidget, \
    QHBoxLayout, \
    QVBoxLayout, \
    QDialog, \
    QLineEdit, \
    QLabel, \
    QPushButton
from PyQt5.QtCore import Qt


class NewFile(QDialog):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.name = QLineEdit()
        self.name.setText("Untitled")
        self.__btn_create_clicked = False
        self.setup_ui()
        self.show()

    def setup_ui(self):
        self.setWindowTitle("New File")
        self.resize(300, 80)
        self.setWindowModality(Qt.ApplicationModal)

        main_lay = QVBoxLayout()
        lay1 = QHBoxLayout()
        lay2 = QHBoxLayout()

        label = QLabel()
        label.setText("File name:")
        lay1.addWidget(label)
        lay1.addWidget(self.name)

        btn_ok = QPushButton("Create")
        btn_ok.clicked.connect(self.__btn_ok_click)
        btn_cancel = QPushButton("Cancel")
        btn_cancel.clicked.connect(self.close)
        lay2.addWidget(btn_ok)
        lay2.addWidget(btn_cancel)

        main_lay.addLayout(lay1)
        main_lay.addLayout(lay2)
        self.setLayout(main_lay)

    def __btn_ok_click(self):
        self.__btn_create_clicked = True
        # Fall back to the default name if the field was left empty.
        if self.name.text() == "":
            self.name.setText("Untitled")
        self.close()

    def is_create_clicked(self):
        return self.__btn_create_clicked
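# Minimal usage sketch for the NewFile dialog above; it assumes the class is
# in scope (e.g. imported from the module where it is defined).
import sys
from PyQt5.QtWidgets import QApplication

if __name__ == "__main__":
    app = QApplication(sys.argv)
    dialog = NewFile()
    dialog.exec_()  # block until Create or Cancel is pressed
    if dialog.is_create_clicked():
        print("Create new file named:", dialog.name.text())
    else:
        print("Cancelled")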
nilq/baby-python
python
''' Base class for RTE test suite '''
import abc

import numpy as np


class BaseTestRTE(object):
    ''' base class to test all interfaces '''
    __metaclass__ = abc.ABCMeta

    @property
    @abc.abstractmethod
    def _interface(self):
        return None

    def test_apply_bc_0(self):
        ''' apply zero boundary condition '''
        downward_flux = np.random.randn(2, 20, 3)
        out_array = self._interface.apply_zero_bc(downward_flux)
        self.validate_bcs(out_array, 0)

    def test_apply_inc_flux(self):
        ''' apply incident flux '''
        downward_flux = np.random.randn(2, 20, 3)
        incident_flux = 10*np.ones((2, 3), dtype=np.double)
        out_array = self._interface.apply_gpoint_bc(
            downward_flux, incident_flux)
        self.validate_bcs(out_array, incident_flux)

    def test_apply_scaled_inc_flux(self):
        ''' apply incident flux '''
        downward_flux = np.random.randn(2, 20, 3)
        incident_flux = 10*np.ones((2, 3), dtype=np.double)
        scale_factor = np.arange(3, dtype=np.double)
        out_array = self._interface.apply_scaled_gpoint_bc(
            downward_flux, incident_flux, scale_factor)
        self.validate_bcs(out_array, scale_factor*incident_flux)

    def validate_bcs(self, array, target):
        ''' check if bcs are applied correctly.

        Args:
            array (ndarray): output from RTE.
            target (ndarray or float): what to check against.
        '''
        if self._interface.direction == 'top_to_bottom':
            assert np.all(array[:, 0, :] == target)
        else:
            assert np.all(array[:, -1, :] == target)
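# BaseTestRTE only defines behaviour; a concrete test class supplies
# `_interface`. The stub below is a hypothetical stand-in whose method names
# and semantics are inferred from the tests above, purely to illustrate how a
# subclass plugs in; it is not part of the real RTE interface.
class _StubInterface(object):
    direction = 'top_to_bottom'

    def apply_zero_bc(self, downward_flux):
        out = downward_flux.copy()
        out[:, 0, :] = 0
        return out

    def apply_gpoint_bc(self, downward_flux, incident_flux):
        out = downward_flux.copy()
        out[:, 0, :] = incident_flux
        return out

    def apply_scaled_gpoint_bc(self, downward_flux, incident_flux, scale_factor):
        out = downward_flux.copy()
        out[:, 0, :] = scale_factor * incident_flux
        return out


class TestStubRTE(BaseTestRTE):
    @property
    def _interface(self):
        return _StubInterface()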
nilq/baby-python
python
"""A client for Team Foundation Server.""" from __future__ import unicode_literals import logging import os import re import sys import tempfile import xml.etree.ElementTree as ET from six.moves.urllib.parse import unquote from rbtools.clients import RepositoryInfo, SCMClient from rbtools.clients.errors import (InvalidRevisionSpecError, SCMError, TooManyRevisionsError) from rbtools.utils.appdirs import user_data_dir from rbtools.utils.checks import check_gnu_diff, check_install from rbtools.utils.diffs import filename_match_any_patterns from rbtools.utils.process import execute class TFExeWrapper(object): """Implementation wrapper for using VS2017's tf.exe.""" REVISION_WORKING_COPY = '--rbtools-working-copy' def __init__(self, config=None, options=None): """Initialize the wrapper. Args: config (dict, optional): The loaded configuration. options (argparse.Namespace, optional): The command line options. """ self.config = config self.options = options def get_local_path(self): """Return the local path to the working tree. Returns: unicode: The filesystem path of the repository on the client system. """ workfold = self._run_tf(['vc', 'workfold', os.getcwd()]) m = re.search('^Collection: (.*)$', workfold, re.MULTILINE) if m: return unquote(m.group(1)) logging.debug('Could not find the collection from "tf vc workfold"') return None def get_repository_info(self): """Return repository information for the current working tree. Returns: rbtools.clients.RepositoryInfo: The repository info structure. """ path = self.get_local_path() if path: # Now that we know it's TFS, make sure we have GNU diff installed, and # error out if we don't. check_gnu_diff() return RepositoryInfo(path=path, local_path=path) return None def parse_revision_spec(self, revisions): """Parse the given revision spec. Args: revisions (list of unicode): A list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use the TFS-native syntax of ``r1~r2``. Versions passed in can be any versionspec, such as a changeset number, ``L``-prefixed label name, ``W`` (latest workspace version), or ``T`` (latest upstream version). Raises: rbtools.clients.errors.TooManyRevisionsError: Too many revisions were specified. rbtools.clients.errors.InvalidRevisionSpecError: The given revision spec could not be parsed. Returns: dict: A dictionary with the following keys: ``base`` (:py:class:`unicode`): A revision to use as the base of the resulting diff. ``tip`` (:py:class:`unicode`): A revision to use as the tip of the resulting diff. ``parent_base`` (:py:class:`unicode`, optional): The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for "base" and the passed-in revision for "tip". If zero revisions are passed in, this will return revisions relevant for the "current change" (changes in the work folder which have not yet been checked in). 
""" n_revisions = len(revisions) if n_revisions == 1 and '~' in revisions[0]: revisions = revisions[0].split('~') n_revisions = len(revisions) if n_revisions == 0: # Most recent checked-out revision -- working copy return { 'base': self._convert_symbolic_revision('W'), 'tip': self.REVISION_WORKING_COPY, } elif n_revisions == 1: # Either a numeric revision (n-1:n) or a changelist revision = self._convert_symbolic_revision(revisions[0]) return { 'base': revision - 1, 'tip': revision, } elif n_revisions == 2: # Diff between two numeric revisions return { 'base': self._convert_symbolic_revision(revisions[0]), 'tip': self._convert_symbolic_revision(revisions[1]), } else: raise TooManyRevisionsError return { 'base': None, 'tip': None, } def _convert_symbolic_revision(self, revision, path=None): """Convert a symbolic revision into a numeric changeset. Args: revision (unicode): The TFS versionspec to convert. path (unicode, optional): The itemspec that the revision applies to. Returns: int: The changeset number corresponding to the versionspec. """ # We pass results_unicode=False because that uses the filesystem # encoding to decode the output, but the XML results we get should # always be UTF-8, and are well-formed with the encoding specified. We # can therefore let ElementTree determine how to decode it. data = self._run_tf(['vc', 'history', '/stopafter:1', '/recursive', '/format:detailed', '/version:%s' % revision, path or os.getcwd()]) m = re.search('^Changeset: (\d+)$', data, re.MULTILINE) if not m: logging.debug('Failed to parse output from "tf vc history":\n%s', data) raise InvalidRevisionSpecError( '"%s" does not appear to be a valid versionspec' % revision) def diff(self, revisions, include_files, exclude_patterns, **kwargs): """Return the generated diff. Args: revisions (dict): A dictionary containing ``base`` and ``tip`` keys. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. **kwargs (dict, unused): Unused keyword arguments. Returns: dict: A dictionary containing the following keys: ``diff`` (:py:class:`bytes`): The contents of the diff to upload. ``base_commit_id` (:py:class:`unicode`, optional): The ID of the commit that the change is based on, if available. This is necessary for some hosting services that don't provide individual file access. """ base = str(revisions['base']) tip = str(revisions['tip']) if tip == self.REVISION_WORKING_COPY: # TODO: support committed revisions return self._diff_working_copy(base, include_files, exclude_patterns) else: raise SCMError('Posting committed changes is not yet supported ' 'for TFS when using the tf.exe wrapper.') def _diff_working_copy(self, base, include_files, exclude_patterns): """Return a diff of the working copy. Args: base (unicode): The base revision to diff against. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing ``diff``, ``parent_diff``, and ``base_commit_id`` keys. In the case of TFS, the parent diff key will always be ``None``. """ # We pass results_unicode=False because that uses the filesystem # encoding, but the XML results we get should always be UTF-8, and are # well-formed with the encoding specified. We can therefore let # ElementTree determine how to decode it. 
status = self._run_tf(['vc', 'status', '/format:xml'], results_unicode=False) root = ET.fromstring(status) diff = [] for pending_change in root.findall( './PendingSet/PendingChanges/PendingChange'): action = pending_change.attrib['chg'].split(' ') old_filename = \ pending_change.attrib.get('srcitem', '').encode('utf-8') new_filename = pending_change.attrib['item'].encode('utf-8') local_filename = pending_change.attrib['local'] old_version = \ pending_change.attrib.get('svrfm', '0').encode('utf-8') file_type = pending_change.attrib['type'] encoding = pending_change.attrib['enc'] new_version = b'(pending)' old_data = b'' new_data = b'' binary = (encoding == '-1') copied = 'Branch' in action if (not file_type or (not os.path.isfile(local_filename) and 'Delete' not in action)): continue if (exclude_patterns and filename_match_any_patterns(local_filename, exclude_patterns, base_dir=None)): continue if 'Add' in action: old_filename = b'/dev/null' if not binary: with open(local_filename, 'rb') as f: new_data = f.read() old_data = b'' elif 'Delete' in action: old_data = self._run_tf( ['vc', 'view', '/version:%s' % old_version.decode('utf-8'), old_filename.decode('utf-8')], results_unicode=False) new_data = b'' new_version = b'(deleted)' elif 'Edit' in action: if not binary: old_data = self._run_tf( ['vc', 'view', old_filename.decode('utf-8'), '/version:%s' % old_version.decode('utf-8')], results_unicode=False) with open(local_filename, 'rb') as f: new_data = f.read() old_label = b'%s\t%s' % (old_filename, old_version) new_label = b'%s\t%s' % (new_filename, new_version) if copied: diff.append(b'Copied from: %s\n' % old_filename) if binary: if 'Add' in action: old_filename = new_filename diff.append(b'--- %s\n' % old_label) diff.append(b'+++ %s\n' % new_label) diff.append(b'Binary files %s and %s differ\n' % (old_filename, new_filename)) elif old_filename != new_filename and old_data == new_data: # Renamed file with no changes. diff.append(b'--- %s\n' % old_label) diff.append(b'+++ %s\n' % new_label) else: old_tmp = tempfile.NamedTemporaryFile(delete=False) old_tmp.write(old_data) old_tmp.close() new_tmp = tempfile.NamedTemporaryFile(delete=False) new_tmp.write(new_data) new_tmp.close() unified_diff = execute( ['diff', '-u', '--label', old_label.decode('utf-8'), '--label', new_label.decode('utf-8'), old_tmp.name, new_tmp.name], extra_ignore_errors=(1,), log_output_on_error=False, results_unicode=False) diff.append(unified_diff) os.unlink(old_tmp.name) os.unlink(new_tmp.name) return { 'diff': b''.join(diff), 'parent_diff': None, 'base_commit_id': base, } def _run_tf(self, args, **kwargs): """Run the "tf" command. Args: args (list): A list of arguments to pass to rb-tfs. **kwargs (dict): Additional keyword arguments for the :py:meth:`execute` call. Returns: unicode: The output of the command. """ command = ['tf'] + args + ['/noprompt'] if getattr(self.options, 'tfs_login', None): command.append('/login:%s' % self.options.tfs_login) return execute(command, ignore_errors=True, **kwargs) class TEEWrapper(object): """Implementation wrapper for using Team Explorer Everywhere.""" REVISION_WORKING_COPY = '--rbtools-working-copy' def __init__(self, config=None, options=None): """Initialize the wrapper. Args: config (dict, optional): The loaded configuration. options (argparse.Namespace, optional): The command line options. 
""" self.config = config self.options = options self.tf = None tf_locations = [] if options and getattr(options, 'tf_cmd', None): tf_locations.append(options.tf_cmd) if sys.platform.startswith('win'): # First check in the system path. If that doesn't work, look in the # two standard install locations. tf_locations.extend([ 'tf.cmd', (r'%programfiles(x86)%\Microsoft Visual Studio 12.0\Common7' r'\IDE\tf.cmd'), (r'%programfiles%\Microsoft Team Foundation Server 12.0\Tools' r'\tf.cmd'), ]) else: tf_locations.append('tf') for location in tf_locations: location = os.path.expandvars(location) if check_install([location, 'help']): self.tf = location break def get_local_path(self): """Return the local path to the working tree. Returns: unicode: The filesystem path of the repository on the client system. """ if self.tf is None: logging.debug('Unable to execute "tf help": skipping TFS') return None workfold = self._run_tf(['workfold', os.getcwd()]) m = re.search('^Collection: (.*)$', workfold, re.MULTILINE) if m: return unquote(m.group(1)) logging.debug('Could not find the collection from "tf workfold"') return None def get_repository_info(self): """Return repository information for the current working tree. Returns: rbtools.clients.RepositoryInfo: The repository info structure. """ path = self.get_local_path() if path: # Now that we know it's TFS, make sure we have GNU diff installed, # and error out if we don't. check_gnu_diff() return RepositoryInfo(path=path, local_path=path) return None def parse_revision_spec(self, revisions): """Parse the given revision spec. Args: revisions (list of unicode): A list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use the TFS-native syntax of ``r1~r2``. Versions passed in can be any versionspec, such as a changeset number, ``L``-prefixed label name, ``W`` (latest workspace version), or ``T`` (latest upstream version). Returns: dict: A dictionary with the following keys: ``base`` (:py:class:`unicode`): A revision to use as the base of the resulting diff. ``tip`` (:py:class:`unicode`): A revision to use as the tip of the resulting diff. ``parent_base`` (:py:class:`unicode`, optional): The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for "base" and the passed-in revision for "tip". If zero revisions are passed in, this will return revisions relevant for the "current change" (changes in the work folder which have not yet been checked in). Raises: rbtools.clients.errors.TooManyRevisionsError: Too many revisions were specified. rbtools.clients.errors.InvalidRevisionSpecError: The given revision spec could not be parsed. 
""" n_revisions = len(revisions) if n_revisions == 1 and '~' in revisions[0]: revisions = revisions[0].split('~') n_revisions = len(revisions) if n_revisions == 0: # Most recent checked-out revision -- working copy return { 'base': self._convert_symbolic_revision('W'), 'tip': self.REVISION_WORKING_COPY, } elif n_revisions == 1: # Either a numeric revision (n-1:n) or a changelist revision = self._convert_symbolic_revision(revisions[0]) return { 'base': revision - 1, 'tip': revision, } elif n_revisions == 2: # Diff between two numeric revisions return { 'base': self._convert_symbolic_revision(revisions[0]), 'tip': self._convert_symbolic_revision(revisions[1]), } else: raise TooManyRevisionsError return { 'base': None, 'tip': None, } def _convert_symbolic_revision(self, revision, path=None): """Convert a symbolic revision into a numeric changeset. Args: revision (unicode): The TFS versionspec to convert. path (unicode, optional): The itemspec that the revision applies to. Returns: int: The changeset number corresponding to the versionspec. """ args = ['history', '-stopafter:1', '-recursive', '-format:xml'] # 'tf history -version:W'` doesn't seem to work (even though it's # supposed to). Luckily, W is the default when -version isn't passed, # so just elide it. if revision != 'W': args.append('-version:%s' % revision) args.append(path or os.getcwd()) # We pass results_unicode=False because that uses the filesystem # encoding to decode the output, but the XML results we get should # always be UTF-8, and are well-formed with the encoding specified. We # can therefore let ElementTree determine how to decode it. data = self._run_tf(args, results_unicode=False) try: root = ET.fromstring(data) item = root.find('./changeset') if item is not None: return int(item.attrib['id']) else: raise Exception('No changesets found') except Exception as e: logging.debug('Failed to parse output from "tf history": %s\n%s', e, data, exc_info=True) raise InvalidRevisionSpecError( '"%s" does not appear to be a valid versionspec' % revision) def diff(self, revisions, include_files, exclude_patterns): """Return the generated diff. Args: revisions (dict): A dictionary containing ``base`` and ``tip`` keys. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing the following keys: ``diff`` (:py:class:`bytes`): The contents of the diff to upload. ``base_commit_id` (:py:class:`unicode`, optional): The ID of the commit that the change is based on, if available. This is necessary for some hosting services that don't provide individual file access. """ base = str(revisions['base']) tip = str(revisions['tip']) if tip == self.REVISION_WORKING_COPY: return self._diff_working_copy(base, include_files, exclude_patterns) else: raise SCMError('Posting committed changes is not yet supported ' 'for TFS when using the Team Explorer Everywhere ' 'wrapper.') def _diff_working_copy(self, base, include_files, exclude_patterns): """Return a diff of the working copy. Args: base (unicode): The base revision to diff against. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing ``diff``, ``parent_diff``, and ``base_commit_id`` keys. In the case of TFS, the parent diff key will always be ``None``. 
""" # We pass results_unicode=False because that uses the filesystem # encoding, but the XML results we get should always be UTF-8, and are # well-formed with the encoding specified. We can therefore let # ElementTree determine how to decode it. status = self._run_tf(['status', '-format:xml'], results_unicode=False) root = ET.fromstring(status) diff = [] for pending_change in root.findall('./pending-changes/pending-change'): action = pending_change.attrib['change-type'].split(', ') new_filename = pending_change.attrib['server-item'].encode('utf-8') local_filename = pending_change.attrib['local-item'] old_version = pending_change.attrib['version'].encode('utf-8') file_type = pending_change.attrib.get('file-type') new_version = b'(pending)' old_data = b'' new_data = b'' copied = 'branch' in action if (not file_type or (not os.path.isfile(local_filename) and 'delete' not in action)): continue if (exclude_patterns and filename_match_any_patterns(local_filename, exclude_patterns, base_dir=None)): continue if 'rename' in action: old_filename = \ pending_change.attrib['source-item'].encode('utf-8') else: old_filename = new_filename if copied: old_filename = \ pending_change.attrib['source-item'].encode('utf-8') old_version = ( '%d' % self._convert_symbolic_revision( 'W', old_filename.decode('utf-8'))) if 'add' in action: old_filename = b'/dev/null' if file_type != 'binary': with open(local_filename) as f: new_data = f.read() old_data = b'' elif 'delete' in action: old_data = self._run_tf( ['print', '-version:%s' % old_version.decode('utf-8'), old_filename.decode('utf-8')], results_unicode=False) new_data = b'' new_version = b'(deleted)' elif 'edit' in action: old_data = self._run_tf( ['print', '-version:%s' % old_version.decode('utf-8'), old_filename.decode('utf-8')], results_unicode=False) with open(local_filename) as f: new_data = f.read() old_label = b'%s\t%s' % (old_filename, old_version) new_label = b'%s\t%s' % (new_filename, new_version) if copied: diff.append(b'Copied from: %s\n' % old_filename) if file_type == 'binary': if 'add' in action: old_filename = new_filename diff.append(b'--- %s\n' % old_label) diff.append(b'+++ %s\n' % new_label) diff.append(b'Binary files %s and %s differ\n' % (old_filename, new_filename)) elif old_filename != new_filename and old_data == new_data: # Renamed file with no changes diff.append(b'--- %s\n' % old_label) diff.append(b'+++ %s\n' % new_label) else: old_tmp = tempfile.NamedTemporaryFile(delete=False) old_tmp.write(old_data) old_tmp.close() new_tmp = tempfile.NamedTemporaryFile(delete=False) new_tmp.write(new_data) new_tmp.close() unified_diff = execute( ['diff', '-u', '--label', old_label.decode('utf-8'), '--label', new_label.decode('utf-8'), old_tmp.name, new_tmp.name], extra_ignore_errors=(1,), log_output_on_error=False, results_unicode=False) diff.append(unified_diff) os.unlink(old_tmp.name) os.unlink(new_tmp.name) if len(root.findall('./candidate-pending-changes/pending-change')) > 0: logging.warning('There are added or deleted files which have not ' 'been added to TFS. These will not be included ' 'in your review request.') return { 'diff': b''.join(diff), 'parent_diff': None, 'base_commit_id': base, } def _run_tf(self, args, **kwargs): """Run the "tf" command. Args: args (list): A list of arguments to pass to rb-tfs. **kwargs (dict): Additional keyword arguments for the :py:meth:`execute` call. Returns: unicode: The output of the command. 
""" cmdline = [self.tf, '-noprompt'] if getattr(self.options, 'tfs_login', None): cmdline.append('-login:%s' % self.options.tfs_login) cmdline += args # Use / style arguments when running on windows. if sys.platform.startswith('win'): for i, arg in enumerate(cmdline): if arg.startswith('-'): cmdline[i] = '/' + arg[1:] return execute(cmdline, ignore_errors=True, **kwargs) class TFHelperWrapper(object): """Implementation wrapper using our own helper.""" def __init__(self, helper_path, config=None, options=None): """Initialize the wrapper. Args: helper_path (unicode): The path to the helper binary. config (dict, optional): The loaded configuration. options (argparse.Namespace, optional): The command line options. """ self.helper_path = helper_path self.config = config self.options = options def get_local_path(self): """Return the local path to the working tree. Returns: unicode: The filesystem path of the repository on the client system. """ rc, path, errors = self._run_helper(['get-collection'], ignore_errors=True) if rc == 0: return path.strip() return None def get_repository_info(self): """Return repository information for the current working tree. Returns: rbtools.clients.RepositoryInfo: The repository info structure. """ path = self.get_local_path() if path: return RepositoryInfo(path=path, local_path=path) return None def parse_revision_spec(self, revisions): """Parse the given revision spec. Args: revisions (list of unicode): A list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use the TFS-native syntax of ``r1~r2``. Versions passed in can be any versionspec, such as a changeset number, ``L``-prefixed label name, ``W`` (latest workspace version), or ``T`` (latest upstream version). Returns: dict: A dictionary with the following keys: ``base`` (:py:class:`unicode`): A revision to use as the base of the resulting diff. ``tip`` (:py:class:`unicode`): A revision to use as the tip of the resulting diff. ``parent_base`` (:py:class:`unicode`, optional): The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for "base" and the passed-in revision for "tip". If zero revisions are passed in, this will return revisions relevant for the "current change" (changes in the work folder which have not yet been checked in). Raises: rbtools.clients.errors.TooManyRevisionsError: Too many revisions were specified. rbtools.clients.errors.InvalidRevisionSpecError: The given revision spec could not be parsed. """ if len(revisions) > 2: raise TooManyRevisionsError rc, revisions, errors = self._run_helper( ['parse-revision'] + revisions, split_lines=True) if rc == 0: return { 'base': revisions[0].strip(), 'tip': revisions[1].strip() } else: raise InvalidRevisionSpecError('\n'.join(errors)) def diff(self, revisions, include_files, exclude_patterns): """Return the generated diff. Args: revisions (dict): A dictionary containing ``base`` and ``tip`` keys. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing the following keys: ``diff`` (:py:class:`bytes`): The contents of the diff to upload. 
``base_commit_id` (:py:class:`unicode`, optional): The ID of the commit that the change is based on, if available. This is necessary for some hosting services that don't provide individual file access. Raises: rbtools.clients.errors.SCMError: Something failed when creating the diff. """ base = revisions['base'] tip = revisions['tip'] rc, diff, errors = self._run_helper(['diff', '--', base, tip], ignore_errors=True, results_unicode=False, log_output_on_error=False) if rc in (0, 2): if rc == 2: # Magic return code that means success, but there were # un-tracked files in the working directory. logging.warning('There are added or deleted files which have ' 'not been added to TFS. These will not be ' 'included in your review request.') return { 'diff': diff, 'parent_diff': None, 'base_commit_id': None, } else: raise SCMError(errors.strip()) def _run_helper(self, args, **kwargs): """Run the rb-tfs binary. Args: args (list): A list of arguments to pass to rb-tfs. **kwargs (dict): Additional keyword arguments for the :py:meth:`execute` call. Returns: tuple: A 3-tuple of return code, output, and error output. The output and error output may be lists depending on the contents of ``kwargs``. """ if len(args) == 0: raise ValueError('_run_helper called without any arguments') cmdline = ['java'] cmdline += getattr(self.config, 'JAVA_OPTS', ['-Xmx2048M']) cmdline += ['-jar', self.helper_path] cmdline.append(args[0]) if self.options: if self.options.debug: cmdline.append('--debug') if getattr(self.options, 'tfs_shelveset_owner', None): cmdline += ['--shelveset-owner', self.options.tfs_shelveset_owner] if getattr(self.options, 'tfs_login', None): cmdline += ['--login', self.options.tfs_login] cmdline += args[1:] return execute(cmdline, with_errors=False, results_unicode=False, return_error_code=True, return_errors=True, **kwargs) class TFSClient(SCMClient): """A client for Team Foundation Server.""" name = 'Team Foundation Server' server_tool_names = 'Team Foundation Server' supports_diff_exclude_patterns = True supports_patch_revert = True def __init__(self, config=None, options=None): """Initialize the client. Args: config (dict, optional): The loaded configuration. options (argparse.Namespace, optional): The command line options. """ super(TFSClient, self).__init__(config, options) # There are three different backends that can be used to access the # underlying TFS repository. We try them in this order: # - VS2017+ tf.exe # - Our custom rb-tfs wrapper, built on the TFS Java SDK # - Team Explorer Everywhere's tf command use_tf_exe = False try: tf_vc_output = execute(['tf', 'vc', 'help'], ignore_errors=True, none_on_ignored_error=True) # VS2015 has a tf.exe but it's not good enough. if (tf_vc_output and 'Version Control Tool, Version 15' in tf_vc_output): use_tf_exe = True except OSError: pass helper_path = os.path.join(user_data_dir('rbtools'), 'packages', 'tfs', 'rb-tfs.jar') if use_tf_exe: self.tf_wrapper = TFExeWrapper(config, options) elif os.path.exists(helper_path): self.tf_wrapper = TFHelperWrapper(helper_path, config, options) else: self.tf_wrapper = TEEWrapper(config, options) def get_local_path(self): """Return the local path to the working tree. Returns: unicode: The filesystem path of the repository on the client system. """ return self.tf_wrapper.get_local_path() def get_repository_info(self): """Return repository information for the current working tree. Returns: rbtools.clients.RepositoryInfo: The repository info structure. 
""" return self.tf_wrapper.get_repository_info() def parse_revision_spec(self, revisions): """Parse the given revision spec. Args: revisions (list of unicode): A list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use the TFS-native syntax of ``r1~r2``. Versions passed in can be any versionspec, such as a changeset number, ``L``-prefixed label name, ``W`` (latest workspace version), or ``T`` (latest upstream version). Returns: dict: A dictionary with the following keys: ``base`` (:py:class:`unicode`): A revision to use as the base of the resulting diff. ``tip`` (:py:class:`unicode`): A revision to use as the tip of the resulting diff. ``parent_base`` (:py:class:`unicode`, optional): The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for "base" and the passed-in revision for "tip". If zero revisions are passed in, this will return revisions relevant for the "current change" (changes in the work folder which have not yet been checked in). Raises: rbtools.clients.errors.TooManyRevisionsError: Too many revisions were specified. rbtools.clients.errors.InvalidRevisionSpecError: The given revision spec could not be parsed. """ return self.tf_wrapper.parse_revision_spec(revisions) def diff(self, revisions, include_files=[], exclude_patterns=[], no_renames=False, extra_args=[]): """Return the generated diff. Args: revisions (dict): A dictionary containing ``base`` and ``tip`` keys. include_files (list, optional): A list of file paths to include in the diff. exclude_patterns (list, optional): A list of file paths to exclude from the diff. extra_args (list, optional): Unused. Returns: dict: A dictionary containing the following keys: ``diff`` (:py:class:`bytes`): The contents of the diff to upload. ``base_commit_id` (:py:class:`unicode`, optional): The ID of the commit that the change is based on, if available. This is necessary for some hosting services that don't provide individual file access. """ return self.tf_wrapper.diff(revisions, include_files, exclude_patterns)
nilq/baby-python
python
########### IMPORTING THE REQURIED LIBRARIES ########### from __future__ import print_function from bs4 import BeautifulSoup as soup from random import choice from terminaltables import AsciiTable from .proxy import _proxy from .utils import * import requests ######## DECLARING THE CLASS FOR GETTING COVID-19 DATA ######## class Corona: proxy = _proxy() ######## GETTING THE HTML PAGE THROUGH GET REQUEST ######## def getPageResponse( self, url ): page = None try: resp = requests.get( url, timeout = MAX_TIMEOUT ) page = soup( resp.text, 'lxml' ) except requests.ConnectionError: print( "\n###### STARTING RANDOM PROXIES #######\n" ); resp = self.proxy.loadDataByIPRotation( url ) page = soup( resp.text, 'lxml' ) return page def extractCounts( self, page, choice = "w" ): total_cases = None total_deaths = None total_cured = None if( choice == "w" ): total_cases = page.findAll( "div", { "id": "maincounter-wrap" } )[ 0 ].div.text.strip() total_deaths = page.findAll( "div", { "id": "maincounter-wrap" } )[ 1 ].div.text.strip() total_cured = page.findAll( "div", { "id": "maincounter-wrap" } )[ 2 ].div.text.strip() elif( choice == "c" ): total_cases = int( extractNumbers( page.findAll( "div",{ "class": "table-responsive" } )[ 7 ].tbody.findAll( "tr" )[ -2 : -1 ][ 0 ].findAll( "td" )[ 1 ].text.strip() ) ) total_cases += int( page.findAll( "div",{ "class": "table-responsive" } )[ 7 ].tbody.findAll( "tr" )[ -2 : -1 ][ 0 ].findAll( "td" )[ 2 ].text.strip() ) total_deaths = int( page.findAll( "div",{ "class": "table-responsive" } )[ 7 ].tbody.findAll( "tr" )[ -2 : -1 ][ 0 ].findAll( "td" )[ 4 ].text.strip() ) total_cured = int( page.findAll( "div",{ "class": "table-responsive" } )[ 7 ].tbody.findAll( "tr" )[ -2 : -1 ][ 0 ].findAll( "td" )[ 3 ].text.strip() ) counts = AsciiTable( [ [ "Total Cases", "Total Deaths", "Total Cured" ], [ total_cases, total_deaths, total_cured ] ] ) return counts ########## EXTRACTING THE TABLE ########### def extractTableData( self, page, choice = "w" ): table = None table_heading = None table_content = None if choice == "w": try: table = page.find( "table",{ "id": "main_table_countries_today" } ) # table_heading = [ item.text.strip() for item in table.thead.tr if item != "\n" ] table_heading = [ "Country", "Confirmed\nCases", "New Cases", "Confirmed\nDeaths", "New Deaths", "Recovered", "Active cases", "Serious/\nCritical cases" ]; table_content = [] for rows in table.tbody: data = [ item.text.strip() for item in rows if item != "\n" ] if data: table_content.append( data[ : -2 ] ) table_content.insert( 0, table_heading ) table = AsciiTable( table_content ) except: print( "\nSource page format has changed." ) exit(); elif choice == "c": try: table = page.findAll( "div",{ "class": "table-responsive" } )[ 7 ] # table_heading = [ item.text.strip() for item in table.thead.tr if item != "\n" ] table_heading = [ "Sl. No.", "States/\nUnion Territories", "Confirmed cases\n( Indian National )", "Confirmed cases\n( Foreign National )", "Cured/Discharged/\nMigrated", "Death" ]; table_content = [] for rows in table.tbody: data = [ item.text.strip() for item in rows if item != "\n" ] if data: table_content.append( data ) table_content.insert( 0, table_heading ) table = AsciiTable( table_content[ : -2 ] ) except: print( "\nSource page format has changed." ) exit(); return table
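# Usage sketch for the Corona scraper above. The class takes whatever page URL
# the caller passes in; the worldometers URL below is an assumption that
# matches the "w" (worldwide) parsing branch. AsciiTable.table renders the
# result as text.
if __name__ == "__main__":
    corona = Corona()
    url = "https://www.worldometers.info/coronavirus/"
    page = corona.getPageResponse(url)
    print(corona.extractCounts(page, choice="w").table)
    print(corona.extractTableData(page, choice="w").table)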
nilq/baby-python
python
#!/usr/bin/env python3
import sys
import re

# www.hackerrank.com
# http://www.hackerrank.com
# Regex_Pattern = r'^\w{3}\W{1}\w+\W{1}\w{3}$'
Regex_Pattern = r'^\d{1}\w{4}\.$'

print(str(bool(re.search(Regex_Pattern, input()))).lower())
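# The pattern above accepts exactly one digit, then four word characters, then
# a literal dot. A couple of quick checks (illustrative inputs only):
import re

pattern = r'^\d{1}\w{4}\.$'
for s in ("3abc1.", "12345.", "3abc1", "abc12."):
    print(s, bool(re.search(pattern, s)))
# Expected: True, True, False, False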
nilq/baby-python
python
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file import os,re from waflib import Utils,Options,Context gnuopts=''' bindir, user commands, ${EXEC_PREFIX}/bin sbindir, system binaries, ${EXEC_PREFIX}/sbin libexecdir, program-specific binaries, ${EXEC_PREFIX}/libexec sysconfdir, host-specific configuration, ${PREFIX}/etc sharedstatedir, architecture-independent variable data, ${PREFIX}/com localstatedir, variable data, ${PREFIX}/var libdir, object code libraries, ${EXEC_PREFIX}/lib%s includedir, header files, ${PREFIX}/include oldincludedir, header files for non-GCC compilers, /usr/include datarootdir, architecture-independent data root, ${PREFIX}/share datadir, architecture-independent data, ${DATAROOTDIR} infodir, GNU "info" documentation, ${DATAROOTDIR}/info localedir, locale-dependent data, ${DATAROOTDIR}/locale mandir, manual pages, ${DATAROOTDIR}/man docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE} htmldir, HTML documentation, ${DOCDIR} dvidir, DVI documentation, ${DOCDIR} pdfdir, PDF documentation, ${DOCDIR} psdir, PostScript documentation, ${DOCDIR} '''%Utils.lib64() _options=[x.split(', ')for x in gnuopts.splitlines()if x] def configure(conf): def get_param(varname,default): return getattr(Options.options,varname,'')or default env=conf.env env.LIBDIR=env.BINDIR=[] env.EXEC_PREFIX=get_param('EXEC_PREFIX',env.PREFIX) env.PACKAGE=getattr(Context.g_module,'APPNAME',None)or env.PACKAGE complete=False iter=0 while not complete and iter<len(_options)+1: iter+=1 complete=True for name,help,default in _options: name=name.upper() if not env[name]: try: env[name]=Utils.subst_vars(get_param(name,default).replace('/',os.sep),env) except TypeError: complete=False if not complete: lst=[x for x,_,_ in _options if not env[x.upper()]] raise conf.errors.WafError('Variable substitution failure %r'%lst) def options(opt): inst_dir=opt.add_option_group('Installation prefix','By default, "waf install" will put the files in\ "/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\ than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"') for k in('--prefix','--destdir'): option=opt.parser.get_option(k) if option: opt.parser.remove_option(k) inst_dir.add_option(option) inst_dir.add_option('--exec-prefix',help='installation prefix for binaries [PREFIX]',default='',dest='EXEC_PREFIX') dirs_options=opt.add_option_group('Installation directories') for name,help,default in _options: option_name='--'+name str_default=default str_help='%s [%s]'%(help,re.sub(r'\$\{([^}]+)\}',r'\1',str_default)) dirs_options.add_option(option_name,help=str_help,default='',dest=name.upper())
nilq/baby-python
python
import logging import numpy import parse_cif_file import os import sys from operator import itemgetter def get_dihedral_angle1(p0,p1,p2,p3): """http://stackoverflow.com/q/20305272/1128289""" p = [p0, p1, p2, p3] b = p[:-1] - p[1:] b[0] *= -1 v = numpy.array( [ v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]] ] ) # Normalize vectors v /= numpy.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1) b1 = b[1] / np.linalg.norm(b[1]) x = numpy.dot(v[0], v[1]) m = numpy.cross(v[0], b1) y = numpy.dot(m, v[1]) return numpy.degrees(np.arctan2( y, x )) def get_dihedral_angle2(p0,p1,p2,p3): """formula from Wikipedia article on "Dihedral angle"; formula was removed from the most recent version of article (no idea why, the article is a mess at the moment) but the formula can be found in at this permalink to an old version of the article: https://en.wikipedia.org/w/index.php?title=Dihedral_angle&oldid=689165217#Angle_between_three_vectors uses 1 sqrt, 3 cross products""" # p0 = p[0] # p1 = p[1] # p2 = p[2] # p3 = p[3] b0 = -1.0*(p1 - p0) b1 = p2 - p1 b2 = p3 - p2 b0xb1 = numpy.cross(b0, b1) b1xb2 = numpy.cross(b2, b1) b0xb1_x_b1xb2 = numpy.cross(b0xb1, b1xb2) y = numpy.dot(b0xb1_x_b1xb2, b1)*(1.0/numpy.linalg.norm(b1)) x = numpy.dot(b0xb1, b1xb2) return numpy.degrees(numpy.arctan2(y, x)) def get_dihedral_angle(p0, p1, p2, p3): """Praxeolitic formula 1 sqrt, 1 cross product""" b0 = -1.0 * (p1 - p0) b1 = p2 - p1 b2 = p3 - p2 # normalize b1 so that it does not influence magnitude of vector # rejections that come next b1 /= numpy.linalg.norm(b1) # vector rejections # v = projection of b0 onto plane perpendicular to b1 # = b0 minus component that aligns with b1 # w = projection of b2 onto plane perpendicular to b1 # = b2 minus component that aligns with b1 v = b0 - numpy.dot(b0, b1) * b1 w = b2 - numpy.dot(b2, b1) * b1 # angle between v and w in a plane is the torsion angle # v and w may not be normalized but that's fine since tan is y/x x = numpy.dot(v, w) y = numpy.dot(numpy.cross(b1, v), w) return numpy.degrees(numpy.arctan2(y, x)) def calculate_dihedral_angles(cif_file_name, in_dir, out_dir): cif_file = '{}/{}'.format(in_dir, cif_file_name) cif,bf,ent_id = parse_cif_file.get_coordinates(cif_file) #cif= parse_cif_file.get_coordinates(cif_file) outfilename = '{}/{}.csv'.format(out_dir, cif_file_name.split(".cif")[0]) fo = open(outfilename, 'w') for model in cif.keys(): seq = sorted(list(set([(i[0], i[1], i[2]) for i in cif[model].keys()])),key=itemgetter(1, 0)) for r in range(1, len(seq) - 1): phi_atoms = ((seq[r - 1][0], seq[r - 1][1], seq[r - 1][2], 'C'), (seq[r][0], seq[r][1], seq[r][2], 'N'), (seq[r][0], seq[r][1], seq[r][2], 'CA'), (seq[r][0], seq[r][1], seq[r][2], 'C')) psi_atoms = ((seq[r][0], seq[r][1], seq[r][2], 'N'), (seq[r][0], seq[r][1], seq[r][2], 'CA'), (seq[r][0], seq[r][1], seq[r][2], 'C'), (seq[r + 1][0], seq[r + 1][1], seq[r + 1][2], 'N')) try: phi = get_dihedral_angle2(cif[model][phi_atoms[0]], cif[model][phi_atoms[1]], cif[model][phi_atoms[2]], cif[model][phi_atoms[3]]) psi = get_dihedral_angle2(cif[model][psi_atoms[0]], cif[model][psi_atoms[1]], cif[model][psi_atoms[2]], cif[model][psi_atoms[3]]) b=bf[model][phi_atoms[1]] if seq[r+1][2] == 'PRO': rtype='XPR' elif r==1 or r==(len(seq)-2): rtype='TER' elif seq[r][2] == 'GLY': rtype='GLY' else: rtype='REG' fo.write('{},{},{},{},{},{},{}\n'.format(seq[r][0], seq[r][2], round(phi,4), round(psi,4),b,rtype,ent_id)) except KeyError: logging.warning('Coordinate data not found for {}/{}'.format(phi_atoms, psi_atoms)) if __name__ == 
"__main__": # calculate_dihedral_angles('4txr.cif','/Users/kumaran/Downloads','/Users/kumaran') in_path = sys.argv[1] out_path = sys.argv[2] flist = [_ for _ in os.listdir(in_path) if _.endswith('.cif')] for fname in flist: print (fname) logging.info('Working on {}'.format(fname)) calculate_dihedral_angles(fname, in_path, out_path)
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
cdeweb.errors
~~~~~~~~~~~~~

Error views.

:copyright: Copyright 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging

from flask import render_template, request, jsonify

from . import app


log = logging.getLogger(__name__)


def get_message(e):
    if hasattr(e, 'data') and 'messages' in e.data:
        return e.data['messages']
    if hasattr(e, 'description'):
        return e.description
    elif hasattr(e, 'msg'):
        return e.msg
    elif hasattr(e, 'message'):
        return e.message
    else:
        return repr(e)


@app.errorhandler(400)
def bad_request(e):
    if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
        response = jsonify({'error': 'bad request', 'message': get_message(e)})
        response.status_code = 400
        return response
    return render_template('400.html', description=get_message(e)), 400


@app.errorhandler(403)
def forbidden(e):
    if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
        response = jsonify({'error': 'forbidden', 'message': get_message(e)})
        response.status_code = 403
        return response
    return render_template('403.html', description=get_message(e)), 403


@app.errorhandler(404)
def page_not_found(e):
    if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
        response = jsonify({'error': 'not found', 'message': get_message(e)})
        response.status_code = 404
        return response
    return render_template('404.html', description=get_message(e)), 404


@app.errorhandler(422)
def unprocessable_entity(e):
    if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
        response = jsonify({'error': 'unprocessable entity', 'message': get_message(e)})
        response.status_code = 422
        return response
    return render_template('422.html', description=get_message(e)), 422


@app.errorhandler(500)
def internal_server_error(e):
    if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
        response = jsonify({'error': 'internal server error', 'message': get_message(e)})
        response.status_code = 500
        return response
    return render_template('500.html', description=get_message(e)), 500


@app.errorhandler(503)
def service_unavailable(e):
    if request.accept_mimetypes.accept_json and not request.accept_mimetypes.accept_html:
        response = jsonify({'error': 'service unavailable', 'message': get_message(e)})
        response.status_code = 503
        return response
    return render_template('503.html', description=get_message(e)), 503
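# The handlers above negotiate between HTML and JSON from the Accept header.
# A small client-side sketch of that behaviour; the host/port is a placeholder
# assumption for a local development deployment of the app.
import requests

resp = requests.get('http://localhost:5000/no-such-page',
                    headers={'Accept': 'application/json'})
print(resp.status_code)  # 404
print(resp.json())       # {'error': 'not found', 'message': ...}

resp = requests.get('http://localhost:5000/no-such-page',
                    headers={'Accept': 'text/html'})
print(resp.status_code)  # 404, rendered from 404.html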
nilq/baby-python
python
from setuptools import setup


setup(
    name='zipf',
    version='0.1',
    author='Amira Khan',
    packages=['zipf'],
    install_requires=[
        'matplotlib',
        'pandas',
        'scipy',
        'pyyaml',
        'pytest'],
    entry_points={
        'console_scripts': [
            'countwords = zipf.countwords:main',
            'collate = zipf.collate:main',
            'plotcounts = zipf.plotcounts:main']})
nilq/baby-python
python
from abc import ABC, abstractmethod

import ccxt
from PySide6 import QtWidgets
# import ccxt.async_support as ccxt
from XsCore import xsIni
from ccxt import Exchange


class PluginBase(ABC):
    name: str = ""
    display_name: str = ""
    info: str = ""
    # Usage documentation; if not overridden, the plugin has no docs.
    # Docs are Markdown files stored under database/plugin_help.
    help_doc = ""

    def __init__(self):
        self.exchange: Exchange = None

    @abstractmethod
    def get_ui(self) -> QtWidgets.QVBoxLayout:
        pass

    def init_exchange(self, ex_name):
        # Hard-coded proxy/debug config kept for reference:
        # config = {
        #     'proxies': {
        #         'http': 'http://127.0.0.1:41081',
        #         'https': 'http://127.0.0.1:41081'
        #     },
        #     'verbose': True
        # }
        config = {}
        value = xsIni.getAppValue('api_key')
        if value != '':
            config['apiKey'] = value
        value = xsIni.getAppValue('api_secret')
        if value != '':
            config['secret'] = value
        value = xsIni.getAppValue('api_changepass')
        if value != '':
            config['password'] = value
        http = xsIni.getAppValue('daiLi_http')
        https = xsIni.getAppValue('daiLi_https')
        if http != '' and https != '':
            config['proxies'] = {
                'http': http,
                'https': https
            }
        value = xsIni.getAppBool('is_print_log')
        if value:
            config['verbose'] = True
        self.exchange: Exchange = getattr(ccxt, ex_name)(config)
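# A minimal, hypothetical concrete plugin built on PluginBase above; the
# plugin names, exchange id, and symbol are placeholders, not values from the
# original project.
from PySide6 import QtWidgets


class TickerPlugin(PluginBase):
    name = "ticker"
    display_name = "Ticker viewer"
    info = "Shows the last price for one symbol."

    def get_ui(self) -> QtWidgets.QVBoxLayout:
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(QtWidgets.QLabel("Symbol: BTC/USDT"))
        return layout


# Usage sketch:
# plugin = TickerPlugin()
# plugin.init_exchange('binance')  # any ccxt exchange id
# print(plugin.exchange.fetch_ticker('BTC/USDT')['last'])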
nilq/baby-python
python
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Enable periodic transmission of DB and job-produced content to BigQuery.""" __author__ = [ 'Michael Gainer (mgainer@google.com)', ] import base64 import collections import copy import datetime import logging import os import random import re import sys import traceback import apiclient import httplib2 import oauth2client from common import catch_and_log from common import crypto from common import schema_fields from common import utils as common_utils from controllers import sites from controllers import utils from models import analytics from models import courses from models import custom_modules from models import data_sources from models import jobs from models import roles from models import transforms from modules.dashboard import dashboard from modules.dashboard import tabs from google.appengine.ext import db from google.appengine.ext import deferred # CourseBuilder setup strings XSRF_ACTION_NAME = 'data_pump' DASHBOARD_ACTION = 'data_pump' # Separate permission to be able to push user data delegable to non-super-users ACCESS_PERMISSION = 'push_data' ACCESS_PERMISSION_DESCRIPTION = 'Can push user data outside CourseBuilder.' # Connection parameters for discovering and auth to BigQuery. BIGQUERY_RW_SCOPE = 'https://www.googleapis.com/auth/bigquery' BIGQUERY_API_NAME = 'bigquery' BIGQUERY_API_VERSION = 'v2' # API endpoint for initiating a retryable upload. BIGQUERY_API_UPLOAD_URL_PREFIX = ( 'https://www.googleapis.com/upload/bigquery/v2/projects/') # UI for BigQuery interactive queries BIGQUERY_UI_URL_PREFIX = 'https://bigquery.cloud.google.com/table/' # Max of about 20 min of retries (random exponential backoff from 2^1...2^MAX) MAX_CONSECUTIVE_FAILURES = 10 MAX_RETRY_BACKOFF_SECONDS = 600 # Config for secret PII_SECRET_LENGTH = 20 PII_SECRET_DEFAULT_LIFETIME = '30 days' # Constants for accessing job context settings map UPLOAD_URL = 'upload_url' LAST_START_OFFSET = 'last_start_offset' LAST_END_OFFSET = 'last_end_offset' LAST_PAGE_SENT = 'last_page_sent' LAST_PAGE_NUM_ITEMS = 'last_page_num_items' CONSECUTIVE_FAILURES = 'consecutive_failures' FAILURE_REASON = 'failure_reason' ITEMS_UPLOADED = 'items_uploaded' PII_SECRET = 'pii_secret' # Constants for items within course settings schema DATA_PUMP_SETTINGS_SCHEMA_SECTION = 'data_pump' PROJECT_ID = 'project_id' DATASET_NAME = 'dataset_name' JSON_KEY = 'json_key' TABLE_LIFETIME = 'table_lifetime' PII_ENCRYPTION_TOKEN = 'pii_encryption_token' def _get_data_source_class_by_name(name): source_classes = data_sources.Registry.get_rest_data_source_classes() for source_class in source_classes: if source_class.__name__ == name and source_class.exportable(): return source_class return None class DataPumpJob(jobs.DurableJobBase): @staticmethod def get_description(): """Job to push data from CourseBuilder to BigQuery. The job operates from the deferred queue, and takes advantage of the underlying TaskQueue retry and backoff support. 
One job is created for each DataSource (see models/data_source). This job moves data from the paginated data source up to Google BigQuery via the retryable POST method. Jobs here run on the TaskQueue named "default along with all other CB deferred tasks because that queue has a reasonable set of config parameters. However, there is nothing about these jobs that requires interleaving with others if queue parameters need to be tuned. Functional tests will need to be changed to have execute_all_deferred_tasks() pass the name of the new queue. """ def __init__(self, app_context, data_source_class_name): if not _get_data_source_class_by_name(data_source_class_name): raise ValueError( 'No such data source "%s", or data source is not marked ' 'as exportable.' % data_source_class_name) super(DataPumpJob, self).__init__(app_context) self._data_source_class_name = data_source_class_name self._job_name = 'job-datapump-%s-%s' % (self._data_source_class_name, self._namespace) def non_transactional_submit(self): """Callback used when UI gesture indicates this job should start.""" sequence_num = super(DataPumpJob, self).non_transactional_submit() deferred.defer(self.main, sequence_num) return sequence_num def _mark_job_canceled(self, job, message, duration): """Override default behavior of setting job.output to error string.""" if job.output: job_context, data_source_context = self._load_state( job, job.sequence_num) else: job_context = self._build_job_context(None, None) data_source_context = self._build_data_source_context() job_context[FAILURE_REASON] = message self._save_state(jobs.STATUS_CODE_FAILED, job, job.sequence_num, job_context, data_source_context, use_transaction=False) def _build_data_source_context(self): """Set up context class specific to data source type we pull from.""" data_source_class = _get_data_source_class_by_name( self._data_source_class_name) context_class = data_source_class.get_context_class() # TODO(mgainer): if we start getting timeout failures, perhaps learn # proper chunk size from history, rather than using default. default_chunk_size = data_source_class.get_default_chunk_size() return context_class.build_blank_default({}, default_chunk_size) def _build_job_context(self, upload_url, pii_secret): """Set up context object used to maintain this job's internal state.""" job_context = { UPLOAD_URL: upload_url, LAST_START_OFFSET: 0, LAST_END_OFFSET: -1, LAST_PAGE_SENT: -1, LAST_PAGE_NUM_ITEMS: 0, CONSECUTIVE_FAILURES: [], FAILURE_REASON: '', ITEMS_UPLOADED: 0, PII_SECRET: pii_secret, } return job_context def _load_state(self, job, sequence_num): if job.sequence_num != sequence_num: raise ValueError( 'Abandoning stale job with sequence %d; ' 'there is a new job with sequence %d running.' % ( sequence_num, job.sequence_num)) data_source_class = _get_data_source_class_by_name( self._data_source_class_name) content = transforms.loads(job.output) job_context = content['job_context'] data_source_context_class = data_source_class.get_context_class() data_source_context = data_source_context_class.build_from_dict( content['data_source_context']) return job_context, data_source_context def _save_state(self, state, job, sequence_num, job_context, data_source_context, use_transaction=True): # Job context may have been made with blank values for these two items. 
# Recover them from the previous context if they are not set (and if # the previous context is present enough to have them) try: prev_job_context, _ = self._load_state(job, sequence_num) if not job_context[PII_SECRET]: job_context[PII_SECRET] = prev_job_context[PII_SECRET] if not job_context[UPLOAD_URL]: job_context[UPLOAD_URL] = prev_job_context[UPLOAD_URL] except (ValueError, AttributeError): pass # Convert data source context object to plain dict. data_source_class = _get_data_source_class_by_name( self._data_source_class_name) context_class = data_source_class.get_context_class() data_source_context_dict = context_class.save_to_dict( data_source_context) # Set job object state variables. now = datetime.datetime.now() job.output = transforms.dumps({ 'job_context': job_context, 'data_source_context': data_source_context_dict, }) job.status_code = state job.execution_time_sec += int((now - job.updated_on).total_seconds()) job.updated_on = now logging.info('Data pump job %s saving contexts: %s %s', self._job_name, str(job_context), str(data_source_context)) # Using _update in DurableJobEntity # pylint: disable=protected-access if use_transaction: xg_on = db.create_transaction_options(xg=True) db.run_in_transaction_options( xg_on, jobs.DurableJobEntity._update, self._job_name, sequence_num, job.status_code, job.output, job.execution_time_sec) else: jobs.DurableJobEntity._update(self._job_name, sequence_num, job.status_code, job.output, job.execution_time_sec) @classmethod def _parse_pii_encryption_token(cls, token): parts = token.split('/') return (parts[0], datetime.datetime(year=1970, month=1, day=1) + datetime.timedelta(seconds=int(parts[1]))) @classmethod def _is_pii_encryption_token_valid(cls, token): try: _, valid_until_date = cls._parse_pii_encryption_token(token) return valid_until_date > datetime.datetime.now() except ValueError: return False @classmethod def _build_new_pii_encryption_token(cls, timedelta_string): hmac_secret = base64.urlsafe_b64encode( os.urandom(int(PII_SECRET_LENGTH * 0.75))) table_lifetime_seconds = common_utils.parse_timedelta_string( timedelta_string).total_seconds() unix_epoch = datetime.datetime(year=1970, month=1, day=1) now = datetime.datetime.now() table_lifetime_timedelta = datetime.timedelta( seconds=table_lifetime_seconds) valid_until_timestamp = int( (now - unix_epoch + table_lifetime_timedelta).total_seconds()) pii_encryption_token = '%s/%d' % (hmac_secret, valid_until_timestamp) return pii_encryption_token @classmethod def _get_pii_token(cls, app_context): """Retrieve or generate and save a secret used to encrypt exported PII. All PII data in objects exported to BigQuery is either suppressed or transformed via a one-way hash using a secret value. The point of the transformation is so that exported data cannot trivially be correlated to any individual's data in CourseBuilder, but records in exported data encoded using the same key can. (E.g., a user_id is the key for students; this key should be usable to correlate a user's language preference with his test scores.) Once data has been exported from CourseBuilder to BigQuery, the internal permissions from CourseBuilder no longer apply. To minimize the ability of those with access to the data to perform long-term correlations that might identify individuals, the secret used to encode PII is automatically rotated on a period determined by the course settings. We re-use the expiration period for tables, or default to 30 days if no period is selected. 
The format for the stored setting is a string composed of: - A randomly-generated secret encoded as a base-64 string - A slash character ('/') - A Unix timestamp indicating the expiration date of the token. The expiration date approach is chosen so that within the expiration period, different data sources can be re-exported multiple times, but still correlated with one another in BigQuery. Upon expiration, a new token is generated and used. Data exported before and after the changeover cannot be directly correlated. (It may be possible to force a correlation if old versions of the data tables were downloaded by comparing non-key fields in the old/new versions, if the non-key fields are sufficiently discriminative) Args: app_context: Standard CB application context object. Returns: Secret string used for encoding PII data upon export. """ course_settings = app_context.get_environ() pump_settings = course_settings.get(DATA_PUMP_SETTINGS_SCHEMA_SECTION, {}) pii_encryption_token = pump_settings.get(PII_ENCRYPTION_TOKEN) if (not pii_encryption_token or not cls._is_pii_encryption_token_valid(pii_encryption_token)): pii_encryption_token = cls._build_new_pii_encryption_token( pump_settings.get(TABLE_LIFETIME, PII_SECRET_DEFAULT_LIFETIME)) pump_settings[PII_ENCRYPTION_TOKEN] = pii_encryption_token course = courses.Course(None, app_context=app_context) course.save_settings(course_settings) return pii_encryption_token @classmethod def _get_pii_secret(cls, app_context): secret, _ = cls._parse_pii_encryption_token( cls._get_pii_token(app_context)) return secret def _get_bigquery_settings(self, app_context): """Pull settings necessary for using BigQuery from DB. This is nice and verbose and paranoid, so that if there is any misconfiguration, the end-user gets a nice message that's specific about the particular problem, rather than just a KeyError or ValueError. Args: app_context: The standard app context for the course in question. Returns: A namedtuple containing private_key, client_email, project_id and dataset_id members. The first three are required to connect to BigQuery, and the last is the dataset within BigQuery to which the data pump will restrict itself for insert/write/delete operations. Raises: ValueError: if any expected element is missing or malformed. 
""" pump_settings = app_context.get_environ().get( DATA_PUMP_SETTINGS_SCHEMA_SECTION, {}) dataset_id = ( pump_settings.get(DATASET_NAME) or re.sub('[^a-z_:-]', '', app_context.get_slug().lower()) or 'course') project_id = pump_settings.get(PROJECT_ID) if not project_id: raise ValueError('Cannot pump data without a course settings value ' 'for the target Google BigQuery project ID') json_key = pump_settings.get(JSON_KEY) if not json_key: raise ValueError('Cannot pump data without a JSON client key ' 'allowing access to the target Google BigQuery ' 'project') try: json_key = transforms.loads(json_key) except ValueError: raise ValueError('Cannot decode JSON client key for the target ' 'Google BigQuery project.') if 'private_key' not in json_key or 'client_email' not in json_key: raise ValueError('The JSON client key for the target Google ' 'BigQuery project does not seem to be well ' 'formed; either the "private_key" or ' '"client_email" field is missing.') table_lifetime_seconds = common_utils.parse_timedelta_string( pump_settings.get(TABLE_LIFETIME, '')).total_seconds() Settings = collections.namedtuple('Settings', [ 'private_key', 'client_email', PROJECT_ID, 'dataset_id', 'table_lifetime_seconds']) return Settings(json_key['private_key'], json_key['client_email'], project_id, dataset_id, table_lifetime_seconds) def _get_bigquery_service(self, bigquery_settings): """Get BigQuery API client plus HTTP client with auth credentials.""" credentials = oauth2client.client.SignedJwtAssertionCredentials( bigquery_settings.client_email, bigquery_settings.private_key, BIGQUERY_RW_SCOPE) http = httplib2.Http() http = credentials.authorize(http) return apiclient.discovery.build(BIGQUERY_API_NAME, BIGQUERY_API_VERSION, http=http), http def _maybe_create_course_dataset(self, service, bigquery_settings): """Create dataset within BigQuery if it's not already there.""" datasets = service.datasets() try: datasets.get(projectId=bigquery_settings.project_id, datasetId=bigquery_settings.dataset_id).execute() except apiclient.errors.HttpError, ex: if ex.resp.status != 404: raise datasets.insert(projectId=bigquery_settings.project_id, body={ 'datasetReference': { 'projectId': bigquery_settings.project_id, 'datasetId': bigquery_settings.dataset_id }}).execute() def _maybe_delete_previous_table(self, tables, bigquery_settings): """Delete previous version of table for data source, if it exists.""" # TODO(mgainer): Make clobbering old table and replacing optional. # For now, we assume people will be writing queries in terms of # a single table name, and will be irritated at having to change # their queries all the time if we add a timestamp to the table # name. And no, AFAICT, the BigQuery API does not permit renaming # of tables, just creation and deletion. table_name = self._data_source_class_name.replace('DataSource', '') try: tables.delete(projectId=bigquery_settings.project_id, datasetId=bigquery_settings.dataset_id, tableId=table_name).execute() except apiclient.errors.HttpError, ex: if ex.resp.status != 404: raise def _json_schema_member_to_bigquery_schema(self, name, structure): item = {'name': name} if 'description' in structure: item['description'] = structure['description'] if 'properties' in structure: # It's a sub-registry. 
item['type'] = 'RECORD' item['mode'] = 'NULLABLE' item['fields'] = self._json_schema_to_bigquery_schema( structure['properties']) elif 'items' in structure: # It's an array if 'items' in structure['items']: raise ValueError( 'BigQuery schema descriptions do not support nesting ' 'arrays directly in other arrays. Instead, nest ' 'structures in arrays; those structures may contain ' 'sub-arrays. Problem arises trying to pump data for %s' % self._data_source_class_name) item = self._json_schema_member_to_bigquery_schema( name, structure['items']) item['mode'] = 'REPEATED' else: item['mode'] = ('NULLABLE' if structure.get('optional') else 'REQUIRED') if structure['type'] in ('string', 'text', 'html', 'url', 'file'): item['type'] = 'STRING' elif structure['type'] in 'integer': item['type'] = 'INTEGER' elif structure['type'] in 'number': item['type'] = 'FLOAT' elif structure['type'] in 'boolean': item['type'] = 'BOOLEAN' elif structure['type'] in ('date', 'datetime'): item['type'] = 'TIMESTAMP' else: raise ValueError( 'Unrecognized schema scalar type "%s" ' 'when trying to make schema for data-pumping %s' % ( structure['type'], self._data_source_class_name)) return item def _json_schema_to_bigquery_schema(self, json_schema_dict): fields = [] for name, structure in json_schema_dict.iteritems(): fields.append(self._json_schema_member_to_bigquery_schema( name, structure)) return fields def _create_data_table(self, tables, bigquery_settings, schema): """Instantiate and provide schema for new BigQuery table.""" table_name = self._data_source_class_name.replace('DataSource', '') request = { 'kind': 'bigquery#table', 'tableReference': { 'projectId': bigquery_settings.project_id, 'datasetId': bigquery_settings.dataset_id, 'tableId': table_name, }, 'schema': {'fields': schema} } # If user has requested it, set the time at which table should be # reclaimed (as milliseconds since Unix epoch). if bigquery_settings.table_lifetime_seconds: now = datetime.datetime.now() expiration_delta = datetime.timedelta( seconds=bigquery_settings.table_lifetime_seconds) unix_epoch = datetime.datetime(year=1970, month=1, day=1) expiration_ms = int( (now + expiration_delta - unix_epoch).total_seconds()) * 1000 request['expirationTime'] = expiration_ms # Allow exceptions from here to propagate; we don't expect any problems, # so if we have any, the upload should abort. tables.insert( projectId=bigquery_settings.project_id, datasetId=bigquery_settings.dataset_id, body=request).execute() def _create_upload_job(self, http, bigquery_settings): """Before uploading, we must create a job to handle the upload. Args: http: An HTTP client object configured to send our auth token bigquery_settings: Configs for talking to bigquery. Returns: URL specific to this upload job. Subsequent PUT requests to send pages of data must be sent to this URL. Raises: Exception: on unexpected responses from BigQuery API. """ uri = '%s%s/jobs?uploadType=resumable' % ( BIGQUERY_API_UPLOAD_URL_PREFIX, bigquery_settings.project_id) headers = { 'Content-Type': 'application/json', 'X-Upload-Content-Type': 'application/octet-stream', } table_name = self._data_source_class_name.replace('DataSource', '') body = transforms.dumps({ 'kind': 'bigquery#job', 'configuration': { 'load': { 'createDisposition': 'CREATE_NEVER', # Already exists. 
'destinationTable': { 'projectId': bigquery_settings.project_id, 'datasetId': bigquery_settings.dataset_id, 'tableId': table_name, }, 'ignoreUnknownValues': False, 'sourceFormat': 'NEWLINE_DELIMITED_JSON', } } }) response, content = http.request(uri, method='POST', body=body, headers=headers) if int(response.get('status', 0)) != 200: raise Exception('Got non-200 response when trying to create a ' 'new upload job. Reponse was: "%s"; content ' 'was "%s"' % (str(response), str(content))) location = response.get('location') if not location: raise Exception('Expected response to contain a "location" item ' 'giving a URL to send subsequent content to, but ' 'instead got "%s"' % str(response)) return location def _initiate_upload_job(self, bigquery_service, bigquery_settings, http, app_context): """Coordinate table cleanup, setup, and initiation of upload job.""" data_source_class = _get_data_source_class_by_name( self._data_source_class_name) catch_and_log_ = catch_and_log.CatchAndLog() table_schema = data_source_class.get_schema(app_context, catch_and_log_) schema = self._json_schema_to_bigquery_schema(table_schema) tables = bigquery_service.tables() self._maybe_create_course_dataset(bigquery_service, bigquery_settings) self._maybe_delete_previous_table(tables, bigquery_settings) self._create_data_table(tables, bigquery_settings, schema) upload_url = self._create_upload_job(http, bigquery_settings) return upload_url def _note_retryable_failure(self, message, job_context): """Log a timestamped message into the job context object.""" timestamp = datetime.datetime.now().strftime( utils.HUMAN_READABLE_DATETIME_FORMAT) job_context[CONSECUTIVE_FAILURES].append(timestamp + ' ' + message) def _randomized_backoff_timeout(self, job_context): num_failures = len(job_context[CONSECUTIVE_FAILURES]) if not num_failures: return 0 return min(MAX_RETRY_BACKOFF_SECONDS, random.randrange(2 ** num_failures, 2 ** (num_failures + 1))) def _check_upload_state(self, http, job_context): """Check with the BigQuery upload server to get state of our upload. Due to various communication failure cases, we may not be aware of the actual state of the upload as known to the server. Issue a blank PUT request to evoke a response that will indicate: - How far along we are in the upload - Whether the upload has already completed - Whether the upload job has taken too long and expired Args: http: An HTTP client object configured to send our auth token job_context: Hash containing configuration for this upload job. Returns: A 2-tuple of next page to load (or None if no page should be loaded), and the next jobs.STATUS_CODE_<X> to transition to. """ response, _ = http.request(job_context[UPLOAD_URL], method='PUT', headers={'Content-Range': 'bytes */*'}) return self._handle_put_response(response, job_context, is_upload=False) def _send_data_page_to_bigquery(self, data, is_last_chunk, next_page, http, job, sequence_num, job_context, data_source_context): # BigQuery expects one JSON object per newline-delimed record, # not a JSON array containing objects, so convert them individually. # Less efficient, but less hacky than converting and then string # manipulation. 
lines = [] total_len = 0 for item in data: line = transforms.dumps(item) line += '\n' total_len += len(line) lines.append(line) # Round data size up to next multiple of 256K, per # https://cloud.google.com/bigquery/loading-data-post-request#chunking padding_amount = 0 if not is_last_chunk: round_to = 256 * 1024 if total_len % round_to: padding_amount = round_to - (total_len % round_to) lines.append(' ' * padding_amount) payload = ''.join(lines) # We are either re-attempting to send a page, or sending a new page. # Adjust the job_context's last-sent state to reflect this. job_context[LAST_PAGE_NUM_ITEMS] = len(data) if next_page == job_context[LAST_PAGE_SENT]: job_context[LAST_END_OFFSET] = ( job_context[LAST_START_OFFSET] + len(payload) - 1) elif next_page == job_context[LAST_PAGE_SENT] + 1: job_context[LAST_PAGE_SENT] = next_page job_context[LAST_START_OFFSET] = ( job_context[LAST_END_OFFSET] + 1) job_context[LAST_END_OFFSET] = ( job_context[LAST_START_OFFSET] + len(payload) - 1) else: raise Exception( 'Internal error - unexpected condition in sending page. ' 'next_page=%d last_page=%d, num_items=%d' % ( next_page, job_context[LAST_PAGE_SENT], len(data))) logging.info( 'Sending to BigQuery. %d items; %d padding bytes; is-last: %s', len(data), padding_amount, str(is_last_chunk)) headers = { 'Content-Range': 'bytes %d-%d/%s' % ( job_context[LAST_START_OFFSET], job_context[LAST_END_OFFSET], (job_context[LAST_END_OFFSET] + 1) if is_last_chunk else '*') } response, _ = http.request(job_context[UPLOAD_URL], method='PUT', body=payload, headers=headers) _, next_state = self._handle_put_response(response, job_context, is_upload=True) return next_state def _handle_put_response(self, response, job_context, is_upload=True): """Update job_context state depending on response from BigQuery.""" status = int(response['status']) logging.info('Response from bigquery: %d; %s', status, str(response)) next_page = None next_status = jobs.STATUS_CODE_STARTED if status == 308: # Google's push-partial-data usurps the usual meaning of 308 to # instead mean "partial request incomplete"; here, it's telling # us that the request has partially completed, and it will give # us a Range: header to indicate how far it thinks we've gone. # We only care about the upper end of the range. if 'range' not in response: last_offset_received = -1 else: last_offset_received = int(response['range'].split('-')[1]) if last_offset_received == job_context[LAST_END_OFFSET]: # The nominal case; the reported index of the last byte # received exactly matches what we think we sent. Tell our # caller we are ready to try the next page, and count up # the total number of items sent only now that we have seen # the receiving side's acknowledgement. next_page = job_context[LAST_PAGE_SENT] + 1 job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS] job_context[LAST_PAGE_NUM_ITEMS] = 0 # Don't clear the list of failures if this is handling the # pre-check done before uploading. Experiments show that # persistent problems with our requests result in 503's on # upload, but 308's (reporting no progress made) on check. # We want to eventually fail out if we're constantly getting # errors, so ignore the "success" on checking status. if is_upload: job_context[CONSECUTIVE_FAILURES] = [] elif (last_offset_received >= job_context[LAST_START_OFFSET] - 1 and last_offset_received < job_context[LAST_END_OFFSET]): # If the last offset received is not the same as the last offset # sent, that's possibly OK; verify that the last offset received # is sane. 
Here, "sane" means that we accept seeing the # last offset of the previous page sent (last_start_offset-1) # up to, but not including the last_end_offset (for the page # we just sent). Anything lower means that our algorithm # mistakenly skipped past a failure. Anything higher means # that we have somehow become confused and decided to step # backward (or BigQuery is lying to us). prev_page_size = (job_context[LAST_END_OFFSET] - job_context[LAST_START_OFFSET] + 1) bytes_received = (last_offset_received - job_context[LAST_START_OFFSET] + 1) self._note_retryable_failure( 'Incomplete upload detected - %d of %d bytes received ' 'for page %d' % (bytes_received, prev_page_size, job_context[LAST_PAGE_SENT]), job_context) next_page = job_context[LAST_PAGE_SENT] else: raise ValueError( 'Uploaded byte count of %d does not fall in the range ' '%d to %d, the start/end range for previously-sent page ' 'number %d. Abandoning upload.' % ( last_offset_received, job_context[LAST_START_OFFSET], job_context[LAST_END_OFFSET], job_context[LAST_PAGE_SENT])) elif status in (200, 201): # BigQuery confirms that it has seen the upload complete. (Note # that this is *not* a promise that the upload has parsed # correctly; there doesn't seem to be a clean way to ask about # that other than to probe the table for number of rows uploaded # until we see the desired number or time out. Ick.) job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS] job_context[LAST_PAGE_NUM_ITEMS] = 0 next_status = jobs.STATUS_CODE_COMPLETED elif status == 404: # Unlikely, but possible. For whatever reason, BigQuery has # decided that our upload URL is no longer valid. (Docs say that # we are allowed up to a day to get an upload done, but do not # promise that this is the only reason a job may become invalid.) # We need to start again from scratch. To start over, we will # just skip uploading a data page this round, and set ourselves up # to be called back again from the deferred-tasks queue. When the # callback happens, STATUS_CODE_QUEUED will indicate we need to # re-init everything from scratch. next_status = jobs.STATUS_CODE_QUEUED elif status in (500, 502, 503, 504): # Server Error, Bad Gateway, Service Unavailable or Gateway Timeout. # In all of these cases, we do a randomized exponential delay before # retrying. self._note_retryable_failure('Retryable server error %d' % status, job_context) else: raise ValueError( 'Got unexpected status code %d from BigQuery in response %s' % (status, str(response))) return next_page, next_status def _fetch_page_data(self, app_context, data_source_context, next_page): """Get the next page of data from the data source.""" data_source_class = _get_data_source_class_by_name( self._data_source_class_name) catch_and_log_ = catch_and_log.CatchAndLog() is_last_page = False with catch_and_log_.propagate_exceptions('Loading page of data'): schema = data_source_class.get_schema(app_context, catch_and_log_) required_jobs = data_sources.utils.get_required_jobs( data_source_class, app_context, catch_and_log_) data, _ = data_source_class.fetch_values( app_context, data_source_context, schema, catch_and_log_, next_page, *required_jobs) if (data_source_class.get_default_chunk_size() == 0 or not hasattr(data_source_context, 'chunk_size') or len(data) < data_source_context.chunk_size): is_last_page = True else: # Here, we may have read to the end of the table and just # happened to end up on an even chunk boundary. Attempt to # read one more row so that we can discern whether we really # are at the end. 
# Don't use the normal data_source_context; we don't want it # to cache a cursor for the next page that will only retrieve # one row. throwaway_context = copy.deepcopy(data_source_context) throwaway_context.chunk_size = 1 next_data, actual_page = data_source_class.fetch_values( app_context, throwaway_context, schema, catch_and_log_, next_page + 1, *required_jobs) if not next_data or actual_page == next_page: is_last_page = True return data, is_last_page def _send_next_page(self, sequence_num, job): """Coordinate table setup, job setup, sending pages of data.""" # Gather necessary resources app_context = sites.get_course_index().get_app_context_for_namespace( self._namespace) pii_secret = self._get_pii_secret(app_context) bigquery_settings = self._get_bigquery_settings(app_context) bigquery_service, http = self._get_bigquery_service(bigquery_settings) # If this is our first call after job start (or we have determined # that we need to start over from scratch), do initial setup. # Otherwise, re-load context objects from saved version in job.output if job.status_code == jobs.STATUS_CODE_QUEUED: upload_url = self._initiate_upload_job( bigquery_service, bigquery_settings, http, app_context) job_context = self._build_job_context(upload_url, pii_secret) data_source_context = self._build_data_source_context() else: job_context, data_source_context = self._load_state( job, sequence_num) if hasattr(data_source_context, 'pii_secret'): data_source_context.pii_secret = pii_secret logging.info('Data pump job %s loaded contexts: %s %s', self._job_name, str(job_context), str(data_source_context)) # Check BigQuery's state. Based on that, choose the next page of data # to push. Depending on BigQuery's response, we may or may not be # able to send a page now. next_page, next_state = self._check_upload_state(http, job_context) if next_page is not None: data, is_last_chunk = self._fetch_page_data( app_context, data_source_context, next_page) next_state = self._send_data_page_to_bigquery( data, is_last_chunk, next_page, http, job, sequence_num, job_context, data_source_context) self._save_state(next_state, job, sequence_num, job_context, data_source_context) # If we are not done, enqueue another to-do item on the deferred queue. if len(job_context[CONSECUTIVE_FAILURES]) >= MAX_CONSECUTIVE_FAILURES: raise Exception('Too many consecutive failures; abandoning job.') elif not job.has_finished: backoff_seconds = self._randomized_backoff_timeout(job_context) logging.info('%s re-queueing for subsequent work', self._job_name) deferred.defer(self.main, sequence_num, _countdown=backoff_seconds) else: logging.info('%s complete', self._job_name) def main(self, sequence_num): """Callback entry point. Manage namespaces, failures; send data.""" logging.info('%s de-queued and starting work.', self._job_name) job = self.load() if not job: raise deferred.PermanentTaskFailure( 'Job object for %s not found!' % self._job_name) if job.has_finished: return # We have been canceled; bail out immediately. with common_utils.Namespace(self._namespace): try: self._send_next_page(sequence_num, job) except Exception, ex: try: # Log origin of exception to permit troubleshooting. # Do this in try/finally block to conform to Python docs' # recommendation to avoid circular reference to traceback # object. origin_traceback = sys.exc_info()[2] logging.critical('%s: job abandoned due to fatal error %s', self._job_name, str(ex)) logging.critical(''.join( traceback.format_tb(origin_traceback))) finally: pass # Log failure in job object as well. 
if job.output: job_context, data_source_context = self._load_state( job, sequence_num) else: job_context = self._build_job_context(None, None) data_source_context = (self._build_data_source_context()) job_context[FAILURE_REASON] = str(ex) self._save_state(jobs.STATUS_CODE_FAILED, job, sequence_num, job_context, data_source_context) # PermanentTaskFailure tells deferred queue to give up on us. raise deferred.PermanentTaskFailure('Job %s failed: %s' % ( self._job_name, str(ex))) def get_display_dict(self, app_context): """Set up dict for Jinja rendering on data_pump.html.""" ret = { 'name': self._data_source_class_name, 'status': 'Has Never Run', 'active': False, } job = self.load() if job: ret['status'] = jobs.STATUS_CODE_DESCRIPTION[job.status_code] ret['active'] = not job.has_finished ret['sequence_number'] = job.sequence_num ret['updated_on'] = job.updated_on.strftime( utils.HUMAN_READABLE_TIME_FORMAT) if job.has_finished: duration = job.execution_time_sec else: duration = int((datetime.datetime.now() - job.updated_on) .total_seconds()) ret['duration'] = datetime.timedelta(days=0, seconds=duration) ret['last_updated'] = job.updated_on.strftime( utils.HUMAN_READABLE_DATETIME_FORMAT) bigquery_settings = self._get_bigquery_settings(app_context) ret['bigquery_url'] = '%s%s:%s.%s' % ( BIGQUERY_UI_URL_PREFIX, bigquery_settings.project_id, bigquery_settings.dataset_id, self._data_source_class_name.replace('DataSource', '')) try: job_context, _ = self._load_state(job, job.sequence_num) ret['job_context'] = job_context current_secret = DataPumpJob._get_pii_secret(app_context) if job_context[PII_SECRET] != current_secret: ret['pii_secret_is_out_of_date'] = True del job_context[PII_SECRET] except (ValueError, AttributeError): # When jobs framework catches a failure, it overwrites the # job.output with the failure message as a string. We will # get here if we fail to parse job.output as a JSON-packed # object. ret['message'] = job.output data_source_class = _get_data_source_class_by_name( self._data_source_class_name) ret['source_url'] = '%s/rest/data/%s/items?chunk_size=10' % ( app_context.get_slug(), data_source_class.get_name()) catch_and_log_ = catch_and_log.CatchAndLog() ret['schema'] = data_source_class.get_schema(app_context, catch_and_log_) ret['generator_statuses'] = [] ret['available'] = True ret['any_generator_running'] = False required_generators = data_source_class.required_generators() if not required_generators: ret['generator_statuses'].append('(No dependencies)') ret['has_any_generators'] = False else: ret['has_any_generators'] = True for generator_class in required_generators: generator = generator_class(app_context) job = generator.load() ret['generator_statuses'].append( analytics.display.get_generator_status_message( generator_class, job)) if not job or job.status_code != jobs.STATUS_CODE_COMPLETED: ret['available'] = False if job and not job.has_finished: ret['any_generator_running'] = True return ret class DataPumpJobsDataSource(data_sources.SynchronousQuery): """Present DataPump job status as an analytic generated at page-render time. This is a very mild hack. Since the data pump job controls show up as a sub-tab under Dashboard -> Analytics, the easiest way to generate tab content is to act as though we are an analytic. And we are, in a sense - this analytic just happens to generate a table of data-pump job statuses, rather than analytics about student performance. This also conveniently re-uses all the mechanics for authorization, dispatch, page-painting, etc. 
""" @staticmethod def required_generators(): return [] @staticmethod def fill_values(app_context, template_values): template_values['xsrf_token'] = ( crypto.XsrfTokenManager.create_xsrf_token(XSRF_ACTION_NAME)) source_classes = [ ds for ds in data_sources.Registry.get_rest_data_source_classes() if ds.exportable()] source_classes.sort(key=lambda c: c.__name__) # pylint: disable=protected-access template_values['pumps'] = [] for source_class in source_classes: job = DataPumpJob(app_context, source_class.__name__) template_values['pumps'].append(job.get_display_dict(app_context)) pump_settings = app_context.get_environ().get( DATA_PUMP_SETTINGS_SCHEMA_SECTION, {}) template_values['need_settings'] = ( not pump_settings.has_key(PROJECT_ID) or not pump_settings.has_key(JSON_KEY)) template_values[DATASET_NAME] = pump_settings.get(DATASET_NAME) custom_module = None class DashboardExtension(object): """Respond to UI run/cancel commands for individual data pump jobs.""" @classmethod def register(cls): # Register new permission for pushing student data to external location. dashboard.DashboardHandler.add_external_permission( ACCESS_PERMISSION, ACCESS_PERMISSION_DESCRIPTION) # Register a new Analytics sub-tab for showing data pump status and # start/stop buttons. data_pump_visualization = analytics.Visualization( 'data_pumps', 'Data Pumps', 'data_pump.html', data_source_classes=[DataPumpJobsDataSource]) tabs.Registry.register('analytics', 'data_pump', 'Data Pump', [data_pump_visualization]) def post_action(handler): cls(handler).post_data_pump() dashboard.DashboardHandler.post_actions.append(DASHBOARD_ACTION) setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION, post_action) dashboard.DashboardHandler.map_action_to_permission( 'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION) @classmethod def unregister(cls): dashboard.DashboardHandler.post_actions.remove(DASHBOARD_ACTION) setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION, None) dashboard.DashboardHandler.unmap_action_to_permission( 'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION) dashboard.DashboardHandler.remove_external_permission(ACCESS_PERMISSION) roles.Roles.unregister_permissions(custom_module) def post_data_pump(self): source_name = self.handler.request.get('data_source') data_source_class = _get_data_source_class_by_name(source_name) if data_source_class: data_pump_job = DataPumpJob(self.handler.app_context, source_name) action = self.handler.request.get('pump_action') if action == 'start_pump': data_pump_job.submit() elif action == 'cancel_pump': data_pump_job.cancel() elif action == 'run_generators': for generator_class in data_source_class.required_generators(): generator_class(self.handler.app_context).submit() elif action == 'cancel_generators': for generator_class in data_source_class.required_generators(): generator_class(self.handler.app_context).cancel() self.handler.redirect(self.handler.get_action_url( 'analytics', extra_args={'tab': 'data_pump'}, fragment=source_name)) def __init__(self, handler): self.handler = handler def register_module(): """Adds this module to the registry. Called once at startup.""" project_id = schema_fields.SchemaField( DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PROJECT_ID, 'Project ID', 'string', description='The ID (not the name!) of the Project to which to ' 'send data. 
See the list of projects and their IDs at ' 'https://console.developers.google.com/project', i18n=False) dataset_name = schema_fields.SchemaField( DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + DATASET_NAME, 'Dataset Name', 'string', description='Name of the BigQuery dataset to which to pump tables. ' 'If not set, this will default to the name of the course.', optional=True, i18n=False) json_key = schema_fields.SchemaField( DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + JSON_KEY, 'JSON Key', 'text', i18n=False, description='Contents of a JSON key created in the Developers Console ' 'for the instance where BigQuery is to be run. See ' # TODO(mgainer): Get CB location of instructions to get client key # for destination application. 'the instructions at ') table_lifetime = schema_fields.SchemaField( DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + TABLE_LIFETIME, 'Table Lifetime', 'string', optional=True, i18n=False, description='Amount of time a table pushed to BigQuery will last. ' 'After this amount of time, the table will be automatically deleted. ' '(This is useful if your data retention or privacy policy mandates ' 'a limited time for analysis after which personal data must be ' 'removed.) Leaving this field blank or setting it to zero will ' 'cause BigQuery to indefinitely retain data. Supported units are: ' '"weeks", "days", "hours", "minutes", "seconds". Units may be ' 'specified as their first letter, singular, or plural. Spaces ' 'and commas may be used or omitted. E.g., both of the following ' 'are equivalent: "3w1d7h", "3 weeks, 1 day, 7 hours"') pii_encryption_token = schema_fields.SchemaField( DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PII_ENCRYPTION_TOKEN, 'PII Encryption Token', 'string', optional=True, i18n=False, editable=False, description='Automatically generated encryption secret used to ' 'obscure PII fields when these are pushed to BigQuery. This ' 'key lasts only as long as the Table Lifetime setting above, or ' '30 days if the limit is not set. After this secret has expired, ' 'a new secret will be generated. PII items with the same un-obscured ' 'value which are obscured with different values for this secret will ' 'have different values. Most importantly, this means that joins on ' 'fields that should be the same (e.g., user ID) will not work.') course_settings_fields = ( lambda c: project_id, lambda c: json_key, lambda c: dataset_name, lambda c: table_lifetime, lambda c: pii_encryption_token, ) def on_module_enabled(): data_sources.Registry.register(DataPumpJobsDataSource) courses.Course.OPTIONS_SCHEMA_PROVIDERS[ DATA_PUMP_SETTINGS_SCHEMA_SECTION] += course_settings_fields tabs.Registry.register('settings', 'data_pump', 'Data Pump', DATA_PUMP_SETTINGS_SCHEMA_SECTION) DashboardExtension.register() def on_module_disabled(): for field in course_settings_fields: courses.Course.OPTIONS_SCHEMA_PROVIDERS[ DATA_PUMP_SETTINGS_SCHEMA_SECTION].remove(field) DashboardExtension.unregister() global custom_module custom_module = custom_modules.Module( 'Data Pump', 'Pushes DB and generated content to a BigQuery project', [], [], notify_module_enabled=on_module_enabled, notify_module_disabled=on_module_disabled) return custom_module
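# --- Illustrative sketch (not part of the module above) ---
# The docstring of _get_pii_token() describes the stored token as a base64
# secret, a '/', and a Unix timestamp giving the expiry date. The helpers below
# are hypothetical, standalone equivalents of _build_new_pii_encryption_token
# and _parse_pii_encryption_token, included only to make that format concrete.
import base64 as _b64
import datetime as _dt
import os as _os


def _sketch_build_token(lifetime_seconds, secret_length=40):
    """Build a 'secret/valid_until_timestamp' token with the given lifetime."""
    secret = _b64.urlsafe_b64encode(_os.urandom(int(secret_length * 0.75)))
    epoch = _dt.datetime(year=1970, month=1, day=1)
    valid_until = int(
        (_dt.datetime.now() - epoch).total_seconds() + lifetime_seconds)
    return '%s/%d' % (secret.decode('ascii'), valid_until)


def _sketch_parse_token(token):
    """Split a token back into (secret, expiry datetime)."""
    secret, timestamp = token.rsplit('/', 1)
    epoch = _dt.datetime(year=1970, month=1, day=1)
    return secret, epoch + _dt.timedelta(seconds=int(timestamp))

# e.g. _sketch_parse_token(_sketch_build_token(30 * 24 * 3600))
# recovers an expiry date roughly 30 days in the future.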
nilq/baby-python
python
# link:https://leetcode.com/problems/design-browser-history/ class BrowserHistory: def __init__(self, homepage: str): self.forw_memo = [] # forw_memo stores the future url self.back_memo = [] # back_memo stores the previous url self.curr_url = homepage def visit(self, url: str) -> None: self.back_memo.append(self.curr_url) self.curr_url = url self.forw_memo = [] # clear forw_memo def back(self, steps: int) -> str: while self.back_memo and steps >= 1: self.forw_memo.append(self.curr_url) pop_url = self.back_memo.pop() self.curr_url = pop_url steps -= 1 return self.curr_url def forward(self, steps: int) -> str: while self.forw_memo and steps >= 1: self.back_memo.append(self.curr_url) pop_url = self.forw_memo.pop() self.curr_url = pop_url steps -= 1 return self.curr_url
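# Usage sketch for the two-stack design above; the URLs and expected results
# follow the example from the linked problem statement.
if __name__ == "__main__":
    history = BrowserHistory("leetcode.com")
    history.visit("google.com")
    history.visit("facebook.com")
    history.visit("youtube.com")
    assert history.back(1) == "facebook.com"
    assert history.back(1) == "google.com"
    assert history.forward(1) == "facebook.com"
    history.visit("linkedin.com")                 # visiting clears forw_memo
    assert history.forward(2) == "linkedin.com"   # nothing to move forward to
    assert history.back(2) == "google.com"
    assert history.back(7) == "leetcode.com"      # stops at the oldest page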
nilq/baby-python
python
''' Models utility module. ''' import tensorflow as tf def dense(input_size,output_size,depth,size): '''Create a dense model with specific input_size,output_size,depth and number of neurons.''' layers = [tf.keras.layers.Flatten(input_shape=(input_size,input_size,3))] for i in range(depth): layers.append(tf.keras.layers.Dense(size,activation='relu')) layers.append(tf.keras.layers.Dense(output_size)) return tf.keras.Sequential(layers) def conv(input_size,output_size,depth,size): '''Create a conv model with specific input_size,output_size,depth and number of filters.''' layers = [tf.keras.layers.Conv2D(size,(3, 3),activation='relu',input_shape=(input_size,input_size,3))] for i in range(depth-1): layers += [ tf.keras.layers.MaxPooling2D((2, 2)), tf.keras.layers.Conv2D(size,(3, 3),activation='relu',padding='same')] layers += [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(size,activation='relu'), tf.keras.layers.Dense(output_size)] return tf.keras.Sequential(layers) def models(input_size,output_size): '''This generator yields models to test in the experiment.''' #dense layers, different sizes for i in range(1,4): for j in range(1,6): yield dense(input_size,output_size,i,j*32),i,j*32,'dense' #conv model, different sizes for i in range(1,4): for j in range(1,6): yield conv(input_size,output_size,i,j*8),i,j*8,'conv'
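# Usage sketch for the generator above: build every candidate model and print a
# one-line summary. The 32x32 RGB input and 10 output classes are assumptions
# made only for this example.
if __name__ == '__main__':
    for model, depth, size, kind in models(32, 10):
        print('%s: depth=%d, width=%d, params=%d'
              % (kind, depth, size, model.count_params()))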
nilq/baby-python
python
# -*- coding: utf-8 -*- """ @file @brief """ import timeit import pandas def unit(x): """ Optimizes the rendering of time. .. runpython:: :showcode: from jupytalk.benchmark.mlprediction import unit print(unit(34)) print(unit(3.4)) print(unit(0.34)) print(unit(0.034)) print(unit(0.0034)) print(unit(0.00034)) print(unit(0.000034)) print(unit(0.0000034)) print(unit(0.00000034)) """ if x >= 1: return "%1.2f s" % x elif x >= 1e-3: return "%1.2f ms" % (x * 1000) elif x >= 1e-6: return "%1.2f µs" % (x * 1000**2) elif x >= 1e-9: return "%1.2f ns" % (x * 1000**3) else: return "%1.2g s" % x def timeexec(legend, code, number=50, repeat=200, verbose=True, context=None): """ Measures the time for a given expression. @param legend name of the experiment @param code code to measure (as a string) @param number number of time to run the expression (and then divide by this number to get an average) @param repeat number of times to repeat the computation of the above average @param verbose print the time @param globals context (usuable equal to ``globals()``) @return dictionary .. runpython:: :showcode: from jupytalk.benchmark.mlprediction import timeexec code = "3 * 45535266234653452" print(timeexec("multiplication", code)) """ if context is None: context = globals() rep = timeit.repeat(code, number=number, repeat=repeat, globals=context) ave = sum(rep) / (number * repeat) std = (sum((x / number - ave)**2 for x in rep) / repeat)**0.5 fir = rep[0] / number fir3 = sum(rep[:3]) / (3 * number) las3 = sum(rep[-3:]) / (3 * number) rep.sort() mini = rep[len(rep) // 20] / number maxi = rep[-len(rep) // 20] / number if verbose: print("Average: %s deviation %s (with %d runs) in [%s, %s]" % ( unit(ave), unit(std), number, unit(mini), unit(maxi))) return dict(legend=legend, average=ave, deviation=std, first=fir, first3=fir3, last3=las3, repeat=repeat, min5=mini, max5=maxi, code=code, run=number) def make_dataframe(labels, arrays): """ Builds a dataframe from multiple arrays. @param labels list of labels @param arrays list of arrays (or one array) @return dataframes """ if labels is not None: df = [pandas.DataFrame(data={'Label': labels})] else: df = [] if isinstance(arrays, list): for i, ar in enumerate(arrays): d = pandas.DataFrame( data=ar, columns=["F%d_%d" % (i, j) for j in range(ar.shape[1])]) df.append(d) else: ar = arrays d = pandas.DataFrame( data=ar, columns=["F%d" % j for j in range(ar.shape[1])]) df.append(d) return pandas.concat(df, axis=1)
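# Usage sketch for make_dataframe above: one label column plus two feature
# arrays; the shapes are arbitrary and chosen only for illustration.
if __name__ == '__main__':
    import numpy
    labels = ['a', 'b', 'c']
    arrays = [numpy.zeros((3, 2)), numpy.ones((3, 1))]
    print(make_dataframe(labels, arrays))  # columns: Label, F0_0, F0_1, F1_0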
nilq/baby-python
python
DRB1_1385_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.004754, 'I': -0.99525, 'H': -999.0, 'K': -999.0, 'M': -0.99525, 'L': -0.99525, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.004754, 'V': -0.99525, 'Y': -0.004754}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.4251, 'D': -1.5135, 'G': -1.5724, 'F': 0.54328, 'I': 0.26645, 'H': 0.26629, 'K': 0.082601, 'M': 0.91659, 'L': 0.78109, 'N': 0.036182, 'Q': 0.0014865, 'P': -1.5914, 'S': -0.64719, 'R': -0.2678, 'T': -0.81058, 'W': 0.22027, 'V': -0.1439, 'Y': -0.18922}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -1.4081, 'D': -2.3885, 'G': -0.70585, 'F': -1.3969, 'I': 0.69291, 'H': -0.11092, 'K': 1.2687, 'M': -0.90111, 'L': 0.18921, 'N': -0.58393, 'Q': -0.31017, 'P': 0.49539, 'S': -0.090593, 'R': 0.97197, 'T': 0.8083, 'W': -1.3962, 'V': 1.1966, 'Y': -1.3998}, 6: {'A': 0.0, 'E': -1.0872, 'D': -1.7521, 'G': -0.91274, 'F': 0.16734, 'I': 0.090774, 'H': -0.091681, 'K': -0.29398, 'M': 0.48662, 'L': 0.57886, 'N': -0.14347, 'Q': -0.26554, 'P': -0.57386, 'S': -0.69106, 'R': 0.26585, 'T': -0.86328, 'W': -0.041585, 'V': -0.15572, 'Y': -0.14029}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.54182, 'D': -0.78869, 'G': 0.1478, 'F': 0.55352, 'I': 0.43948, 'H': -0.38613, 'K': -0.2285, 'M': 0.82817, 'L': -0.20101, 'N': -0.73258, 'Q': -0.073797, 'P': -0.48481, 'S': 1.0175, 'R': 0.22077, 'T': -0.6178, 'W': -0.99494, 'V': 0.11956, 'Y': 0.066112}}
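# Illustrative sketch: the dictionary above reads as a position-specific scoring
# matrix (positions 0-8, one score per amino-acid letter). Assuming that
# reading, a 9-mer can be scored by summing its per-position entries; the
# peptide in the comment below is hypothetical.
def _score_9mer(peptide, matrix=DRB1_1385_9):
    return sum(matrix[pos][aa] for pos, aa in enumerate(peptide))

# e.g. _score_9mer('FVKQNAAAL')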
nilq/baby-python
python
from linghelper.phonetics.praat import PraatLoader from linghelper.phonetics.praat.helper import to_time_based_dict from scipy.interpolate import interp1d from numpy import vstack,array def interpolate_pitch(pitch_track): defined_keys = [k for k in sorted(pitch_track.keys()) if pitch_track[k]['Pitch'] != '--undefined--'] x = array(defined_keys) y = array([ pitch_track[k]['Pitch'] for k in defined_keys]) if len(x) == 0: return None times = list(filter(lambda z: z >= min(x) and z <= max(x),defined_keys)) f = interp1d(x,y) return f(times) def get_intensity_spline(intensity_track): y = array([ intensity_track[k]['Intensity'] for k in sorted(intensity_track.keys()) if intensity_track[k]['Intensity'] != '--undefined--']) return y def interpolate_prosody(pitch,intensity): defined_keys = [k for k in sorted(pitch.keys()) if pitch[k]['Pitch'] != '--undefined--'] x = array(defined_keys) y = array([ pitch[k]['Pitch'] for k in defined_keys]) if len(x) == 0: return None times = list(filter(lambda z: z >= min(x) and z <= max(x),defined_keys)) p = interp1d(x,y) x = list(sorted(intensity.keys())) y =[intensity[k]['Intensity'] for k in x] i = interp1d(x, y) pitch_spline = p(times) intensity_spline = i(times) return vstack((pitch_spline,intensity_spline)).T def to_pitch(filename,time_step): p = PraatLoader() output = p.run_script('pitch.praat', filename,time_step) try: pitch = to_time_based_dict(output) except IndexError: return None pitch_spline = interpolate_pitch(pitch) if pitch_spline is None: return None return pitch_spline.T def to_intensity(filename,time_step): p = PraatLoader() output = p.run_script('intensity.praat', filename,time_step) intensity = to_time_based_dict(output) intensity_spline = get_intensity_spline(intensity) return intensity_spline.T def to_prosody(filename,time_step): p = PraatLoader() output = p.run_script('pitch.praat', filename,time_step) try: pitch = to_time_based_dict(output) except IndexError: return None output = p.run_script('intensity.praat', filename,time_step) intensity = to_time_based_dict(output) prosody = interpolate_prosody(pitch,intensity) return prosody
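# Usage sketch for the helpers above: extract a combined pitch/intensity contour
# from a recording. The file name and the 0.01 s time step are hypothetical, and
# the call requires Praat to be reachable through PraatLoader.
if __name__ == '__main__':
    contour = to_prosody('example.wav', 0.01)
    if contour is not None:
        print(contour.shape)  # (n_frames, 2): pitch column, intensity column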
nilq/baby-python
python
import sys import web_tests.create_test_suite as tests import web_tests.csv2_runner as csv2_runner def main(gvar): # setup to run Chromium tests runner = csv2_runner.Csv2TestRunner(verbosity=2, gvar=gvar) suite = tests.chromium_test_suite() runner.run(suite) print() if __name__ == "__main__": main(None)
nilq/baby-python
python
from payment.payment_interface import PaymentInterface from rest_framework.test import APITestCase class TestPaymentInterface(APITestCase): def test_get(self): res = PaymentInterface.get('https://api.paystack.co/bank') self.assertEqual(res.get('status'), True) def test_get_with_auth(self): res = PaymentInterface.get_with_auth( 'https://api.paystack.co/bank/resolve?account_number=310484182&bank_code=011') self.assertEqual(res.get('status'), False)
nilq/baby-python
python
import numpy as np from multiprocessing import Pool from multiprocessing import cpu_count _user_input = None _item_input = None _labels = None _batch_size = None _index = None _dataset = None # input: dataset(Mat, List, Rating, Negatives), batch_choice, num_negatives # output: [_user_input_list, _item_input_list, _labels_list] def sampling(dataset, num_negatives): _user_input, _item_input, _labels = [], [], [] num_users, num_items = dataset.trainMatrix.shape for (u, i) in dataset.trainMatrix.keys(): # positive instance _user_input.append(u) _item_input.append(i) _labels.append(1) # negative instances for t in xrange(num_negatives): j = np.random.randint(num_items) while dataset.trainMatrix.has_key((u, j)): j = np.random.randint(num_items) _user_input.append(u) _item_input.append(j) _labels.append(0) return _user_input, _item_input, _labels def shuffle(samples, batch_size, dataset): global _user_input global _item_input global _labels global _batch_size global _index global _dataset _user_input, _item_input, _labels = samples _batch_size = batch_size _dataset = dataset _index = range(len(_labels)) np.random.shuffle(_index) num_batch = len(_labels) // _batch_size pool = Pool(cpu_count()) res = pool.map(_get_train_batch, range(num_batch)) pool.close() pool.join() user_list = [r[0] for r in res] num_idx = [r[1] for r in res] item_list = [r[2] for r in res] labels_list = [r[3] for r in res] return user_list, num_idx, item_list, labels_list def _get_train_batch(i): user_batch, num_batch, item_batch, labels_batch = [], [], [], [] begin = i * _batch_size trainList = _dataset.trainList num_items = _dataset.num_items for idx in range(begin, begin + _batch_size): user_idx = _user_input[_index[idx]] item_idx = _item_input[_index[idx]] nonzero_row = [] nonzero_row += trainList[user_idx] num_batch.append(_remove_item(num_items, nonzero_row, item_idx)) user_batch.append(nonzero_row) item_batch.append(item_idx) labels_batch.append(_labels[_index[idx]]) return np.array(_add_mask(num_items, user_batch, max(num_batch))), np.array(num_batch), np.array(item_batch), np.array(labels_batch) def _remove_item(feature_mask, users, item): flag = 0 for i in range(len(users)): if users[i] == item: users[i] = users[-1] users[-1] = feature_mask flag = 1 break return len(users) - flag def _add_mask(feature_mask, features, num_max): # uniformalize the length of each batch for i in xrange(len(features)): features[i] = features[i] + [feature_mask] * (num_max + 1 - len(features[i])) return features
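# Usage sketch for sampling() above with a minimal stand-in dataset. _ToyDataset
# is hypothetical and only mimics the attributes used by sampling() and
# shuffle() (trainMatrix as a dok_matrix, trainList, num_items); like the
# module itself, it assumes Python 2 (xrange, has_key).
if __name__ == '__main__':
    import scipy.sparse as sp

    class _ToyDataset(object):
        def __init__(self):
            self.num_items = 6
            self.trainMatrix = sp.dok_matrix((3, self.num_items))
            for u, i in [(0, 0), (0, 1), (1, 2), (2, 3), (2, 4)]:
                self.trainMatrix[u, i] = 1.0
            self.trainList = [[0, 1], [2], [3, 4]]

    users, items, labels = sampling(_ToyDataset(), 2)
    print(len(users), sum(labels))  # 15 samples in total, 5 of them positive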
nilq/baby-python
python
#!/usr/bin/python3 import numpy as np from os.path import join as pjoin from os import linesep from shutil import copyfile from scipy.io import mmwrite from scipy.sparse import coo_matrix import gzip diri='data/raw' diro='data/de' key='celltype' values=['dysfunctional','naive'] #Load covariate info dc=np.loadtxt(pjoin(diri,'cov.tsv.gz'),delimiter='\t') with open(pjoin(diri,'cov.txt'),'r') as f: namec=f.readlines() namec=np.array([x.strip() for x in namec]) namecdict=dict(zip(namec,range(len(namec)))) #Select cells for DE ids=[namecdict[key+'='+x] for x in values] ids=dc[ids].astype(bool) assert ids.any(axis=1).all() ida=ids.any(axis=0) #Process covariates namecn_id=np.array([namecdict[x] for x in filter(lambda x:not x.startswith(key+'='),namec)]) dcn=dc[namecn_id][:,ida] #Remove single-valued covariates t1=[len(np.unique(x))>1 for x in dcn] namecn_id=namecn_id[t1] dcn=dcn[t1] namecn=namec[namecn_id] #Output covariates np.savetxt(pjoin(diro,'0_cov.tsv.gz'),dcn,delimiter='\t',fmt="%.8G") with open(pjoin(diro,'0_cov.txt'),'w') as f: f.write(linesep.join(namecn)) del namecn,dcn #Process cells with open(pjoin(diri,'cell.txt'),'r') as f: names=f.readlines() names=np.array([x.strip() for x in names]) namesn=names[ida] with open(pjoin(diro,'0_cell.txt'),'w') as f: f.write(linesep.join(namesn)) #Process transcriptome dt=np.loadtxt(pjoin(diri,'read.tsv.gz'),delimiter='\t') dtn=dt[:,ida] dtn=coo_matrix(dtn) with gzip.open(pjoin(diro,'0_read.mtx.gz'),'w') as f: mmwrite(f,dtn,field='integer') #Process grouping dg=np.zeros(len(ida),dtype=int) dg[ids[0]]=1 dg=dg[ida].astype(int) #Output grouping np.savetxt(pjoin(diro,'0_group.tsv.gz'),dg,delimiter='\t',fmt="%u") #Copy genes copyfile(pjoin(diri,'gene.txt'),pjoin(diro,'0_gene.txt'))
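# Illustrative sketch (separate from the pipeline above) of the grouping step:
# cells matching the first value get label 1, the other selected cells get 0.
# The toy masks below are hypothetical.
_toy_ids = np.array([[True, False, False, True],    # e.g. celltype=dysfunctional
                     [False, True, False, False]])  # e.g. celltype=naive
_toy_ida = _toy_ids.any(axis=0)                     # cells kept for DE
_toy_dg = np.zeros(_toy_ids.shape[1], dtype=int)
_toy_dg[_toy_ids[0]] = 1
print(_toy_dg[_toy_ida])                            # -> [1 0 1]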
nilq/baby-python
python
__version__ = "0.3.2" __api_version__ = "0.10.1"
nilq/baby-python
python
from aiocloudflare.commons.auth import Auth class Dnssec(Auth): _endpoint1 = "zones" _endpoint2 = "dnssec" _endpoint3 = None
nilq/baby-python
python
#reference: https://github.com/val-iisc/capnet/blob/master/src/proj_codes.py from __future__ import division import math import numpy as np import torch import utils.network_utils class Projector(torch.nn.Module): ''' Project the 3D point cloud to 2D plane args: xyz: float tensor, (BS,N_PTS,3); input point cloud values assumed to be in (-1,1) az: float tensor, (BS); azimuthal angle of camera in radians el: float tensor, (BS); elevation of camera in radians N_PTS: float, (); number of points in point cloud returns: grid_val: float, (N_batch,H,W); output silhouette ''' def __init__(self, cfg): super(Projector, self).__init__() # self.batch_size = cfg.CONST.BATCH_SIZE self.cfg = cfg self.n_pts = cfg.CONST.NUM_POINTS self.grid_h = cfg.PROJECTION.GRID_H self.grid_w = cfg.PROJECTION.GRID_W self.sigma_sq_cont = cfg.PROJECTION.SIGMA_SQ_CONT self.sigma_sq_disc = cfg.PROJECTION.SIGMA_SQ_DISC def forward(self, xyz, az, el): # World co-ordinates to camera co-ordinates batch_size = xyz.size(0) pcl_out_rot = self.world2cam(xyz, az, el, batch_size=batch_size, N_PTS=self.n_pts) # Perspective transform pcl_out_persp = self.perspective_transform(pcl_out_rot, batch_size=batch_size, grid_h=self.grid_h, grid_w=self.grid_w) if self.cfg.SUPERVISION_2D.PROJ_TYPE == "CONT": proj_pred = self.cont_proj(pcl_out_persp, grid_h=self.grid_h, grid_w=self.grid_w, sigma_sq=self.sigma_sq_cont) elif self.cfg.SUPERVISION_2D.PROJ_TYPE == "DISC": proj_pred = self.cont_proj(pcl_out_persp, grid_h=self.grid_h, grid_w=self.grid_w, sigma_sq=self.sigma_sq_disc) return proj_pred def cont_proj(self, pcl, grid_h, grid_w, sigma_sq=0.5): ''' Continuous approximation of Orthographic projection of point cloud to obtain Silhouette args: pcl: float, (N_batch,N_PTS,3); input point cloud values assumed to be in (-1,1) grid_h, grid_w: int, (); output depth map height and width returns: grid_val: float, (N_batch,H,W); output silhouette ''' x, y, z = pcl.chunk(3, dim=2) # divide to three parts pcl_norm = torch.cat([x, y, z], dim=2) pcl_xy = torch.cat([x,y], dim=2) #(BS, N_PTS, 2) out_grid = torch.meshgrid(torch.arange(0, grid_h), torch.arange(0, grid_w)) out_grid = [out_grid[0].type(torch.FloatTensor), out_grid[1].type(torch.FloatTensor)] grid_z = torch.unsqueeze(torch.zeros_like(out_grid[0]), 2) # (H,W,1) grid_xyz = torch.cat([torch.stack(out_grid, 2), grid_z], dim=2) # (H,W,3) grid_xy = torch.stack(out_grid, 2) # (H,W,2) grid_xy = utils.network_utils.var_or_cuda(grid_xy) grid_diff = torch.unsqueeze(torch.unsqueeze(pcl_xy, 2), 2) - grid_xy # (BS,N_PTS,H,W,2) grid_val = self.apply_kernel(grid_diff, sigma_sq) # (BS,N_PTS,H,W,2) grid_val = grid_val[:,:,:,:,0]*grid_val[:,:,:,:,1] # (BS,N_PTS,H,W) grid_val = torch.sum(grid_val, dim=1) # (BS,H,W) grid_val = torch.tanh(grid_val) return grid_val """ def disc_proj(self, pcl, grid_h, grid_w): ''' Discrete Orthographic projection of point cloud to obtain Silhouette Handles only batch size 1 for now args: pcl: float, (N_batch,N_Pts,3); input point cloud values assumed to be in (-1,1) grid_h, grid_w: int, (); output depth map height and width returns: grid_val: float, (N_batch,H,W); output silhouette ''' x, y, z = pcl.chunk(3, dim=2) # divide to three parts pcl_norm = torch.cat([x, y, z], dim=2) pcl_xy = torch.cat([x,y], dim=2) 2048, 2 xy_indices = pcl_xy[0].long() xy_values = torch.ones_like(xy_indices) print(pcl_xy.requires_grad) print(xy_indices.requires_grad) print(xy_values.requires_grad) xy_shape = torch.zeros((grid_h, grid_w), dtype=xy_values.dtype) xy_shape = utils.network_utils.var_or_cuda(xy_shape) # 
xy_shape[xy_indices[:,0], xy_indices[:,1]] = 1. # out_grid = torch.unsqueeze(xy_shape, 0) out_grid = xy_shape out_grid = torch.unsqueeze(xy_shape, 0) print("grad:", out_grid.requires_grad) return out_grid """ def apply_kernel(self, x, sigma_sq=0.5): ''' Get the un-normalized gaussian kernel with point co-ordinates as mean and variance sigma_sq args: x: float, (BS,N_PTS,H,W,2); mean subtracted grid input sigma_sq: float, (); variance of gaussian kernel returns: out: float, (BS,N_PTS,H,W,2); gaussian kernel ''' out = (torch.exp(-(x**2)/(2.*sigma_sq))) return out def perspective_transform(self, xyz, batch_size, grid_h, grid_w): ''' Perspective transform of pcl; Intrinsic camera parameters are assumed to be known (here, obtained using parameters of GT image renderer, i.e. Blender) Here, output grid size is assumed to be (64,64) in the K matrix TODO: use output grid size as argument args: xyz: float, (BS,N_PTS,3); input point cloud values assumed to be in (-1,1) returns: xyz_out: float, (BS,N_PTS,3); perspective transformed point cloud ''' alpha_u = 60. * float(grid_h)/32. alpha_v = 60. * float(grid_w)/32. u_0 = float(grid_h)/2. v_0 = float(grid_w)/2. K = np.array([ [alpha_u, 0., -u_0], [0., alpha_v, -v_0], [0., 0., 1.]]).astype(np.float32) K = np.expand_dims(K, 0) K = np.tile(K, [batch_size,1,1]) K = torch.from_numpy(K) K = utils.network_utils.var_or_cuda(K) xyz_out = torch.matmul(K, xyz.permute(0, 2, 1)) xy_out = xyz_out[:,:2]/abs(torch.unsqueeze(xyz[:,:,2],1)) xyz_out = torch.cat([xy_out, abs(xyz_out[:,2:])],dim=1) return xyz_out.permute(0, 2, 1) def world2cam(self, xyz, az, el, batch_size, N_PTS=1024): ''' Convert pcl from world co-ordinates to camera co-ordinates, the rotation matrix is different from capnet, inorder to fit the training data orientation. in capnet: chair face to z axis here: chair face to x axis args: xyz: float tensor, (BS,N_PTS,3); input point cloud values assumed to be in (-1,1) az: float tensor, (BS); azimuthal angle of camera in radians el: float tensor, (BS); elevation of camera in radians batch_size: int, (); batch size N_PTS: float, (); number of points in point cloud returns: xyz_out: float tensor, (BS,N_PTS,3); output point cloud in camera co-ordinates ''' # Camera origin calculation - az,el,d to 3D co-ord # Rotation """ rotmat_az=[ [torch.ones_like(az),torch.zeros_like(az),torch.zeros_like(az)], [torch.zeros_like(az),torch.cos(az),-torch.sin(az)], [torch.zeros_like(az),torch.sin(az),torch.cos(az)] ] """ # y ---> x rotmat_az=[ [torch.cos(az),torch.sin(az),torch.zeros_like(az)], [-torch.sin(az),torch.cos(az),torch.zeros_like(az)], [torch.zeros_like(az),torch.zeros_like(az), torch.ones_like(az)] ] rotmat_az = [ torch.stack(x) for x in rotmat_az ] # z ---> x, in dataloader, az = original az - 90 degree, which means here is actually x ----> -z rotmat_el=[ [torch.cos(el),torch.zeros_like(az), torch.sin(el)], [torch.zeros_like(az),torch.ones_like(az),torch.zeros_like(az)], [-torch.sin(el),torch.zeros_like(az), torch.cos(el)] ] rotmat_el = [ torch.stack(x) for x in rotmat_el ] rotmat_az = torch.stack(rotmat_az, 0) # [3,3,B] rotmat_el = torch.stack(rotmat_el, 0) # [3,3,B] rotmat_az = rotmat_az.permute(2, 0, 1) # [B,3,3] rotmat_el = rotmat_el.permute(2, 0, 1) # [B,3,3] rotmat = torch.matmul(rotmat_el, rotmat_az) # Transformation(t) # Distance of object from camera - fixed to 2 d = 2. 
# Calculate translation params tx, ty, tz = [0, 0, d] tr_mat = torch.unsqueeze(torch.tensor([tx, ty, tz]), 0).repeat(batch_size,1) # [B,3] tr_mat = torch.unsqueeze(tr_mat,2) # [B,3,1] tr_mat = tr_mat.permute(0, 2, 1) # [B,1,3] tr_mat = tr_mat.repeat(1, N_PTS, 1) # [B,1024,3] tr_mat = utils.network_utils.var_or_cuda(tr_mat) # [B,1024,3] xyz_out = torch.matmul(rotmat, xyz.permute(0, 2, 1)) - tr_mat.permute(0, 2, 1) return xyz_out.permute(0, 2, 1)
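# Usage sketch for Projector above. The config object is a hypothetical
# stand-in carrying only the fields the class reads; azimuth/elevation are in
# radians and the point-cloud coordinates are assumed to lie in (-1, 1), as the
# docstrings state.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        CONST=SimpleNamespace(NUM_POINTS=256),
        PROJECTION=SimpleNamespace(GRID_H=64, GRID_W=64,
                                   SIGMA_SQ_CONT=0.5, SIGMA_SQ_DISC=0.5),
        SUPERVISION_2D=SimpleNamespace(PROJ_TYPE='CONT'))

    projector = Projector(cfg)
    pcl = utils.network_utils.var_or_cuda(
        torch.rand(1, cfg.CONST.NUM_POINTS, 3) * 2 - 1)  # (BS, N_PTS, 3)
    az = utils.network_utils.var_or_cuda(torch.zeros(1))
    el = utils.network_utils.var_or_cuda(torch.zeros(1))
    silhouette = projector(pcl, az, el)                   # (BS, GRID_H, GRID_W)
    print(silhouette.shape)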
nilq/baby-python
python
#!/usr/bin/python #--2 and 3-- __author__ = "gray" __date__ = "20171228" __version__ = "1.0.2" __aim__ = """ GetData.py for the MiSeq pipeline (CHSLAB). Copies, renames, and unzips raw files to prepare input for QC. input: sample sheet, project Dir (Target Dir) [sample sheet] format: RawSampleName\tNewSampleName[marker] """ import sys import os import subprocess as sup def GetData(SampleSheet, TargetDir="./"): #check SampleSheet if os.path.exists(SampleSheet): pass else: print("File not found: "+SampleSheet) sys.exit(1) #-------- with open(SampleSheet,"r") as Fr: #no header content = Fr.readlines() for line in content: item = line.strip().split("\t") Oripath = item[0] Marker = item[1] #---copy and rename (file stays gzipped) Comd = "cp "+Oripath+" "+TargetDir+"/"+Marker+".fastq.gz" print(Comd) sup.call(Comd, shell=True) # if __name__ == "__main__": SampleSheet = sys.argv[1] TargetDir = sys.argv[2] #check dir if os.path.exists(TargetDir): pass else: sup.call("mkdir -p "+TargetDir,shell=True) GetData(SampleSheet, TargetDir)
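# Usage sketch (hypothetical paths): the sample sheet is a two-column,
# tab-separated file mapping raw FASTQ paths to new sample names, e.g.
#
#   /data/run1/S1_R1_001.fastq.gz<TAB>sampleA
#   /data/run1/S2_R1_001.fastq.gz<TAB>sampleB
#
# Running
#
#   python GetData.py sample_sheet.tsv qc_input/
#
# copies the files to qc_input/sampleA.fastq.gz and qc_input/sampleB.fastq.gz.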
nilq/baby-python
python
# Standard Library
import json
import os
import pstats
import shutil
import time
from multiprocessing.pool import ThreadPool

# Third Party
import boto3
import pandas as pd
import pytest

# First Party
from smdebug.core.access_layer.utils import is_s3
from smdebug.profiler.analysis.python_profile_analysis import PyinstrumentAnalysis, cProfileAnalysis
from smdebug.profiler.profiler_constants import (
    CONVERT_TO_MICROSECS,
    CPROFILE_NAME,
    CPROFILE_STATS_FILENAME,
    PYINSTRUMENT_HTML_FILENAME,
    PYINSTRUMENT_JSON_FILENAME,
    PYINSTRUMENT_NAME,
)
from smdebug.profiler.python_profile_utils import PythonProfileModes, StepPhase
from smdebug.profiler.python_profiler import (
    PyinstrumentPythonProfiler,
    cProfilePythonProfiler,
    cProfileTimer,
)


@pytest.fixture
def test_framework():
    return "test-framework"


@pytest.fixture()
def cprofile_python_profiler(out_dir, test_framework):
    return cProfilePythonProfiler(out_dir, test_framework, cProfileTimer.TOTAL_TIME)


@pytest.fixture()
def pyinstrument_python_profiler(out_dir, test_framework):
    return PyinstrumentPythonProfiler(out_dir, test_framework)


@pytest.fixture()
def framework_dir(out_dir, test_framework):
    return "{0}/framework/{1}".format(out_dir, test_framework)


@pytest.fixture(autouse=True)
def reset_python_profiler_dir(framework_dir):
    shutil.rmtree(framework_dir, ignore_errors=True)


@pytest.fixture(scope="session")
def bucket_prefix():
    return f"s3://smdebug-testing/resources/python_profile/{int(time.time())}"


def pre_step_zero_function():
    time.sleep(
        0.0011
    )  # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds


def start_end_step_function():
    time.sleep(
        0.0011
    )  # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds


def end_start_step_function():
    time.sleep(
        0.0011
    )  # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds


def between_modes_function():
    time.sleep(
        0.0011
    )  # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds


def eval_function():
    time.sleep(
        0.0011
    )  # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds


def post_hook_close_function():
    time.sleep(
        0.0011
    )  # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds


def time_function():
    time.sleep(
        0.0011
    )  # stall long enough to be recorded by pyinstrument, which records every 0.001 seconds


def _upload_s3_folder(bucket, key, folder):
    s3_client = boto3.client("s3")
    filenames = []
    for root, _, files in os.walk(folder):
        for file in files:
            node_id = os.path.basename(os.path.dirname(root))
            stats_dir = os.path.basename(root)
            full_key = os.path.join(key, node_id, stats_dir, file)
            filenames.append((os.path.join(root, file), bucket, full_key))

    def upload_files(args):
        s3_client.upload_file(*args)

    pool = ThreadPool(processes=10)
    pool.map(upload_files, filenames)


def _validate_analysis(profiler_name, stats, expected_functions):
    function_names = [
        pre_step_zero_function.__name__,
        start_end_step_function.__name__,
        end_start_step_function.__name__,
        between_modes_function.__name__,
        eval_function.__name__,
        post_hook_close_function.__name__,
        time_function.__name__,
    ]

    assert stats is not None, "No stats found!"

    for analysis_function in function_names:
        if profiler_name == CPROFILE_NAME:
            function_stats_list = stats.function_stats_list
            assert len(function_stats_list) > 0

            if analysis_function in expected_functions:
                assert any(
                    [analysis_function in stat.function_name for stat in function_stats_list]
                ), f"{analysis_function} should be found in function stats!"
            else:
                assert all(
                    [analysis_function not in stat.function_name for stat in function_stats_list]
                ), f"{analysis_function} should not be found in function stats!"
        else:
            assert len(stats) == 1
            actual_functions = map(
                lambda x: x["function"], stats[0].json_stats["root_frame"]["children"]
            )
            assert set(actual_functions) == set(expected_functions)


@pytest.mark.parametrize("use_pyinstrument", [False, True])
@pytest.mark.parametrize("steps", [(1, 2), (1, 5)])
def test_python_profiling(
    use_pyinstrument, cprofile_python_profiler, pyinstrument_python_profiler, framework_dir, steps
):
    if use_pyinstrument:
        python_profiler = pyinstrument_python_profiler
        profiler_name = PYINSTRUMENT_NAME
        allowed_files = [PYINSTRUMENT_JSON_FILENAME, PYINSTRUMENT_HTML_FILENAME]
    else:
        python_profiler = cprofile_python_profiler
        profiler_name = CPROFILE_NAME
        allowed_files = [CPROFILE_STATS_FILENAME]

    python_stats_dir = os.path.join(framework_dir, profiler_name)

    start_step, end_step = steps
    current_step = start_step

    while current_step < end_step:
        python_profiler.start_profiling(StepPhase.STEP_START, start_step=current_step)
        assert python_profiler._start_step == current_step
        assert python_profiler._start_phase == StepPhase.STEP_START
        python_profiler.stop_profiling(StepPhase.STEP_END, current_step)
        current_step += 1

    # Test that the directory and corresponding files exist.
    assert os.path.isdir(python_stats_dir)

    for node_id in os.listdir(python_stats_dir):
        node_dir_path = os.path.join(python_stats_dir, node_id)
        stats_dirs = os.listdir(node_dir_path)
        assert len(stats_dirs) == (end_step - start_step)

        for stats_dir in stats_dirs:
            # Validate that the expected files are in the stats dir
            stats_dir_path = os.path.join(node_dir_path, stats_dir)
            stats_files = os.listdir(stats_dir_path)
            assert set(stats_files) == set(allowed_files)

            # Validate the actual stats files
            for stats_file in stats_files:
                stats_path = os.path.join(stats_dir_path, stats_file)
                if stats_file == CPROFILE_STATS_FILENAME:
                    assert pstats.Stats(stats_path)
                elif stats_file == PYINSTRUMENT_JSON_FILENAME:
                    with open(stats_path, "r") as f:
                        assert json.load(f)


@pytest.mark.parametrize("use_pyinstrument", [False, True])
@pytest.mark.parametrize("s3", [False, True])
def test_python_analysis(
    use_pyinstrument,
    cprofile_python_profiler,
    pyinstrument_python_profiler,
    framework_dir,
    test_framework,
    bucket_prefix,
    s3,
):
    """
    This test verifies that the cProfile/pyinstrument analysis retrieves the correct step's
    stats based on the specified interval. Stats are either retrieved from s3 or generated
    manually through python profiling.
    """
    if use_pyinstrument:
        python_profiler = pyinstrument_python_profiler
        analysis_class = PyinstrumentAnalysis
        profiler_name = PYINSTRUMENT_NAME
        num_expected_files = 14
    else:
        python_profiler = cprofile_python_profiler
        analysis_class = cProfileAnalysis
        profiler_name = CPROFILE_NAME
        num_expected_files = 7

    python_stats_dir = os.path.join(framework_dir, profiler_name)

    if s3:
        # Fetch stats from s3
        os.makedirs(python_stats_dir)
        python_profile_analysis = analysis_class(
            local_profile_dir=python_stats_dir, s3_path=bucket_prefix
        )
    else:
        # Do analysis and use those stats.
        # pre_step_zero_function is called in between the start of the script and the start of first step of TRAIN.
        python_profiler.start_profiling(StepPhase.START)
        pre_step_zero_function()
        python_profiler.stop_profiling(
            StepPhase.STEP_START, end_mode=PythonProfileModes.TRAIN, end_step=1
        )

        # start_end_step_function is called in between the start and end of first step of TRAIN.
        python_profiler.start_profiling(
            StepPhase.STEP_START, start_mode=PythonProfileModes.TRAIN, start_step=1
        )
        start_end_step_function()
        python_profiler.stop_profiling(
            StepPhase.STEP_END, end_mode=PythonProfileModes.TRAIN, end_step=1
        )

        # end_start_step_function is called in between the end of first step and the start of second step of TRAIN.
        python_profiler.start_profiling(
            StepPhase.STEP_END, start_mode=PythonProfileModes.TRAIN, start_step=1
        )
        end_start_step_function()
        python_profiler.stop_profiling(
            StepPhase.STEP_START, end_mode=PythonProfileModes.TRAIN, end_step=2
        )

        # between_modes_function is called in between the TRAIN and EVAL modes.
        python_profiler.start_profiling(
            StepPhase.STEP_END, start_mode=PythonProfileModes.TRAIN, start_step=1
        )
        between_modes_function()
        python_profiler.stop_profiling(
            StepPhase.STEP_START, end_mode=PythonProfileModes.EVAL, end_step=1
        )

        # eval_function is called in between the start and end of first step of EVAL.
        python_profiler.start_profiling(
            StepPhase.STEP_START, start_mode=PythonProfileModes.EVAL, start_step=1
        )
        eval_function()
        python_profiler.stop_profiling(
            StepPhase.STEP_END, end_mode=PythonProfileModes.EVAL, end_step=1
        )

        # post_hook_close_function is called in between the end of the last step of EVAL and the end of the script.
        python_profiler.start_profiling(
            StepPhase.STEP_END, start_mode=PythonProfileModes.EVAL, start_step=1
        )
        post_hook_close_function()
        python_profiler.stop_profiling(StepPhase.END)

        # time_function is called in between start and end of second step of TRAIN.
        # NOTE: This needs to be profiled last for tests to pass.
        python_profiler.start_profiling(
            StepPhase.STEP_START, start_mode=PythonProfileModes.TRAIN, start_step=2
        )
        time_function()
        python_profiler.stop_profiling(
            StepPhase.STEP_END, end_mode=PythonProfileModes.TRAIN, end_step=2
        )

        python_profile_analysis = analysis_class(local_profile_dir=python_stats_dir)
        _, bucket, prefix = is_s3(bucket_prefix)
        key = os.path.join(prefix, "framework", test_framework, profiler_name)
        _upload_s3_folder(bucket, key, python_stats_dir)

    python_profile_stats_df = python_profile_analysis.list_profile_stats()
    assert isinstance(python_profile_stats_df, pd.DataFrame)
    assert python_profile_stats_df.shape[0] == num_expected_files

    # Test that pre_step_zero_function call is recorded in received stats, but not the other functions.
    stats = python_profile_analysis.fetch_pre_step_zero_profile_stats(refresh_stats=False)
    _validate_analysis(profiler_name, stats, [pre_step_zero_function.__name__])

    # Test that start_end_step_function call is recorded in received stats, but not the other functions.
    stats = python_profile_analysis.fetch_profile_stats_by_step(1, refresh_stats=False)
    _validate_analysis(profiler_name, stats, [start_end_step_function.__name__])

    # Test that end_start_step_function call is recorded in received stats, but not the other functions.
    stats = python_profile_analysis.fetch_profile_stats_by_step(
        1,
        end_step=2,
        start_phase=StepPhase.STEP_END,
        end_phase=StepPhase.STEP_START,
        refresh_stats=False,
    )
    _validate_analysis(profiler_name, stats, [end_start_step_function.__name__])

    # Test that between_modes_function call is recorded in received stats, but not the other functions.
    stats = python_profile_analysis.fetch_profile_stats_between_modes(
        PythonProfileModes.TRAIN, PythonProfileModes.EVAL, refresh_stats=False
    )
    _validate_analysis(profiler_name, stats, [between_modes_function.__name__])

    # Test that eval_function call is recorded in received stats, but not the other functions.
    stats = python_profile_analysis.fetch_profile_stats_by_step(
        1, mode=PythonProfileModes.EVAL, refresh_stats=False
    )
    _validate_analysis(profiler_name, stats, [eval_function.__name__])

    # Test that post_hook_close_function call is recorded in received stats, but not the other functions.
    stats = python_profile_analysis.fetch_post_hook_close_profile_stats(refresh_stats=False)
    _validate_analysis(profiler_name, stats, [post_hook_close_function.__name__])

    # Test that time_function call is recorded in received stats, but not the other functions.
    time_function_step_stats = python_profile_analysis.python_profile_stats[-1]
    step_start_time = (
        time_function_step_stats.start_time_since_epoch_in_micros / CONVERT_TO_MICROSECS
    )
    stats = python_profile_analysis.fetch_profile_stats_by_time(
        step_start_time, time.time(), refresh_stats=False
    )
    _validate_analysis(profiler_name, stats, [time_function.__name__])

    # The following analysis functions are for cProfile only
    if use_pyinstrument:
        return

    # Test that functions called in TRAIN are recorded in received stats, but not the other functions.
    stats = python_profile_analysis.fetch_profile_stats_by_training_phase(refresh_stats=False)[
        (PythonProfileModes.TRAIN, PythonProfileModes.TRAIN)
    ]
    _validate_analysis(
        profiler_name,
        stats,
        [
            start_end_step_function.__name__,
            end_start_step_function.__name__,
            time_function.__name__,
        ],
    )

    # Test that functions called in the training loop are recorded in received stats, but not the other functions.
    stats = python_profile_analysis.fetch_profile_stats_by_job_phase(refresh_stats=False)[
        "training_loop"
    ]
    _validate_analysis(
        profiler_name,
        stats,
        [
            start_end_step_function.__name__,
            end_start_step_function.__name__,
            between_modes_function.__name__,
            eval_function.__name__,
            time_function.__name__,
        ],
    )
#
#    Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
#
#    See the file LICENSE.txt for your full rights.
#
"""Console simulator for the weewx weather system"""

from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
import math
import random
import time

import weewx.drivers
import weeutil.weeutil

DRIVER_NAME = 'Simulator'
DRIVER_VERSION = "3.3"


def loader(config_dict, engine):
    start_ts, resume_ts = extract_starts(config_dict, DRIVER_NAME)
    station = Simulator(start_time=start_ts, resume_time=resume_ts, **config_dict[DRIVER_NAME])
    return station


def extract_starts(config_dict, driver_name):
    """Extract the start and resume times out of the configuration dictionary"""

    # This uses a bit of a hack to have the simulator resume at a later
    # time. It's not bad, but I'm not enthusiastic about having special
    # knowledge about the database in a driver, albeit just the loader.

    start_ts = resume_ts = None
    if 'start' in config_dict[driver_name]:
        # A start has been specified. Extract the time stamp.
        start_tt = time.strptime(config_dict[driver_name]['start'], "%Y-%m-%dT%H:%M")
        start_ts = time.mktime(start_tt)
        # If the 'resume' keyword is present and True, then get the last
        # archive record out of the database and resume with that.
        if weeutil.weeutil.to_bool(config_dict[driver_name].get('resume', False)):
            import weewx.manager
            import weedb
            try:
                # Resume with the last time in the database. If there is no such
                # time, then fall back to the time specified in the configuration
                # dictionary.
                with weewx.manager.open_manager_with_config(config_dict, 'wx_binding') as dbmanager:
                    resume_ts = dbmanager.lastGoodStamp()
            except weedb.OperationalError:
                pass
        else:
            # The resume keyword is not present. Start with the seed time:
            resume_ts = start_ts

    return start_ts, resume_ts


class Simulator(weewx.drivers.AbstractDevice):
    """Station simulator"""

    def __init__(self, **stn_dict):
        """Initialize the simulator

        NAMED ARGUMENTS:

        loop_interval: The time (in seconds) between emitting LOOP packets.
        [Optional. Default is 2.5]

        start_time: The start (seed) time for the generator in unix epoch time
        [Optional. If 'None', or not present, then the present time will be used.]

        resume_time: The start time for the loop.
        [Optional. If 'None', or not present, then start_time will be used.]

        mode: Controls the frequency of packets. One of either:
            'simulator': Real-time simulator - sleep between LOOP packets
            'generator': Emit packets as fast as possible (useful for testing)
        [Optional. Default is 'simulator'.]

        observations: Comma-separated list of observations that should be
                      generated. If nothing is specified, then all
                      observations will be generated.
        [Optional. Default is not defined.]
        """

        self.loop_interval = float(stn_dict.get('loop_interval', 2.5))
        if 'start_time' in stn_dict and stn_dict['start_time'] is not None:
            # A start time has been specified. We are not in real time mode.
            self.real_time = False
            # Extract the generator start time:
            start_ts = float(stn_dict['start_time'])
            # If a resume time keyword is present (and it's not None),
            # then have the generator resume with that time.
            if 'resume_time' in stn_dict and stn_dict['resume_time'] is not None:
                self.the_time = float(stn_dict['resume_time'])
            else:
                self.the_time = start_ts
        else:
            # No start time specified. We are in real time mode.
            self.real_time = True
            start_ts = self.the_time = time.time()

        # default to simulator mode
        self.mode = stn_dict.get('mode', 'simulator')

        # The following doesn't make much meteorological sense, but it is
        # easy to program!
        self.observations = {
            'outTemp'    : Observation(magnitude=20.0,  average= 50.0, period=24.0, phase_lag=14.0, start=start_ts),
            'inTemp'     : Observation(magnitude=5.0,   average= 68.0, period=24.0, phase_lag=12.0, start=start_ts),
            'barometer'  : Observation(magnitude=1.0,   average= 30.1, period=48.0, phase_lag= 0.0, start=start_ts),
            'pressure'   : Observation(magnitude=1.0,   average= 30.1, period=48.0, phase_lag= 0.0, start=start_ts),
            'windSpeed'  : Observation(magnitude=5.0,   average= 5.0,  period=48.0, phase_lag=24.0, start=start_ts),
            'windDir'    : Observation(magnitude=180.0, average=180.0, period=48.0, phase_lag= 0.0, start=start_ts),
            'windGust'   : Observation(magnitude=6.0,   average= 6.0,  period=48.0, phase_lag=24.0, start=start_ts),
            'windGustDir': Observation(magnitude=180.0, average=180.0, period=48.0, phase_lag= 0.0, start=start_ts),
            'outHumidity': Observation(magnitude=30.0,  average= 50.0, period=48.0, phase_lag= 0.0, start=start_ts),
            'inHumidity' : Observation(magnitude=10.0,  average= 20.0, period=24.0, phase_lag= 0.0, start=start_ts),
            'radiation'  : Solar(magnitude=1000, solar_start=6, solar_length=12),
            'UV'         : Solar(magnitude=14,   solar_start=6, solar_length=12),
            'rain'       : Rain(rain_start=0, rain_length=3, total_rain=0.2, loop_interval=self.loop_interval),
            'txBatteryStatus': BatteryStatus(),
            'windBatteryStatus': BatteryStatus(),
            'rainBatteryStatus': BatteryStatus(),
            'outTempBatteryStatus': BatteryStatus(),
            'inTempBatteryStatus': BatteryStatus(),
            'consBatteryVoltage': BatteryVoltage(),
            'heatingVoltage': BatteryVoltage(),
            'supplyVoltage': BatteryVoltage(),
            'referenceVoltage': BatteryVoltage(),
            'rxCheckPercent': SignalStrength()}

        self.trim_observations(stn_dict)

    def trim_observations(self, stn_dict):
        """Calculate only the specified observations, or all if none specified"""
        if 'observations' in stn_dict and stn_dict['observations'] is not None:
            desired = [x.strip() for x in stn_dict['observations'].split(',')]
            for obs in list(self.observations):
                if obs not in desired:
                    del self.observations[obs]

    def genLoopPackets(self):
        while True:
            # If we are in simulator mode, sleep first (as if we are gathering
            # observations). If we are in generator mode, don't sleep at all.
            if self.mode == 'simulator':
                # Determine how long to sleep
                if self.real_time:
                    # We are in real time mode. Try to keep synched up with the
                    # wall clock
                    sleep_time = self.the_time + self.loop_interval - time.time()
                    if sleep_time > 0:
                        time.sleep(sleep_time)
                else:
                    # A start time was specified, so we are not in real time.
                    # Just sleep the appropriate interval
                    time.sleep(self.loop_interval)

            # Update the simulator clock:
            self.the_time += self.loop_interval

            # Because a packet represents the measurements observed over the
            # time interval, we want the measurement values at the middle
            # of the interval.
            avg_time = self.the_time - self.loop_interval / 2.0

            _packet = {'dateTime': int(self.the_time + 0.5),
                       'usUnits': weewx.US}
            for obs_type in self.observations:
                _packet[obs_type] = self.observations[obs_type].value_at(avg_time)
            yield _packet

    def getTime(self):
        return self.the_time

    @property
    def hardware_name(self):
        return "Simulator"


class Observation(object):

    def __init__(self, magnitude=1.0, average=0.0, period=96.0, phase_lag=0.0, start=None):
        """Initialize an observation function.

        magnitude: The value at max. The range will be twice this value
        average: The average value, averaged over a full cycle.
        period: The cycle period in hours.
        phase_lag: The number of hours after the start time when the
        observation hits its max
        start: Time zero for the observation in unix epoch time."""

        if not start:
            raise ValueError("No start time specified")

        self.magnitude = magnitude
        self.average = average
        self.period = period * 3600.0
        self.phase_lag = phase_lag * 3600.0
        self.start = start

    def value_at(self, time_ts):
        """Return the observation value at the given time.

        time_ts: The time in unix epoch time."""

        phase = 2.0 * math.pi * (time_ts - self.start - self.phase_lag) / self.period
        return self.magnitude * math.cos(phase) + self.average


class Rain(object):

    bucket_tip = 0.01

    def __init__(self, rain_start=0, rain_length=1, total_rain=0.1, loop_interval=None):
        """Initialize a rain simulator"""
        npackets = 3600 * rain_length / loop_interval
        n_rain_packets = total_rain / Rain.bucket_tip
        self.period = int(npackets / n_rain_packets)
        self.rain_start = 3600 * rain_start
        self.rain_end = self.rain_start + 3600 * rain_length
        self.packet_number = 0

    def value_at(self, time_ts):
        time_tt = time.localtime(time_ts)
        secs_since_midnight = time_tt.tm_hour * 3600 + time_tt.tm_min * 60.0 + time_tt.tm_sec
        if self.rain_start < secs_since_midnight <= self.rain_end:
            amt = Rain.bucket_tip if self.packet_number % self.period == 0 else 0.0
            self.packet_number += 1
        else:
            self.packet_number = 0
            amt = 0
        return amt


class Solar(object):

    def __init__(self, magnitude=10, solar_start=6, solar_length=12):
        """Initialize a solar simulator

        The simulated observation follows a single-wave sine function,
        starting at 0 and ending at 0. The solar day starts at time
        solar_start and finishes after solar_length hours.

        magnitude: the value at max, the range will be twice this value
        solar_start: decimal hour of day that obs start
            (6.75 = 6:45am, 6.2 = 6:12am)
        solar_length: length of day in decimal hours
            (10.75 = 10hr 45min, 10.1 = 10hr 6min)
        """
        self.magnitude = magnitude
        self.solar_start = 3600 * solar_start
        self.solar_end = self.solar_start + 3600 * solar_length
        self.solar_length = 3600 * solar_length

    def value_at(self, time_ts):
        time_tt = time.localtime(time_ts)
        secs_since_midnight = time_tt.tm_hour * 3600 + time_tt.tm_min * 60.0 + time_tt.tm_sec
        if self.solar_start < secs_since_midnight <= self.solar_end:
            amt = self.magnitude * (1 + math.cos(math.pi * (1 + 2.0 * (
                (secs_since_midnight - self.solar_start) / self.solar_length - 1)))) / 2
        else:
            amt = 0
        return amt


class BatteryStatus(object):

    def __init__(self, chance_of_failure=None, min_recovery_time=None):
        """Initialize a battery status.

        chance_of_failure - likelihood that the battery should fail [0,1]
        min_recovery_time - minimum time until the battery recovers, seconds
        """
        if chance_of_failure is None:
            chance_of_failure = 0.0005  # roughly one failure per 2000 packets
        if min_recovery_time is None:
            min_recovery_time = random.randint(300, 1800)  # 5 to 30 minutes
        self.chance_of_failure = chance_of_failure
        self.min_recovery_time = min_recovery_time
        self.state = 0
        self.fail_ts = 0

    def value_at(self, time_ts):
        if self.state == 1:
            # recover if sufficient time has passed
            if time_ts - self.fail_ts > self.min_recovery_time:
                self.state = 0
        else:
            # see if we need a failure
            if random.random() < self.chance_of_failure:
                self.state = 1
                self.fail_ts = time_ts
        return self.state


class BatteryVoltage(object):

    def __init__(self, nominal_value=None, max_variance=None):
        """Initialize a battery voltage."""
        if nominal_value is None:
            nominal_value = 12.0
        if max_variance is None:
            max_variance = 0.1 * nominal_value
        self.nominal = nominal_value
        self.variance = max_variance

    def value_at(self, time_ts):
        return self.nominal + self.variance * random.random() * random.randint(-1, 1)


class SignalStrength(object):

    def __init__(self, minval=0.0, maxval=100.0):
        """Initialize a signal strength simulator."""
        self.minval = minval
        self.maxval = maxval
        self.max_variance = 0.1 * (self.maxval - self.minval)
        self.value = self.minval + random.random() * (self.maxval - self.minval)

    def value_at(self, time_ts):
        newval = self.value + self.max_variance * random.random() * random.randint(-1, 1)
        newval = max(self.minval, newval)
        newval = min(self.maxval, newval)
        self.value = newval
        return self.value


def confeditor_loader():
    return SimulatorConfEditor()


class SimulatorConfEditor(weewx.drivers.AbstractConfEditor):
    @property
    def default_stanza(self):
        return """
[Simulator]
    # This section is for the weewx weather station simulator

    # The time (in seconds) between LOOP packets.
    loop_interval = 2.5

    # The simulator mode can be either 'simulator' or 'generator'.
    # Real-time simulator. Sleep between each LOOP packet.
    mode = simulator
    # Generator. Emit LOOP packets as fast as possible (useful for testing).
    #mode = generator

    # The start time. Format is YYYY-mm-ddTHH:MM. If not specified, the default
    # is to use the present time.
    #start = 2011-01-01T00:00

    # The driver to use:
    driver = weewx.drivers.simulator
"""


if __name__ == "__main__":
    station = Simulator(mode='simulator', loop_interval=2.0)
    for packet in station.genLoopPackets():
        print(weeutil.weeutil.timestamp_to_string(packet['dateTime']), packet)
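# A standalone sanity check of the Observation model above (a sketch, not
# part of the driver): at start + phase_lag the cosine term peaks, so
# value_at returns average + magnitude; half a period later it returns
# average - magnitude.
def _demo_observation():
    start = 1000000.0
    obs = Observation(magnitude=20.0, average=50.0, period=24.0,
                      phase_lag=14.0, start=start)
    peak = obs.value_at(start + 14.0 * 3600.0)               # expect 70.0
    trough = obs.value_at(start + (14.0 + 12.0) * 3600.0)    # expect 30.0
    assert abs(peak - 70.0) < 1e-6
    assert abs(trough - 30.0) < 1e-6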
from typing import Dict

import psycopg2
import requests


def insert_reading(reading: Dict):
    sql = """
    INSERT INTO youless_readings (
        net_counter,
        power,
        consumption_high,
        consumption_low,
        production_high,
        production_low,
        gas
    ) VALUES( %s, %s, %s, %s, %s, %s, %s );"""
    conn = None
    try:
        # connect to the PostgreSQL database
        conn = psycopg2.connect(host="postgres", database="fokko", user="fokko", password="fokko")
        # create a new cursor
        cur = conn.cursor()
        # execute the INSERT statement
        cur.execute(
            sql,
            (
                reading["net"],
                reading["pwr"],
                reading["p1"],
                reading["p2"],
                reading["n1"],
                reading["n2"],
                reading["gas"],
            ),
        )
        # commit the changes to the database
        conn.commit()
        # close communication with the database
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()


# "tm":  unix-time-format (1489333828 => Sun, 12 Mar 2017 15:50:28 GMT)
# "net": Netto counter, as displayed in the web-interface of the LS-120.
#        It seems equal to: p1 + p2 - n1 - n2. Perhaps it also includes some user-set offset.
# "pwr": Actual power use in Watt (can be negative)
# "p1":  P1 consumption counter (low tariff)
# "p2":  P2 consumption counter (high tariff)
# "n1":  N1 production counter (low tariff)
# "n2":  N2 production counter (high tariff)
# "gas": counter gas-meter (in m^3)
youless_address = "http://192.168.1.158/e?f=j"

output = requests.get(url=youless_address)
reading = output.json()[0]
insert_reading(reading)
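# A sketch of the single-element JSON payload the LS-120 serves at /e?f=j;
# insert_reading() maps these keys onto the youless_readings columns. The
# values below are made up, for local testing without the meter:
sample_reading = {
    "tm": 1489333828,    # unix timestamp
    "net": 16720.712,    # net counter (kWh)
    "pwr": -460,         # actual power in Watt (negative = producing)
    "p1": 12345.678,     # consumption counter, low tariff (kWh)
    "p2": 23456.789,     # consumption counter, high tariff (kWh)
    "n1": 8765.432,      # production counter, low tariff (kWh)
    "n2": 7654.321,      # production counter, high tariff (kWh)
    "gas": 1234.567,     # gas counter (m^3)
}
# insert_reading(sample_reading) can be exercised against such a dict,
# provided the postgres instance above is reachable.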
import os
import torch
from torch.autograd import Function
import torch.nn as nn
from typing import *
from torch.utils.cpp_extension import load

ppp_ops = load(name="ppp_ops",
               sources=[f"{os.path.dirname(os.path.abspath(__file__))}/pointnetpp_operations.cpp",
                        f"{os.path.dirname(os.path.abspath(__file__))}/pointnetpp_operations.cu"])


class FurthestPointSampling(Function):
    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
        """
        Uses iterative furthest point sampling to select a set of npoint features that have the
        largest minimum distance
        :param ctx:
        :param xyz: (B, N, 3) tensor where N > npoint
        :param npoint: number of features in the sampled set
        :return: (B, npoint) tensor containing the set
        """
        assert(xyz.is_cuda)
        return ppp_ops.furthest_point_sampling_cuda(xyz, npoint)

    @staticmethod
    def backward(xyz, a=None):
        return None, None


class GatherOperation(Function):
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N) tensor
        :param idx: (B, npoint) tensor of the features to gather
        :return: (B, C, npoint) tensor
        """
        _, C, N = features.size()
        ctx.for_backwards = (idx, C, N)
        assert (features.is_cuda and idx.is_cuda)
        return ppp_ops.gather_points_cuda(features, idx)

    @staticmethod
    def backward(ctx, grad_out):
        idx, C, N = ctx.for_backwards
        grad_features = ppp_ops.group_points_grad_cuda(grad_out.contiguous(), idx, N)
        return grad_features, None


class ThreeNN(Function):
    @staticmethod
    def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Find the three nearest neighbors of unknown in known
        :param ctx:
        :param unknown: (B, n, 3) tensor of the features whose neighbors are searched
        :param known: (B, m, 3) tensor of known features
        :return: (B, n, 3) l2 distance to the three nearest neighbors;
                 (B, n, 3) index of the 3 nearest neighbors
        """
        assert(unknown.is_cuda and known.is_cuda)
        dist2, idx = ppp_ops.three_nn_cuda(unknown, known)
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        return None, None


class ThreeInterpolate(Function):
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Performs weighted linear interpolation on 3 features
        :param ctx:
        :param features: (B, c, m) features descriptors to be interpolated from
        :param idx: (B, n, 3) three nearest neighbors of the target features in features
        :param weight: (B, n, 3) weights
        :return: (B, c, n) tensor of the interpolated features
        """
        B, c, m = features.size()
        n = idx.size(1)
        ctx.three_interpolate_for_backward = (idx, weight, m)
        assert(features.is_cuda and idx.is_cuda and weight.is_cuda)
        return ppp_ops.three_interpolate_cuda(features, idx, weight)

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, c, n) tensor with gradients of outputs
        :return: (B, c, m) tensor with gradients of features
        """
        idx, weight, m = ctx.three_interpolate_for_backward
        grad_features = ppp_ops.three_interpolate_grad_cuda(
            grad_out.contiguous(), idx, weight, m
        )
        return grad_features, None, None


class GroupingOperation(Function):
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indices of features to group with
        :return: (B, C, npoint, nsample) tensor
        """
        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()
        ctx.for_backwards = (idx, N)
        assert(features.is_cuda and idx.is_cuda)
        return ppp_ops.group_points_cuda(features, idx)

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
        :return: (B, C, N) gradient of the features
        """
        idx, N = ctx.for_backwards
        grad_features = ppp_ops.group_points_grad_cuda(grad_out.contiguous(), idx, N)
        return grad_features, None


class BallQuery(Function):
    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor,
                new_xyz: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param radius: radius of the balls
        :param nsample: maximum number of features in the balls
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centers of the ball query
        :return: (B, npoint, nsample) tensor with the indices of the features that form the query balls
        """
        assert(new_xyz.is_cuda and xyz.is_cuda)
        return ppp_ops.ball_query_cuda(new_xyz, xyz, radius, nsample)

    @staticmethod
    def backward(ctx, a=None):
        return None, None, None, None


class QueryAndGroup(nn.Module):
    def __init__(self, radius: float, nsample: int):
        """
        Groups with a ball query of radius
        :param radius: Radius of ball
        :param nsample: Maximum number of features to gather in the ball
        """
        super(QueryAndGroup, self).__init__()
        self.radius, self.nsample = radius, nsample

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor,
                features: torch.Tensor = None) -> torch.Tensor:
        """
        :param xyz: xyz coordinates of the features (B, N, 3)
        :param new_xyz: centroids (B, npoint, 3)
        :param features: Descriptors of the features (B, N, C)
        :return: (B, 3 + C, npoint, nsample) tensor
        """
        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        grouped_xyz = grouping_operation(xyz.transpose(1, 2).contiguous(), idx)  # (B, 3, npoint, nsample)
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)

        if features is not None:
            grouped_features = grouping_operation(features.transpose(1, 2).contiguous(), idx)  # (B, C, npoint, nsample)
            new_features = torch.cat([grouped_xyz, grouped_features], dim=1)  # (B, C + 3, npoint, nsample)
        else:
            new_features = grouped_xyz

        return new_features


class GroupAll(nn.Module):
    def __init__(self):
        """
        Groups all features
        """
        super(GroupAll, self).__init__()

    def forward(self, xyz, new_xyz: torch.Tensor, features: torch.Tensor = None) -> torch.Tensor:
        """
        :param xyz: xyz coordinates of the features (B, N, 3)
        :param new_xyz: Ignored
        :param features: Descriptors of the features (B, N, C)
        :return: (B, C + 3, 1, N) tensor
        """
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
        if features is not None:
            features = features.transpose(1, 2).contiguous()  # (B, C, N)
            grouped_features = features.unsqueeze(2)
            new_features = torch.cat(
                [grouped_xyz, grouped_features], dim=1
            )  # (B, 3 + C, 1, N)
        else:
            new_features = grouped_xyz

        return new_features


ball_query = BallQuery.apply
furthest_point_sample = FurthestPointSampling.apply
gather_operation = GatherOperation.apply
three_nn = ThreeNN.apply
three_interpolate = ThreeInterpolate.apply
grouping_operation = GroupingOperation.apply
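# A minimal usage sketch of the ops above (an illustration only: it assumes a
# CUDA device is available and that the C++/CUDA extension compiled; shapes
# follow the docstrings).
def _demo_query_and_group():
    B, N, npoint, nsample = 2, 1024, 128, 64
    xyz = torch.rand(B, N, 3).cuda()
    features = torch.rand(B, N, 32).cuda()
    # Pick npoint centroids with furthest point sampling: (B, npoint)
    idx = furthest_point_sample(xyz, npoint)
    # Gather their coordinates: (B, 3, npoint) -> (B, npoint, 3)
    new_xyz = gather_operation(
        xyz.transpose(1, 2).contiguous(), idx
    ).transpose(1, 2).contiguous()
    # Group up to nsample neighbors within a ball around each centroid.
    grouper = QueryAndGroup(radius=0.2, nsample=nsample)
    new_features = grouper(xyz, new_xyz, features)
    assert new_features.shape == (B, 3 + 32, npoint, nsample)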
import sys
import time
import pprint
import os

import numpy as np
from web3 import Web3
from solcx import compile_source

contract_source_path = os.environ['HOME'] + '/765_a3/MyContract.sol'
logs = False
grcpt = False


def compile_source_file(file_path):
    with open(file_path, 'r') as f:
        source = f.read()
    return compile_source(source)


def getReceipt(tx_hash3):
    '''Get and wait for the receipt of the given transaction hash.'''
    while True:
        try:
            # keep trying until we get a receipt
            time.sleep(0.1)
            receipt3 = w3.eth.getTransactionReceipt(tx_hash3)
            break
        except:
            continue
    receipt3 = w3.eth.getTransactionReceipt(tx_hash3)
    if receipt3 is not None and logs:
        print("empty:{0}".format(receipt3['gasUsed']))  # amount of gas used for execution
    return


def registerUserTransaction(sort_contract, user_id, gr=False):
    '''Wrapper for calling the registerUser function in Solidity.
    Returns the hash of the tentative transaction.'''
    if logs:
        print("Registering User:", user_id)
    tx_hash = sort_contract.functions.registerUser(user_id, "YG").transact(
        {'txType': "0x3", 'from': w3.eth.accounts[0], 'gas': 2409638})
    if gr:
        getReceipt(tx_hash)
    return tx_hash


def createAccTransaction(sort_contract, user_id_1, user_id_2, gr=False):
    '''Wrapper for calling the createAcc function in Solidity.
    Returns the hash of the tentative transaction.'''
    if logs:
        print("Creating Account between:", user_id_1, user_id_2)
    amt = int(np.random.exponential(10) * 0.5)
    tx_hash = sort_contract.functions.createAcc(user_id_1, user_id_2, amt).transact(
        {'txType': "0x3", 'from': w3.eth.accounts[0], 'gas': 2409638})
    if gr:
        getReceipt(tx_hash)
    return tx_hash


def closeAccTransaction(sort_contract, user_id_1, user_id_2, gr=False):
    '''Wrapper for calling the closeAcc function in Solidity.
    Returns the hash of the tentative transaction.'''
    if logs:
        print("Closing Account between:", user_id_1, user_id_2)
    tx_hash = sort_contract.functions.closeAcc(user_id_1, user_id_2).transact(
        {'txType': "0x3", 'from': w3.eth.accounts[0], 'gas': 2409638})
    if gr:
        getReceipt(tx_hash)
    return tx_hash


def sendAmountTransaction(sort_contract, user_id_1, user_id_2, amt, gr=False):
    '''Wrapper for calling the sendAmount function in Solidity.
    Returns the hash of the tentative transaction.'''
    if logs:
        print("Attempt to send ", amt, " from ", user_id_1, " to ", user_id_2)
    tx_hash = sort_contract.functions.sendAmount(user_id_1, user_id_2, amt).transact(
        {'txType': "0x3", 'from': w3.eth.accounts[0], 'gas': 2409638})
    if gr:
        getReceipt(tx_hash)
    return tx_hash


def getSucCountCall(sort_contract):
    '''Wrapper for checking the successful transaction count in Solidity.
    Returns the number of successful transactions.'''
    tx_hash = sort_contract.functions.getSucCount().call()
    print("Number of Successful Transactions:", tx_hash)
    return tx_hash

#######################################################################################################################

print("Starting Transaction Submission")

w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:1558'))  # start web3 on the given port
w3.geth.miner.start(1)  # start miner

with open(os.environ['HOME'] + '/765_a3/MyContractAddressList') as fp:
    for line in fp:
        a, b = line.rstrip().split(':', 1)
        if a == "empty":
            contract_source_path = os.environ['HOME'] + '/765_a3/MyContract.sol'
            compiled_sol = compile_source_file(contract_source_path)  # compile the Solidity code
            contract_id, contract_interface = compiled_sol.popitem()
            sort_contract = w3.eth.contract(address=b, abi=contract_interface['abi'])  # get the contract

N = 100  # number of nodes
T = 1000  # number of transactions
interval = 100  # interval of logging and reporting
t = 0

# Register N users
wait_list = []
for i in range(N):
    wait_list.append(registerUserTransaction(sort_contract, i, gr=grcpt))
if not grcpt:
    for wl in wait_list:
        getReceipt(wl)

# Construct a power-law degree distribution graph using networkx
import networkx
power_graph = networkx.barabasi_albert_graph(N, int(0.7 * N))

# Create accounts along the edges of the graph
wait_list = []
for edge in power_graph.edges:
    wait_list.append(createAccTransaction(sort_contract, edge[0], edge[1], gr=grcpt))
if not grcpt:
    for wl in wait_list:
        getReceipt(wl)

# Get the initial successful transaction count; it should be 0.
getSucCountCall(sort_contract)

wait_list = []
while (t < T):
    sender = np.random.randint(N)
    recvr = np.random.randint(N)
    if (sender == recvr):  # if sender and receiver are the same, skip
        continue
    t += 1
    # send amount transaction between sender and receiver
    wait_list.append(sendAmountTransaction(sort_contract, sender, recvr, 1, gr=grcpt))
    if (t % interval == 0):
        if not grcpt:
            for wl in wait_list:
                getReceipt(wl)
            wait_list = []
        getSucCountCall(sort_contract)
        print("Number of Total Transactions:", t)

w3.geth.miner.stop()  # stop miner
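# A sketch of the MyContractAddressList format consumed above (the address is
# made up for illustration): one "tag:address" pair per line, e.g.
#
#   empty:0x1234567890AbcdEF1234567890aBcdef12345678
#
# Only the line tagged "empty" is used; its address is bound to the compiled
# MyContract ABI before any transactions are submitted.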
import xacc

xacc.Initialize()

# Get access to the D-Wave QPU and
# allocate some qubits
dwave = xacc.getAccelerator('dwave')
qubits = dwave.createBuffer('q')

# Define the function we'd like to
# off-load to the QPU; here
# we're using the QMI low-level language
@xacc.qpu(accelerator=dwave)
def f(buffer, h, j):
    qmi(0,0,h)
    qmi(1,1,h)
    qmi(0,1,j)

# Execute on D-Wave
f(qubits, 1., 2.)

# Print the buffer, this displays
# solutions and energies
print(qubits)

xacc.Finalize()
# -*- coding: utf-8 -*-
'''
Redis SDB module
================

.. versionadded:: 2019.2.0

This module allows access to Redis using an ``sdb://`` URI.

Like all SDB modules, the Redis module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. For example:

.. code-block:: yaml

    sdb_redis:
      driver: redis
      host: 127.0.0.1
      port: 6379
      password: pass
      db: 1

The ``driver`` refers to the Redis module, all other options are optional.
For option details see: https://redis-py.readthedocs.io/en/latest/.
'''
from __future__ import absolute_import, print_function, unicode_literals

try:
    import redis
    HAS_REDIS = True
except ImportError:
    HAS_REDIS = False

__func_alias__ = {
    'set_': 'set'
}

__virtualname__ = 'redis'


def __virtual__():
    '''
    Module virtual name.
    '''
    if not HAS_REDIS:
        return (False, 'Please install python-redis to use this SDB module.')
    return __virtualname__


def set_(key, value, profile=None):
    '''
    Set a value into the Redis SDB.
    '''
    if not profile:
        return False
    redis_kwargs = profile.copy()
    redis_kwargs.pop('driver')
    redis_conn = redis.StrictRedis(**redis_kwargs)
    return redis_conn.set(key, value)


def get(key, profile=None):
    '''
    Get a value from the Redis SDB.
    '''
    if not profile:
        return False
    redis_kwargs = profile.copy()
    redis_kwargs.pop('driver')
    redis_conn = redis.StrictRedis(**redis_kwargs)
    return redis_conn.get(key)


def delete(key, profile=None):
    '''
    Delete a key from the Redis SDB.
    '''
    if not profile:
        return False
    redis_kwargs = profile.copy()
    redis_kwargs.pop('driver')
    redis_conn = redis.StrictRedis(**redis_kwargs)
    return redis_conn.delete(key)
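# A minimal round-trip sketch outside of Salt (profile keys follow the module
# docstring; host/port values are illustrative and a local Redis server is
# assumed to be running):
def _demo_profile_roundtrip():
    profile = {
        'driver': 'redis',
        'host': '127.0.0.1',
        'port': 6379,
        'db': 1,
    }
    set_('my/key', 'my-value', profile=profile)
    # StrictRedis returns bytes by default.
    assert get('my/key', profile=profile) == b'my-value'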
import ast
import os
import logging
from contextlib import contextmanager

from pystatic.arg import Arg, Argument
from typing import List, Tuple
from pystatic.target import Target
from pystatic.symid import symid2list
from pystatic.typesys import TypeClassTemp, TypeFuncTemp, TypeIns, TypeTemp, TypeType
from pystatic.symtable import SymTable

logger = logging.getLogger(__name__)

_default_dir = os.path.curdir + os.path.sep + 'out'
_indent_unit = ' ' * 4

IMPORT = 1
FUN = 2
CLS = 3
VAR = 4


def stubgen(targets: List[Target], rt_dir=_default_dir):
    if not mkstub_dir(rt_dir):
        return

    for target in targets:
        stub_file = filepath(target, rt_dir)
        result = stubgen_main(target)
        with open(stub_file, 'w') as f:
            f.write(result)


def mkstub_dir(dir: str):
    if os.path.exists(dir):
        if not os.path.isdir(dir):
            r_path = os.path.realpath(dir)
            logger.error(f'{r_path} already exists and is a file.')
            return False
        return True
    else:
        os.mkdir(dir)
        return True


def filepath(target: Target, rt_dir: str):
    symidlist = symid2list(target.symid)
    cur_dir = rt_dir
    for i, name in enumerate(symidlist):
        next_dir = os.path.join(cur_dir, name)
        if not os.path.exists(next_dir):
            if i != len(symidlist) - 1:
                os.mkdir(next_dir)
        cur_dir = next_dir
    return cur_dir + '.pyi'


def stubgen_main(target: Target) -> str:
    creator = StubGen(target)
    return creator.generate()


class Node:
    def __init__(self, symid: str):
        self.symid = symid
        self.subsymid = {}
        self.alias = None

    def set_alias(self, alias: str):
        self.alias = alias


class NameTree:
    def __init__(self, module_symid: str):
        self.root = Node('')
        self.module_symid = module_symid

    def ask(self, temp: TypeTemp) -> str:
        module_symid = temp.module_symid
        symid = temp.name

        symidlist = symid2list(module_symid) + symid2list(symid)
        cur_node = self.root
        namelist = []
        for subname in symidlist:
            if subname in cur_node.subsymid:
                cur_node = cur_node.subsymid[subname]
                if cur_node.alias:
                    namelist = [cur_node.alias]
                else:
                    namelist.append(subname)
            else:
                return '.'.join(symidlist)
        return '.'.join(namelist)

    def add_import(self, module_symid: str, symid: str, asname: str):
        symidlist = symid2list(module_symid) + symid2list(symid)
        cur_node = self.root

        for subname in symidlist:
            if not subname:
                continue
            if subname in cur_node.subsymid:
                cur_node = cur_node.subsymid[subname]
            else:
                cur_node.subsymid[subname] = Node(subname)

        if asname:
            cur_node.alias = asname


class StubGen:
    def __init__(self, target: Target):
        self.target = target
        self.name_tree = NameTree(target.symid)
        self.in_class = False
        self.from_typing = set()
        self.cur_symid = ''

    @property
    def module_symid(self):
        return self.target.symid

    @staticmethod
    def scoped_list_to_str(lst: List[Tuple[str, int]]):
        if not lst:
            return ''

        results = [lst[0][0]]
        prev_scope = lst[0][1]
        for item, scope in lst[1:]:
            if prev_scope == scope:
                results.append(item)
            else:
                results.append('\n')
                results.append(item)
            prev_scope = scope

        return ''.join(results)

    def generate(self):
        src_str = self.stubgen_symtable(self.target.symtable, 0)
        sym_local = self.target.symtable.local
        typing_list = filter(
            lambda name: (name not in sym_local) and name.find('.') < 0,
            self.from_typing)
        impt_typing = ', '.join(typing_list)
        if impt_typing:
            return f'from typing import {impt_typing}\n' + src_str
        else:
            return src_str

    @contextmanager
    def enter_class(self, clsname: str):
        old_symid = self.cur_symid
        old_in_class = self.in_class

        if not self.cur_symid:
            self.cur_symid = f'{clsname}'
        else:
            self.cur_symid += f'.{clsname}'
        yield

        self.cur_symid = old_symid
        self.in_class = old_in_class

    def indent_prefix(self, level: int) -> str:
        return _indent_unit * level

    def stubgen_symtable(self, symtable: 'SymTable', level: int):
        results: List[Tuple[str, int]] = []
        impt_stmt = self.stubgen_import(symtable, level)
        if impt_stmt:
            results.append((impt_stmt, IMPORT))

        for name, entry in symtable.local.items():
            tpins = entry.get_type()
            if not tpins:
                logger.warning(f'{name} has incomplete type.')
                continue

            temp = tpins.temp
            if isinstance(tpins, TypeType):
                assert isinstance(temp, TypeClassTemp)
                results.append((self.stub_cls_def(name, temp, level), CLS))
            elif isinstance(temp, TypeFuncTemp):
                results.append((self.stub_fun_def(name, temp, level), FUN))
            else:
                results.append((self.stub_var_def(name, temp, level), VAR))

        return self.scoped_list_to_str(results)

    def stubgen_import(self, symtable: 'SymTable', level: int) -> str:
        results = []
        for impt_node in symtable._import_nodes:
            impt_dict = split_import_stmt(impt_node, symtable.glob_symid)
            if isinstance(impt_node, ast.Import):
                import_stmt = 'import '
                import_subitem = []
                for symid, infolist in impt_dict.items():
                    module_name = symid
                    for asname, origin_name in infolist:
                        assert not origin_name
                        if asname == module_name:
                            top_name = symid2list(asname)[0]
                            if top_name:
                                symtable.local.pop(top_name, None)
                            import_subitem.append(f'{module_name}')
                            self.name_tree.add_import(module_name, '', '')
                        else:
                            symtable.local.pop(asname, None)
                            import_subitem.append(f'{module_name} as {asname}')
                            self.name_tree.add_import(module_name, '', asname)

                if len(import_subitem) > 5:
                    import_stmt += '(' + ', '.join(import_subitem) + ')'
                else:
                    import_stmt += ', '.join(import_subitem)
                results.append((import_stmt, level))
            else:
                for symid, infolist in impt_dict.items():
                    module_name = symid
                    from_impt: List[str] = []
                    for asname, origin_name in infolist:
                        if origin_name == asname:
                            symtable.local.pop(asname, None)
                            from_impt.append(f"{asname}")
                            self.name_tree.add_import(module_name, origin_name, '')
                        else:
                            symtable.local.pop(asname, None)
                            from_impt.append(f"{origin_name} as {asname}")
                            self.name_tree.add_import(module_name, origin_name, asname)

                    if from_impt:
                        impt_str = ', '.join(from_impt)
                        if len(from_impt) > 5:
                            from_stmt = f'from {module_name} import ({impt_str})'
                        else:
                            from_stmt = f'from {module_name} import {impt_str}'
                        results.append((from_stmt, level))

        if not results:
            return ''
        else:
            return '\n'.join(
                [_indent_unit * ident + stmt for stmt, ident in results]) + '\n'

    def stub_var_def(self, varname: str, temp: TypeTemp, level: int):
        module_symid = temp.module_symid
        symid = temp.name

        type_str = ''
        if module_symid == 'builtins':
            type_str = symid
        elif module_symid == 'typing':
            self.from_typing.add(symid)
            type_str = symid
        elif module_symid == self.module_symid:
            if self.cur_symid and symid.find(
                    self.cur_symid) == 0 and len(symid) > len(self.cur_symid):
                type_str = symid[len(self.cur_symid) + 1:]
            else:
                type_str = symid
        else:
            type_str = self.name_tree.ask(temp)

        return _indent_unit * level + varname + ': ' + type_str + '\n'

    def stub_cls_def(self, clsname: str, temp: TypeClassTemp, level: int):
        header = self.stub_cls_def_header(clsname, temp, level)

        inner_symtable = temp.get_inner_symtable()
        var_strlist = []
        with self.enter_class(clsname):
            for name, tpins in temp.var_attr.items():
                var_strlist.append(
                    self.stub_var_def(name, tpins.temp, level + 1))

            body = self.stubgen_symtable(inner_symtable, level + 1)

        if not body or body == '\n':
            header += '...\n'
            return header

        if var_strlist:
            body = ''.join(var_strlist) + '\n' + body

        return header + '\n' + body

    def stub_cls_def_header(self, clsname: str, temp: TypeClassTemp,
                            level: int) -> str:
        return _indent_unit * level + 'class ' + clsname + ': '

    def _stub_single_fun(self, name: str, argument: Argument, ret: TypeIns):
        """Generate a single function's type annotation in the pyi file"""
        def get_arg_str(arg: Arg):
            cur_str = arg.name
            cur_str += ': ' + str(arg.ann)
            if arg.valid:
                cur_str += '=...'
            return cur_str

        arg_strlist = []
        for arg in argument.args:
            cur_str = get_arg_str(arg)
            arg_strlist.append(cur_str)

        if argument.vararg:
            cur_str = get_arg_str(argument.vararg)
            arg_strlist.append(cur_str)

        for arg in argument.kwonlyargs:
            cur_str = get_arg_str(arg)
            arg_strlist.append(cur_str)

        if argument.kwarg:
            cur_str = get_arg_str(argument.kwarg)
            arg_strlist.append(cur_str)

        param = '(' + ', '.join(arg_strlist) + ')'
        return 'def ' + name + param + ': ...\n'

    def stub_fun_def(self, funname: str, temp: TypeFuncTemp, level: int,
                     is_method=False) -> str:
        is_overload = len(temp.overloads) > 1
        if is_overload:
            self.from_typing.add('overload')  # import overload from typing

        indent_prefix = self.indent_prefix(level)

        fun_pyi = []
        for argument, ret in temp.overloads:
            fun_res = self._stub_single_fun(funname, argument, ret)
            if is_overload:
                cur_fun_pyi = indent_prefix + '@overload\n'
            else:
                cur_fun_pyi = ''
            cur_fun_pyi += indent_prefix + fun_res
            fun_pyi.append(cur_fun_pyi)

        return ''.join(fun_pyi)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2012, Rui Carmo
Description: Docstring utility functions
License: MIT (see LICENSE.md for details)
"""

import os, sys, logging
import inspect
from bottle import app

log = logging.getLogger()


def docs():
    """Gather all docstrings related to routes and return them grouped by module"""
    routes = []
    modules = {}
    for route in app().routes:
        doc = inspect.getdoc(route.callback) or inspect.getcomments(route.callback)
        if not doc:
            doc = ''
        module = inspect.getmodule(route.callback).__name__
        item = {
            'method': route.method,
            'route': route.rule,
            'function': route.callback.__name__,
            'module': module,
            'doc': inspect.cleandoc(doc)
        }
        if not module in modules:
            modules[module] = []
        modules[module].append(item)
    return modules
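# A minimal sketch of using docs() (the route and its docstring are
# illustrative): register a documented handler on the default app, then
# group the docstrings by module.
from bottle import route

@route('/status')
def status():
    """Return a liveness indicator."""
    return {'ok': True}

# docs() would now include an entry for '/status' (method 'GET') under the
# module that defines status(), with the cleaned docstring attached.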
import pytest
from pydantic import ValidationError

from porcupine.base import Serializer


class User(object):
    def __init__(self, name=None, surname=None, age=None):
        self.name = name
        self.surname = surname
        self.age = age


class UserSerializer(Serializer):
    name: str
    surname: str
    age: int = None


@pytest.fixture
def user_full():
    user = User('foo', 'bar', 23)
    return user


@pytest.fixture
def user_required_only():
    user = User('foo', 'bar')
    return user


@pytest.fixture
def user_none():
    user = User()
    return user


class TestSimpleObject:
    def test_successful_serialisation(self, user_full):
        dictionary = UserSerializer(user_full).dict()
        assert dictionary == {'name': 'foo', 'surname': 'bar', 'age': 23}

    def test_non_required_attributes(self, user_required_only):
        dictionary = UserSerializer(user_required_only).dict()
        assert dictionary == {'name': 'foo', 'surname': 'bar', 'age': None}

    def test_required_attributes(self, user_none):
        expected_errors = [
            {'loc': ('name',), 'msg': 'none is not an allowed value',
             'type': 'type_error.none.not_allowed'},
            {'loc': ('surname',), 'msg': 'none is not an allowed value',
             'type': 'type_error.none.not_allowed'}
        ]
        with pytest.raises(ValidationError) as exception:
            UserSerializer(user_none).dict()
        assert exception.value.errors() == expected_errors
from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__) >= parse_version('36.2')

# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']

cfg_keys = 'version description keywords author author_email'.split()
expected = (cfg_keys
            + "lib_name user branch license status min_python audience language".split())
for o in expected:
    assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o: cfg[o] for o in cfg_keys}

licenses = {
    'apache2': (
        'Apache Software License 2.0',
        'OSI Approved :: Apache Software License'),
}
statuses = [
    '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
    '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive'
]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8'.split()


def parse_requirements(name):
    return cfg[name].strip("\n").split("\n")


requirements = parse_requirements("requirements")
o_gpu = parse_requirements("onnxgpu")
o_cpu = parse_requirements("onnxcpu")
interp = parse_requirements("interp")
all_req = parse_requirements("all")

extras = {}
extras["onnx-gpu"] = ['onnxruntime-gpu']
extras["onnx-cpu"] = ['onnxruntime-cpu']
extras["interp"] = ['plotly', 'plotnine', 'shap<0.36.0']
extras["all"] = ['fastai', 'onnxruntime-gpu', 'plotly', 'plotnine', 'shap<0.36.0']

lic = licenses[cfg['license']]
min_python = cfg['min_python']

setuptools.setup(
    name=cfg['lib_name'],
    license=lic[0],
    classifiers=[
        'Development Status :: ' + statuses[int(cfg['status'])],
        'Intended Audience :: ' + cfg['audience'].title(),
        'License :: ' + lic[1],
        'Natural Language :: ' + cfg['language'].title(),
    ] + [
        'Programming Language :: Python :: ' + o
        for o in py_versions[py_versions.index(min_python):]
    ],
    url=cfg['git_url'],
    packages=setuptools.find_packages(),
    include_package_data=True,
    install_requires=requirements,
    extras_require=extras,
    dependency_links=cfg.get('dep_links', '').split(),
    python_requires='>=' + cfg['min_python'],
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    zip_safe=False,
    entry_points={
        'console_scripts': cfg.get('console_scripts', '').split()
    },
    **setup_cfg)
import json

import requests

__version__ = '1.0.2'


class TelenorWeb2SMSException(Exception):
    """A generic exception for all others to extend."""
    def __str__(self):
        # Use the class docstring if the exception message hasn't been provided
        if len(self.args) == 0:
            return self.__doc__
        return super(TelenorWeb2SMSException, self).__str__()


class AuthenticationFailure(TelenorWeb2SMSException):
    """The given username and password might not be correct."""


class SMSNotSent(TelenorWeb2SMSException):
    """SMS has not been sent."""


class UnsupportedPhoneNumberFormat(TelenorWeb2SMSException):
    """The given phone number isn't in any of the supported formats."""


class TelenorWeb2SMS(object):
    auth_url = 'https://www.telenor.rs/portal/index.php'
    send_sms_url = 'https://www.telenor.rs/portal/usluge/sendsms.php'

    def __init__(self, username, password, auth_url=None):
        self.session = requests.session(headers={
            'User-Agent': "telenor_web2sms/%s" % __version__,
        })
        self.auth(username, password, auth_url)

    def auth(self, username, password, auth_url=None):
        auth_url = auth_url or self.auth_url
        r = self.session.post(
            url=auth_url,
            data={
                'brtel': username,
                'lozinka': password,
            }
        )

        # Check if we made a bad request
        r.raise_for_status()

        if r.url == self.auth_url:
            raise AuthenticationFailure()

    def parse_phone_number(self, phone_number):
        if phone_number.startswith('0'):
            normalized = phone_number[1:]
        elif phone_number.startswith('+381'):
            normalized = phone_number[4:]
        else:
            raise UnsupportedPhoneNumberFormat()

        area_code = normalized[:2]
        number = normalized[2:]
        return area_code, number

    def send_sms(self, phone_number, message, send_sms_url=None):
        send_sms_url = send_sms_url or self.send_sms_url
        area_code, number = self.parse_phone_number(phone_number)
        r = self.session.post(
            url=send_sms_url,
            data={
                'pozivni': area_code,
                'BBroj': number,
                'smsporuka': message,
            }
        )

        # Check if we made a bad request
        r.raise_for_status()

        j = json.loads(r.content)
        if j['status'] != 'OK':
            raise SMSNotSent("SMS has not been sent, because '%s'." % j['message'])

    def __call__(self, phone_number, message, send_sms_url=None):
        self.send_sms(phone_number, message, send_sms_url)


def main():
    import argparse
    import os
    import sys

    def env(e):
        return os.environ.get(e, '')

    parser = argparse.ArgumentParser(
        description='Send an SMS through the Telenor WEB2SMS web app'
    )
    parser.add_argument(
        '-u', '--username',
        default=env('TELENOR_WEB2SMS_USERNAME'),
        help='Your Telenor WEB2SMS username. Defaults to env[TELENOR_WEB2SMS_USERNAME]'
    )
    parser.add_argument(
        '-p', '--phone-number',
        help="Recipient's phone number"
    )
    # As Telenor WEB2SMS cuts off newlines, there's no point in allowing
    # multiline input.
    parser.add_argument(
        '-m', '--message',
        help='Message to send'
    )
    parser.add_argument(
        '--version',
        action='version',
        version="%(prog)s " + __version__
    )
    args = parser.parse_args()

    try:
        # Authenticate to Telenor WEB2SMS
        username = args.username or raw_input('What is your Telenor WEB2SMS username? ')
        password = env('TELENOR_WEB2SMS_PASSWORD') or raw_input('What is your Telenor WEB2SMS password? ')
        web2sms = TelenorWeb2SMS(username, password)

        # Send SMS
        phone_number = args.phone_number or raw_input('Who are you sending this SMS to? ')
        message = args.message or raw_input('Enter your message: ')
        web2sms.send_sms(phone_number, message)
        print 'SMS sent successfully.'
    except Exception, e:
        print >> sys.stderr, "ERROR:", e
        sys.exit(1)


if __name__ == '__main__':
    main()
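# Example invocation (credentials and numbers are illustrative; the module
# falls back to the environment variables when the flags are omitted):
#
#   TELENOR_WEB2SMS_USERNAME=0641234567 \
#   TELENOR_WEB2SMS_PASSWORD=secret \
#   python telenor_web2sms.py -p +381641234567 -m "hello"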
from functools import partial
from typing import Callable, Tuple

import numpy as np

from hmc.core import for_loop, while_loop
from hmc.integrators.terminal import cond


def step(val: Tuple, zo: np.ndarray, step_size: float, vector_field: Callable) -> Tuple:
    """Single step of the implicit midpoint integrator. Computes the midpoint,
    evaluates the gradient at the midpoint, takes a step from the initial
    position in the direction of the gradient at the midpoint, and measures
    the difference between the resulting point and the candidate stationary
    point.

    """
    zncand, _, num_iters = val
    zm = (zncand + zo) / 2.
    dz = np.hstack(vector_field(*np.split(zm, 2)))
    zn = zo + step_size * dz
    delta = zn - zncand
    return zn, delta, num_iters + 1


def _single_step_implicit_midpoint(
        vector_field: Callable,
        zo: Tuple[np.ndarray],
        step_size: float,
        thresh: float,
        max_iters: int) -> Tuple:
    """Implements the implicit midpoint integrator. The implicit midpoint
    integrator is symmetric, symplectic, and second-order accurate
    (third-order local error).

    Args:
        vector_field: The Hamiltonian vector field.
        zo: Tuple containing the position and momentum variables in the
            original phase space.
        step_size: Integration step_size.
        thresh: Convergence tolerance for fixed point iterations.
        max_iters: Maximum number of fixed point iterations.

    Returns:
        qn: The terminal position variable.
        pn: The terminal momentum variable.
        num_iters: The number of fixed point iterations to find the midpoint.
        success: Boolean flag indicating successful integration.

    """
    # Initial candidate.
    qo, po = zo
    zo = np.hstack((qo, po))
    # Fixed point iteration.
    delta = np.ones_like(zo) * np.inf
    dz = np.hstack(vector_field(*np.split(zo, 2)))
    zopred = zo + 0.5 * step_size * dz
    val = (zopred, delta, 0)
    zn, delta, num_iters = while_loop(
        partial(cond, thresh=thresh, max_iters=max_iters),
        partial(step, zo=zo, step_size=step_size, vector_field=vector_field),
        val)
    # Determine whether or not the integration was successful.
    success = np.all(delta < thresh)
    qn, pn = np.split(zn, 2)
    return (qn, pn), num_iters, success


def implicit_midpoint(
        vector_field: Callable,
        zo: Tuple[np.ndarray],
        step_size: float,
        num_steps: int,
        thresh: float = 1e-6,
        max_iters: int = 1000
) -> Tuple:
    def step(it: int, val: Tuple):
        zo, so = val
        zn, _, sn = _single_step_implicit_midpoint(vector_field, zo, step_size, thresh, max_iters)
        success = np.logical_and(so, sn)
        return zn, success
    (qn, pn), success = for_loop(0, num_steps, step, (zo, True))
    return (qn, pn), success
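# A small sanity check (a sketch, assuming the hmc helpers imported above are
# available): for the harmonic oscillator H(q, p) = (q^2 + p^2) / 2 the
# Hamiltonian vector field is dq/dt = p, dp/dt = -q, and the implicit
# midpoint rule should conserve the energy up to the fixed-point tolerance.
def _demo_harmonic_oscillator():
    def vector_field(q, p):
        return p, -q

    qo, po = np.array([1.0]), np.array([0.0])
    (qn, pn), success = implicit_midpoint(
        vector_field, (qo, po), step_size=0.01, num_steps=1000)
    assert success

    def energy(q, p):
        return 0.5 * (q ** 2 + p ** 2)

    assert np.allclose(energy(qn, pn), energy(qo, po), atol=1e-3)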
import pandas as pd
from datetime import datetime
import shlex
import subprocess
import requests
from reportlab.pdfgen import canvas


def generateReport(event_ts, keys):
    print('printing report')
    directory = "./data/"
    csv_name = "result.csv"
    csvpath = directory + csv_name
    csv = pd.read_csv(csvpath)

    # Query Classification Summary Report header
    report_header = keys['report_header']

    # Count the total number of queries
    total_query_count = csv.count()
    total_query_count = str(total_query_count[0])

    all_product_count = csv["Category"].value_counts().rename_axis('products').reset_index(name='counts')
    total_products = all_product_count.count()
    total_products = total_products[0]

    indent = 100
    indent_next = 250
    pdf_name = "result.pdf"
    pdfpath = directory + pdf_name
    c = canvas.Canvas(pdfpath)
    c.drawString(indent, 800, report_header)
    c.drawString(indent, 750, "Total No. of Queries = ")
    c.drawString(indent_next, 750, total_query_count)
    c.drawString(indent, 725, "Product")
    c.drawString(indent_next, 725, "Count")
    height = 700
    for i in range(total_products):
        c.drawString(indent, height, str(all_product_count["products"][i]))
        c.drawString(indent_next, height, str(all_product_count["counts"][i]))
        height = height - 25
    c.save()
    print('pdf created')

    channel = keys['channel_report']
    token = keys['slack_bot_token']
    comment = 'Please find the report attached'
    try:
        # Upload the PDF to Slack via curl.
        command_line = 'curl -F file=@"./data/result.pdf" -F "initial_comment=%s" -F channels=%s -H "Authorization: Bearer %s" https://slack.com/api/files.upload' % (comment, channel, token)
        args = shlex.split(command_line)
        subprocess.Popen(args)
        print(args)
    except (AssertionError, AttributeError, EOFError, FloatingPointError, GeneratorExit, ImportError,
            IndexError, KeyError, KeyboardInterrupt, MemoryError, NameError, NotImplementedError, OSError,
            OverflowError, ReferenceError, RuntimeError, StopIteration, SyntaxError, IndentationError,
            TabError, SystemError, SystemExit, TypeError, UnboundLocalError, UnicodeError,
            UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError, ValueError, ZeroDivisionError):
        # Fall back to uploading the CSV through the Slack web API directly.
        headers = {
            'Authorization': keys['slack_bot_token']
        }
        print(headers)
        files = {
            'file': ('C:\\Users\\z003ww7c.AD001\\PycharmProjects\\SlackIntegration\\data\\result.csv',
                     open('C:\\Users\\z003ww7c.AD001\\PycharmProjects\\SlackIntegration\\data\\result.csv', 'rb')),
            'initial_comment': 'Please find the report attached',
            'channels': keys['channel_report'],
        }
        url = 'https://slack.com/api/files.upload'
        requests.post(url, headers=headers, files=files)
"""regex utils """ import re def remove_digits(s: str) -> str: """ removes digits in a string """ return re.sub("\d+", "", s)
'''
Transcribing DNA into RNA
http://rosalind.info/problems/rna/

Problem

An RNA string is a string formed from the alphabet containing 'A', 'C', 'G', and 'U'.

Given a DNA string t corresponding to a coding strand, its transcribed RNA string u
is formed by replacing all occurrences of 'T' in t with 'U' in u.

Given: A DNA string t having length at most 1000 nt.

Return: The transcribed RNA string of t.

Sample Dataset
GATGGAACTTGACTACGTAAATT

Sample Output
GAUGGAACUUGACUACGUAAAUU
'''
from lib.sequences import DNA


def run_rna(sequence):
    ''' Converts a DNA string into RNA '''
    return DNA(sequence).to_rna().sequence
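# The sample from the problem statement doubles as a quick check (assuming
# lib.sequences.DNA exposes the transcribed string via .sequence as used above):
assert run_rna('GATGGAACTTGACTACGTAAATT') == 'GAUGGAACUUGACUACGUAAAUU'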