code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from airflow.models import DAG, TaskInstance
from airflow.providers.amazon.aws.hooks.athena import AWSAthenaHook
from airflow.providers.amazon.aws.operators.athena import AWSAthenaOperator
from airflow.utils import timezone
from airflow.utils.timezone import datetime
# Shared fixtures for the Athena operator tests.
TEST_DAG_ID = 'unit_tests'
DEFAULT_DATE = datetime(2018, 1, 1)
ATHENA_QUERY_ID = 'eac29bf8-daa1-4ffc-b19a-0db31dc3b784'

# Canonical operator arguments that the assertions below compare against.
MOCK_DATA = dict(
    task_id='test_aws_athena_operator',
    query='SELECT * FROM TEST_TABLE',
    database='TEST_DATABASE',
    outputLocation='s3://test_s3_bucket/',
    client_request_token='eac427d0-1c6d-4dfb-96aa-2835d3ac6595',
    workgroup='default',
)

# Derived structures in the exact shape the Athena hook's run_query expects.
query_context = dict(Database=MOCK_DATA['database'])
result_configuration = dict(OutputLocation=MOCK_DATA['outputLocation'])
# noinspection PyUnusedLocal
# pylint: disable=unused-argument
class TestAWSAthenaOperator(unittest.TestCase):
    """Unit tests for AWSAthenaOperator.

    All AWSAthenaHook methods are stubbed with mock.patch.object, so no AWS
    calls are made. Note that stacked ``@mock.patch.object`` decorators apply
    bottom-up, so the mock arguments arrive in reverse decorator order.
    """

    def setUp(self):
        """Build a run-once DAG and the operator under test."""
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE,
        }
        self.dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once',
                       default_args=args,
                       schedule_interval='@once')
        # sleep_time/max_tries are kept small so the polling loop in
        # execute() finishes quickly under test.
        self.athena = AWSAthenaOperator(task_id='test_aws_athena_operator', query='SELECT * FROM TEST_TABLE',
                                        database='TEST_DATABASE', output_location='s3://test_s3_bucket/',
                                        client_request_token='eac427d0-1c6d-4dfb-96aa-2835d3ac6595',
                                        sleep_time=1, max_tries=3, dag=self.dag)

    def test_init(self):
        """Constructor arguments are stored unchanged on the operator."""
        self.assertEqual(self.athena.task_id, MOCK_DATA['task_id'])
        self.assertEqual(self.athena.query, MOCK_DATA['query'])
        self.assertEqual(self.athena.database, MOCK_DATA['database'])
        self.assertEqual(self.athena.aws_conn_id, 'aws_default')
        self.assertEqual(self.athena.client_request_token, MOCK_DATA['client_request_token'])
        self.assertEqual(self.athena.sleep_time, 1)

    @mock.patch.object(AWSAthenaHook, 'check_query_status', side_effect=("SUCCESS",))
    @mock.patch.object(AWSAthenaHook, 'run_query', return_value=ATHENA_QUERY_ID)
    @mock.patch.object(AWSAthenaHook, 'get_conn')
    def test_hook_run_small_success_query(self, mock_conn, mock_run_query, mock_check_query_status):
        """A query that succeeds on the first poll checks status exactly once."""
        self.athena.execute(None)
        mock_run_query.assert_called_once_with(MOCK_DATA['query'], query_context, result_configuration,
                                               MOCK_DATA['client_request_token'], MOCK_DATA['workgroup'])
        self.assertEqual(mock_check_query_status.call_count, 1)

    @mock.patch.object(AWSAthenaHook, 'check_query_status', side_effect=("RUNNING", "RUNNING", "SUCCESS",))
    @mock.patch.object(AWSAthenaHook, 'run_query', return_value=ATHENA_QUERY_ID)
    @mock.patch.object(AWSAthenaHook, 'get_conn')
    def test_hook_run_big_success_query(self, mock_conn, mock_run_query, mock_check_query_status):
        """A longer-running query keeps polling until SUCCESS is reported."""
        self.athena.execute(None)
        mock_run_query.assert_called_once_with(MOCK_DATA['query'], query_context, result_configuration,
                                               MOCK_DATA['client_request_token'], MOCK_DATA['workgroup'])
        self.assertEqual(mock_check_query_status.call_count, 3)

    @mock.patch.object(AWSAthenaHook, 'check_query_status', side_effect=(None, None,))
    @mock.patch.object(AWSAthenaHook, 'run_query', return_value=ATHENA_QUERY_ID)
    @mock.patch.object(AWSAthenaHook, 'get_conn')
    def test_hook_run_failed_query_with_none(self, mock_conn, mock_run_query, mock_check_query_status):
        """A status of None (query not found) makes execute() raise."""
        with self.assertRaises(Exception):
            self.athena.execute(None)
        mock_run_query.assert_called_once_with(MOCK_DATA['query'], query_context, result_configuration,
                                               MOCK_DATA['client_request_token'], MOCK_DATA['workgroup'])
        self.assertEqual(mock_check_query_status.call_count, 3)

    @mock.patch.object(AWSAthenaHook, 'get_state_change_reason')
    @mock.patch.object(AWSAthenaHook, 'check_query_status', side_effect=("RUNNING", "FAILED",))
    @mock.patch.object(AWSAthenaHook, 'run_query', return_value=ATHENA_QUERY_ID)
    @mock.patch.object(AWSAthenaHook, 'get_conn')
    def test_hook_run_failure_query(self, mock_conn, mock_run_query, mock_check_query_status,
                                    mock_get_state_change_reason):
        """A FAILED status raises and the failure reason is looked up once."""
        with self.assertRaises(Exception):
            self.athena.execute(None)
        mock_run_query.assert_called_once_with(MOCK_DATA['query'], query_context, result_configuration,
                                               MOCK_DATA['client_request_token'], MOCK_DATA['workgroup'])
        self.assertEqual(mock_check_query_status.call_count, 2)
        self.assertEqual(mock_get_state_change_reason.call_count, 1)

    @mock.patch.object(AWSAthenaHook, 'check_query_status', side_effect=("RUNNING", "RUNNING", "CANCELLED",))
    @mock.patch.object(AWSAthenaHook, 'run_query', return_value=ATHENA_QUERY_ID)
    @mock.patch.object(AWSAthenaHook, 'get_conn')
    def test_hook_run_cancelled_query(self, mock_conn, mock_run_query, mock_check_query_status):
        """A CANCELLED status makes execute() raise."""
        with self.assertRaises(Exception):
            self.athena.execute(None)
        mock_run_query.assert_called_once_with(MOCK_DATA['query'], query_context, result_configuration,
                                               MOCK_DATA['client_request_token'], MOCK_DATA['workgroup'])
        self.assertEqual(mock_check_query_status.call_count, 3)

    @mock.patch.object(AWSAthenaHook, 'check_query_status', side_effect=("RUNNING", "RUNNING", "RUNNING",))
    @mock.patch.object(AWSAthenaHook, 'run_query', return_value=ATHENA_QUERY_ID)
    @mock.patch.object(AWSAthenaHook, 'get_conn')
    def test_hook_run_failed_query_with_max_tries(self, mock_conn, mock_run_query, mock_check_query_status):
        """Polling stops after max_tries (3) and execute() raises."""
        with self.assertRaises(Exception):
            self.athena.execute(None)
        mock_run_query.assert_called_once_with(MOCK_DATA['query'], query_context, result_configuration,
                                               MOCK_DATA['client_request_token'], MOCK_DATA['workgroup'])
        self.assertEqual(mock_check_query_status.call_count, 3)

    @mock.patch.object(AWSAthenaHook, 'check_query_status', side_effect=("SUCCESS",))
    @mock.patch.object(AWSAthenaHook, 'run_query', return_value=ATHENA_QUERY_ID)
    @mock.patch.object(AWSAthenaHook, 'get_conn')
    def test_xcom_push_and_pull(self, mock_conn, mock_run_query, mock_check_query_status):
        """The query execution id is pushed to XCom by a successful run."""
        ti = TaskInstance(task=self.athena, execution_date=timezone.utcnow())
        ti.run()
        self.assertEqual(ti.xcom_pull(task_ids='test_aws_athena_operator'),
                         ATHENA_QUERY_ID)
# pylint: enable=unused-argument
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.utils.encoding import force_text
from .models import (
AbstractBase1, AbstractBase2, AbstractBase3, Child1, Child2, Child3,
Child4, Child5, Child6, Child7, RelatedModel, RelationModel,
)
class ManagersRegressionTests(TestCase):
    """Regression tests for manager resolution on concrete, abstract and
    swappable models (exercised through the Child*/AbstractBase* fixtures)."""

    def test_managers(self):
        """Explicit, inherited and default managers on the Child models
        return the filtered querysets asserted below."""
        Child1.objects.create(name='fred', data='a1')
        Child1.objects.create(name='barney', data='a2')
        Child2.objects.create(name='fred', data='b1', value=1)
        Child2.objects.create(name='barney', data='b2', value=42)
        Child3.objects.create(name='fred', data='c1', comment='yes')
        Child3.objects.create(name='barney', data='c2', comment='no')
        Child4.objects.create(name='fred', data='d1')
        Child4.objects.create(name='barney', data='d2')
        Child5.objects.create(name='fred', comment='yes')
        Child5.objects.create(name='barney', comment='no')
        Child6.objects.create(name='fred', data='f1', value=42)
        Child6.objects.create(name='barney', data='f2', value=42)
        Child7.objects.create(name='fred')
        Child7.objects.create(name='barney')
        self.assertQuerysetEqual(Child1.manager1.all(), ["<Child1: a1>"])
        self.assertQuerysetEqual(Child1.manager2.all(), ["<Child1: a2>"])
        self.assertQuerysetEqual(Child1._default_manager.all(), ["<Child1: a1>"])
        self.assertQuerysetEqual(Child2._default_manager.all(), ["<Child2: b1>"])
        self.assertQuerysetEqual(Child2.restricted.all(), ["<Child2: b2>"])
        self.assertQuerysetEqual(Child3._default_manager.all(), ["<Child3: c1>"])
        self.assertQuerysetEqual(Child3.manager1.all(), ["<Child3: c1>"])
        self.assertQuerysetEqual(Child3.manager2.all(), ["<Child3: c2>"])
        # Since Child6 inherits from Child4, the corresponding rows from f1 and
        # f2 also appear here. This is the expected result.
        self.assertQuerysetEqual(Child4._default_manager.order_by('data'), [
            "<Child4: d1>",
            "<Child4: d2>",
            "<Child4: f1>",
            "<Child4: f2>"
        ]
        )
        self.assertQuerysetEqual(Child4.manager1.all(), [
            "<Child4: d1>",
            "<Child4: f1>"
        ],
            ordered=False
        )
        self.assertQuerysetEqual(Child5._default_manager.all(), ["<Child5: fred>"])
        self.assertQuerysetEqual(Child6._default_manager.all(), ["<Child6: f1>"])
        self.assertQuerysetEqual(Child7._default_manager.order_by('name'), [
            "<Child7: barney>",
            "<Child7: fred>"
        ]
        )

    def test_abstract_manager(self):
        # Accessing the manager on an abstract model should
        # raise an attribute error with an appropriate message.
        # This error message isn't ideal, but if the model is abstract and
        # a lot of the class instantiation logic isn't invoked; if the
        # manager is implied, then we don't get a hook to install the
        # error-raising manager.
        msg = "type object 'AbstractBase3' has no attribute 'objects'"
        with self.assertRaisesMessage(AttributeError, msg):
            AbstractBase3.objects.all()

    def test_custom_abstract_manager(self):
        # Accessing the manager on an abstract model with an custom
        # manager should raise an attribute error with an appropriate
        # message.
        msg = "Manager isn't available; AbstractBase2 is abstract"
        with self.assertRaisesMessage(AttributeError, msg):
            AbstractBase2.restricted.all()

    def test_explicit_abstract_manager(self):
        # Accessing the manager on an abstract model with an explicit
        # manager should raise an attribute error with an appropriate
        # message.
        msg = "Manager isn't available; AbstractBase1 is abstract"
        with self.assertRaisesMessage(AttributeError, msg):
            AbstractBase1.objects.all()

    @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
    def test_swappable_manager(self):
        # The models need to be removed after the test in order to prevent bad
        # interactions with the flush operation in other tests.
        _old_models = apps.app_configs['managers_regress'].models.copy()
        try:
            class SwappableModel(models.Model):
                class Meta:
                    swappable = 'TEST_SWAPPABLE_MODEL'
            # Accessing the manager on a swappable model should
            # raise an attribute error with a helpful message
            msg = (
                "Manager isn't available; 'managers_regress.SwappableModel' "
                "has been swapped for 'managers_regress.Parent'"
            )
            with self.assertRaisesMessage(AttributeError, msg):
                SwappableModel.objects.all()
        finally:
            # Restore the app registry exactly as it was before the dynamic
            # model definition above mutated it.
            apps.app_configs['managers_regress'].models = _old_models
            apps.all_models['managers_regress'] = _old_models
            apps.clear_cache()

    @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
    def test_custom_swappable_manager(self):
        # The models need to be removed after the test in order to prevent bad
        # interactions with the flush operation in other tests.
        _old_models = apps.app_configs['managers_regress'].models.copy()
        try:
            class SwappableModel(models.Model):
                stuff = models.Manager()

                class Meta:
                    swappable = 'TEST_SWAPPABLE_MODEL'
            # Accessing the manager on a swappable model with an
            # explicit manager should raise an attribute error with a
            # helpful message
            msg = (
                "Manager isn't available; 'managers_regress.SwappableModel' "
                "has been swapped for 'managers_regress.Parent'"
            )
            with self.assertRaisesMessage(AttributeError, msg):
                SwappableModel.stuff.all()
        finally:
            # Restore the app registry mutated by the dynamic model above.
            apps.app_configs['managers_regress'].models = _old_models
            apps.all_models['managers_regress'] = _old_models
            apps.clear_cache()

    @override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
    def test_explicit_swappable_manager(self):
        # The models need to be removed after the test in order to prevent bad
        # interactions with the flush operation in other tests.
        _old_models = apps.app_configs['managers_regress'].models.copy()
        try:
            class SwappableModel(models.Model):
                objects = models.Manager()

                class Meta:
                    swappable = 'TEST_SWAPPABLE_MODEL'
            # Accessing the manager on a swappable model with an
            # explicit manager should raise an attribute error with a
            # helpful message
            msg = (
                "Manager isn't available; 'managers_regress.SwappableModel' "
                "has been swapped for 'managers_regress.Parent'"
            )
            with self.assertRaisesMessage(AttributeError, msg):
                SwappableModel.objects.all()
        finally:
            # Restore the app registry mutated by the dynamic model above.
            apps.app_configs['managers_regress'].models = _old_models
            apps.all_models['managers_regress'] = _old_models
            apps.clear_cache()

    def test_regress_3871(self):
        """Related managers (FK, GFK, M2M) are usable from templates. refs #3871."""
        related = RelatedModel.objects.create()
        relation = RelationModel()
        relation.fk = related
        relation.gfk = related
        relation.save()
        relation.m2m.add(related)
        t = Template('{{ related.test_fk.all.0 }}{{ related.test_gfk.all.0 }}{{ related.test_m2m.all.0 }}')
        self.assertEqual(
            t.render(Context({'related': related})),
            ''.join([force_text(relation.pk)] * 3),
        )

    def test_field_can_be_called_exact(self):
        # Make sure related managers core filters don't include an
        # explicit `__exact` lookup that could be interpreted as a
        # reference to a foreign `exact` field. refs #23940.
        related = RelatedModel.objects.create(exact=False)
        relation = related.test_fk.create()
        self.assertEqual(related.test_fk.get(), relation)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf8
# web2py Italian (it) language file: a bare dict mapping application strings
# to their Italian translations, evaluated by the framework's language loader.
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" è un\'espressione opzionale come "campo1=\'nuovo valore\'". Non si può fare "update" o "delete" dei risultati di un JOIN ',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'%s rows deleted': '%s righe ("record") cancellate',
'%s rows updated': '%s righe ("record") modificate',
'Available databases and tables': 'Database e tabelle disponibili',
'Cannot be empty': 'Non può essere vuoto',
'Check to delete': 'Seleziona per cancellare',
'Client IP': 'Client IP',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Richiesta (request) corrente',
'Current response': 'Risposta (response) corrente',
'Current session': 'Sessione (session) corrente',
'DB Model': 'Modello di DB',
'Database': 'Database',
'Delete:': 'Cancella:',
'Description': 'Descrizione',
'E-mail': 'E-mail',
'Edit': 'Modifica',
'Edit This App': 'Modifica questa applicazione',
'Edit current record': 'Modifica record corrente',
'First name': 'Nome',
'Group ID': 'ID Gruppo',
'Hello World': 'Salve Mondo',
'Hello World in a flash!': 'Salve Mondo in un flash!',
'Import/Export': 'Importa/Esporta',
'Index': 'Indice',
'Internal State': 'Stato interno',
'Invalid Query': 'Richiesta (query) non valida',
'Invalid email': 'Email non valida',
'Last name': 'Cognome',
'Layout': 'Layout',
'Main Menu': 'Menu principale',
'Menu Model': 'Menu Modelli',
'Name': 'Nome',
'New Record': 'Nuovo elemento (record)',
'No databases in this application': 'Nessun database presente in questa applicazione',
'Origin': 'Origine',
'Password': 'Password',
'Powered by': 'Powered by',
'Query:': 'Richiesta (query):',
'Record ID': 'Record ID',
# Fixed typo: "Registazione" -> "Registrazione".
'Registration key': 'Chiave di Registrazione',
'Reset Password key': 'Resetta chiave Password ',
'Role': 'Ruolo',
'Rows in table': 'Righe nella tabella',
'Rows selected': 'Righe selezionate',
'Stylesheet': 'Foglio di stile (stylesheet)',
'Sure you want to delete this object?': 'Vuoi veramente cancellare questo oggetto?',
'Table name': 'Nome tabella',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La richiesta (query) è una condizione come ad esempio "db.tabella1.campo1==\'valore\'". Una condizione come "db.tabella1.campo1==db.tabella2.campo2" produce un "JOIN" SQL.',
'The output of the file is a dictionary that was rendered by the view': 'L\'output del file è un "dictionary" che è stato visualizzato dalla vista',
'This is a copy of the scaffolding application': "Questa è una copia dell'applicazione di base (scaffold)",
'Timestamp': 'Ora (timestamp)',
'Update:': 'Aggiorna:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Per costruire richieste (query) più complesse si usano (...)&(...) come "e" (AND), (...)|(...) come "o" (OR), e ~(...) come negazione (NOT).',
'User ID': 'ID Utente',
'View': 'Vista',
'Welcome %s': 'Benvenuto %s',
'Welcome to web2py': 'Benvenuto su web2py',
'Which called the function': 'che ha chiamato la funzione',
'You are successfully running web2py': 'Stai eseguendo web2py con successo',
'You can modify this application and adapt it to your needs': 'Puoi modificare questa applicazione adattandola alle tue necessità',
'You visited the url': "Hai visitato l'URL",
'appadmin is disabled because insecure channel': 'Amministrazione (appadmin) disabilitata: comunicazione non sicura',
'cache': 'cache',
'change password': 'Cambia password',
'Online examples': 'Vedere gli esempi',
'Administrative interface': "Interfaccia amministrativa",
'customize me!': 'Personalizzami!',
'data uploaded': 'dati caricati',
'database': 'database',
'database %s select': 'database %s select',
'db': 'db',
'design': 'progetta',
'Documentation': 'Documentazione',
'done!': 'fatto!',
'edit profile': 'modifica profilo',
'export as csv file': 'esporta come file CSV',
'hello world': 'salve mondo',
'insert new': 'inserisci nuovo',
'insert new %s': 'inserisci nuovo %s',
'invalid request': 'richiesta non valida',
'located in the file': 'presente nel file',
'login': 'accesso',
'logout': 'uscita',
'lost password?': 'dimenticato la password?',
'new record inserted': 'nuovo record inserito',
'next 100 rows': 'prossime 100 righe',
'not authorized': 'non autorizzato',
'or import from csv file': 'oppure importa da file CSV',
'previous 100 rows': '100 righe precedenti',
'record': 'record',
'record does not exist': 'il record non esiste',
'record id': 'record id',
'register': 'registrazione',
'selected': 'selezionato',
'state': 'stato',
'table': 'tabella',
'unable to parse csv file': 'non riesco a decodificare questo file CSV',
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
//// [tests/cases/conformance/statements/for-ofStatements/ES5For-of9.ts] ////
//// [ES5For-of9.ts]
// NOTE(review): compiler-baseline fixture — the TypeScript source is followed
// by the JavaScript the compiler emitted for it. Baselines are regenerated by
// the TypeScript test harness; do not hand-edit the emitted half.
function foo() {
    return { x: 0 };
}
// `for (foo().x of [])` uses a property reference as the for-of binding
// target, which is what this conformance case exercises.
for (foo().x of []) {
    for (foo().x of []) {
        var p = foo().x;
    }
}
//// [ES5For-of9.js]
"use strict";
function foo() {
    return { x: 0 };
}
for (foo().x of []) {
    for (foo().x of []) {
        var p = foo().x;
    }
}
|
javascript
|
github
|
https://github.com/microsoft/TypeScript
|
tests/baselines/reference/ES5For-of9(target=es2015).js
|
/**
* --------------------------------------------------------------------------
* Bootstrap scrollspy.js
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
* --------------------------------------------------------------------------
*/
import BaseComponent from './base-component.js'
import EventHandler from './dom/event-handler.js'
import SelectorEngine from './dom/selector-engine.js'
import {
defineJQueryPlugin, getElement, isDisabled, isVisible
} from './util/index.js'
/**
* Constants
*/
// Component name and data/event namespaces.
const NAME = 'scrollspy'
const DATA_KEY = 'bs.scrollspy'
const EVENT_KEY = `.${DATA_KEY}`
const DATA_API_KEY = '.data-api'

// Namespaced events fired/consumed by the component.
const EVENT_ACTIVATE = `activate${EVENT_KEY}`
const EVENT_CLICK = `click${EVENT_KEY}`
const EVENT_LOAD_DATA_API = `load${EVENT_KEY}${DATA_API_KEY}`

// CSS classes toggled on navigation items.
const CLASS_NAME_DROPDOWN_ITEM = 'dropdown-item'
const CLASS_NAME_ACTIVE = 'active'

// Selectors used to locate spied containers and their navigation links.
const SELECTOR_DATA_SPY = '[data-bs-spy="scroll"]'
const SELECTOR_TARGET_LINKS = '[href]'
const SELECTOR_NAV_LIST_GROUP = '.nav, .list-group'
const SELECTOR_NAV_LINKS = '.nav-link'
const SELECTOR_NAV_ITEMS = '.nav-item'
const SELECTOR_LIST_ITEMS = '.list-group-item'
const SELECTOR_LINK_ITEMS = `${SELECTOR_NAV_LINKS}, ${SELECTOR_NAV_ITEMS} > ${SELECTOR_NAV_LINKS}, ${SELECTOR_LIST_ITEMS}`
const SELECTOR_DROPDOWN = '.dropdown'
const SELECTOR_DROPDOWN_TOGGLE = '.dropdown-toggle'

// Default configuration values; DefaultType declares the expected type of
// each option for validation.
const Default = {
  offset: null, // TODO: v6 @deprecated, keep it for backwards compatibility reasons
  rootMargin: '0px 0px -25%',
  smoothScroll: false,
  target: null,
  threshold: [0.1, 0.5, 1]
}

const DefaultType = {
  offset: '(number|null)', // TODO v6 @deprecated, keep it for backwards compatibility reasons
  rootMargin: 'string',
  smoothScroll: 'boolean',
  target: 'element',
  threshold: 'array'
}
/**
* Class definition
*/
/**
 * ScrollSpy tracks which observed section of a scrollable container is in
 * view (via IntersectionObserver) and toggles the `.active` class on the
 * navigation link whose href points at that section.
 */
class ScrollSpy extends BaseComponent {
  constructor(element, config) {
    super(element, config)

    // this._element is the observablesContainer and config.target the menu links wrapper
    this._targetLinks = new Map()
    this._observableSections = new Map()

    // When the container itself scrolls (overflow-y !== visible) it is used
    // as the IntersectionObserver root; otherwise the viewport is the root.
    this._rootElement = getComputedStyle(this._element).overflowY === 'visible' ? null : this._element
    this._activeTarget = null
    this._observer = null
    this._previousScrollData = {
      visibleEntryTop: 0,
      parentScrollTop: 0
    }
    this.refresh() // initialize
  }

  // Getters
  static get Default() {
    return Default
  }

  static get DefaultType() {
    return DefaultType
  }

  static get NAME() {
    return NAME
  }

  // Public

  /** Rebuild the link/section maps and (re)start observation. */
  refresh() {
    this._initializeTargetsAndObservables()
    this._maybeEnableSmoothScroll()

    if (this._observer) {
      this._observer.disconnect()
    } else {
      this._observer = this._getNewObserver()
    }

    for (const section of this._observableSections.values()) {
      this._observer.observe(section)
    }
  }

  dispose() {
    this._observer.disconnect()
    super.dispose()
  }

  // Private
  _configAfterMerge(config) {
    // TODO: on v6 target should be given explicitly & remove the {target: 'ss-target'} case
    config.target = getElement(config.target) || document.body

    // TODO: v6 Only for backwards compatibility reasons. Use rootMargin only
    config.rootMargin = config.offset ? `${config.offset}px 0px -30%` : config.rootMargin

    // A comma-separated threshold string is normalized to an array of floats.
    if (typeof config.threshold === 'string') {
      config.threshold = config.threshold.split(',').map(value => Number.parseFloat(value))
    }

    return config
  }

  _maybeEnableSmoothScroll() {
    if (!this._config.smoothScroll) {
      return
    }

    // unregister any previous listeners
    EventHandler.off(this._config.target, EVENT_CLICK)

    EventHandler.on(this._config.target, EVENT_CLICK, SELECTOR_TARGET_LINKS, event => {
      const observableSection = this._observableSections.get(event.target.hash)
      if (observableSection) {
        event.preventDefault()
        const root = this._rootElement || window
        const height = observableSection.offsetTop - this._element.offsetTop
        if (root.scrollTo) {
          root.scrollTo({ top: height, behavior: 'smooth' })
          return
        }

        // Chrome 60 doesn't support `scrollTo`
        root.scrollTop = height
      }
    })
  }

  _getNewObserver() {
    const options = {
      root: this._rootElement,
      threshold: this._config.threshold,
      rootMargin: this._config.rootMargin
    }

    return new IntersectionObserver(entries => this._observerCallback(entries), options)
  }

  // The logic of selection
  _observerCallback(entries) {
    const targetElement = entry => this._targetLinks.get(`#${entry.target.id}`)
    const activate = entry => {
      this._previousScrollData.visibleEntryTop = entry.target.offsetTop
      this._process(targetElement(entry))
    }

    // Compare the root's current scrollTop with the one recorded on the
    // previous callback to infer the scroll direction.
    const parentScrollTop = (this._rootElement || document.documentElement).scrollTop
    const userScrollsDown = parentScrollTop >= this._previousScrollData.parentScrollTop
    this._previousScrollData.parentScrollTop = parentScrollTop

    for (const entry of entries) {
      if (!entry.isIntersecting) {
        this._activeTarget = null
        this._clearActiveClass(targetElement(entry))
        continue
      }

      const entryIsLowerThanPrevious = entry.target.offsetTop >= this._previousScrollData.visibleEntryTop
      // if we are scrolling down, pick the bigger offsetTop
      if (userScrollsDown && entryIsLowerThanPrevious) {
        activate(entry)
        // if parent isn't scrolled, let's keep the first visible item, breaking the iteration
        if (!parentScrollTop) {
          return
        }

        continue
      }

      // if we are scrolling up, pick the smallest offsetTop
      if (!userScrollsDown && !entryIsLowerThanPrevious) {
        activate(entry)
      }
    }
  }

  _initializeTargetsAndObservables() {
    this._targetLinks = new Map()
    this._observableSections = new Map()
    const targetLinks = SelectorEngine.find(SELECTOR_TARGET_LINKS, this._config.target)

    for (const anchor of targetLinks) {
      // ensure that the anchor has an id and is not disabled
      if (!anchor.hash || isDisabled(anchor)) {
        continue
      }

      const observableSection = SelectorEngine.findOne(decodeURI(anchor.hash), this._element)

      // ensure that the observableSection exists & is visible
      if (isVisible(observableSection)) {
        this._targetLinks.set(decodeURI(anchor.hash), anchor)
        this._observableSections.set(anchor.hash, observableSection)
      }
    }
  }

  _process(target) {
    if (this._activeTarget === target) {
      return
    }

    this._clearActiveClass(this._config.target)
    this._activeTarget = target
    target.classList.add(CLASS_NAME_ACTIVE)
    this._activateParents(target)

    EventHandler.trigger(this._element, EVENT_ACTIVATE, { relatedTarget: target })
  }

  _activateParents(target) {
    // Activate dropdown parents
    if (target.classList.contains(CLASS_NAME_DROPDOWN_ITEM)) {
      SelectorEngine.findOne(SELECTOR_DROPDOWN_TOGGLE, target.closest(SELECTOR_DROPDOWN))
        .classList.add(CLASS_NAME_ACTIVE)
      return
    }

    for (const listGroup of SelectorEngine.parents(target, SELECTOR_NAV_LIST_GROUP)) {
      // Set triggered links parents as active
      // With both <ul> and <nav> markup a parent is the previous sibling of any nav ancestor
      for (const item of SelectorEngine.prev(listGroup, SELECTOR_LINK_ITEMS)) {
        item.classList.add(CLASS_NAME_ACTIVE)
      }
    }
  }

  _clearActiveClass(parent) {
    parent.classList.remove(CLASS_NAME_ACTIVE)

    const activeNodes = SelectorEngine.find(`${SELECTOR_TARGET_LINKS}.${CLASS_NAME_ACTIVE}`, parent)
    for (const node of activeNodes) {
      node.classList.remove(CLASS_NAME_ACTIVE)
    }
  }

  // Static
  static jQueryInterface(config) {
    return this.each(function () {
      const data = ScrollSpy.getOrCreateInstance(this, config)

      if (typeof config !== 'string') {
        return
      }

      if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {
        throw new TypeError(`No method named "${config}"`)
      }

      data[config]()
    })
  }
}
/**
* Data API implementation
*/
// Auto-initialize every `[data-bs-spy="scroll"]` element once the page loads.
EventHandler.on(window, EVENT_LOAD_DATA_API, () => {
  for (const spy of SelectorEngine.find(SELECTOR_DATA_SPY)) {
    ScrollSpy.getOrCreateInstance(spy)
  }
})

/**
 * jQuery
 */

// Register ScrollSpy as a jQuery plugin when jQuery is present.
defineJQueryPlugin(ScrollSpy)

export default ScrollSpy
|
javascript
|
github
|
https://github.com/twbs/bootstrap
|
js/src/scrollspy.js
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2014 by the Free Software Foundation, Inc.
#
# This file is part of Postorius.
#
# Postorius is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Postorius is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Postorius. If not, see <http://www.gnu.org/licenses/>.
import logging
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
def postorius(request):
    """Add template variables to context.

    Returns the base template to extend and the current request. AJAX
    requests get a chrome-less base template so the page header/footer is
    suppressed; all other requests get the full base template.
    """
    base_template = ("postorius/base_ajax.html" if request.is_ajax()
                     else "postorius/base.html")
    return {
        'postorius_base_template': base_template,
        'request': request,
    }
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""This is an integration test for the Dataproc-luigi binding.
This test requires credentials that can access GCS & access to a bucket below.
Follow the directions in the gcloud tools to set up local credentials.
"""
import unittest
# The google-cloud dependencies are optional: when any of them is missing,
# skip the whole module at import time instead of erroring.
try:
    import google.auth
    from luigi.contrib import dataproc
    from googleapiclient import discovery

    # Build a real Dataproc client from the ambient credentials and install
    # it as the client used by luigi's dataproc tasks.
    default_credentials, _ = google.auth.default()
    default_client = discovery.build('dataproc', 'v1', cache_discovery=False, credentials=default_credentials)
    dataproc.set_dataproc_client(default_client)
except ImportError:
    raise unittest.SkipTest('Unable to load google cloud dependencies')
import luigi
import os
import time
from nose.plugins.attrib import attr
# In order to run this test, you should set these to your GCS project.
# Unfortunately there's no mock
def _env(name, fallback):
    """Read a test-configuration value from the environment, with a default."""
    return os.environ.get(name, fallback)


PROJECT_ID = _env('DATAPROC_TEST_PROJECT_ID', 'your_project_id_here')
CLUSTER_NAME = _env('DATAPROC_TEST_CLUSTER', 'unit-test-cluster')
REGION = _env('DATAPROC_REGION', 'global')

# Suffix used to name the cluster created with an explicit image version.
IMAGE_VERSION = '1-0'
class _DataprocBaseTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@attr('gcloud')
class DataprocTaskTest(_DataprocBaseTestCase):
    """End-to-end tests for the luigi Dataproc tasks.

    The numeric prefixes in the test names enforce execution order: create a
    cluster, submit jobs against it, then delete it. Requires real GCP
    credentials (see the module docstring).
    """

    def test_1_create_cluster(self):
        """A cluster can be created via CreateDataprocClusterTask."""
        success = luigi.run(['--local-scheduler',
                             '--no-lock',
                             'CreateDataprocClusterTask',
                             '--gcloud-project-id=' + PROJECT_ID,
                             '--dataproc-cluster-name=' + CLUSTER_NAME])
        self.assertTrue(success)

    def test_2_create_cluster_should_notice_existing_cluster_and_return_immediately(self):
        """Re-creating an existing cluster is a fast no-op (< 3 seconds)."""
        job_start = time.time()
        success = luigi.run(['--local-scheduler',
                             '--no-lock',
                             'CreateDataprocClusterTask',
                             '--gcloud-project-id=' + PROJECT_ID,
                             '--dataproc-cluster-name=' + CLUSTER_NAME])
        self.assertTrue(success)
        self.assertLess(time.time() - job_start, 3)

    def test_3_submit_minimal_job(self):
        # The job itself will fail because the job files don't exist
        # We don't care, because then we would be testing spark
        # We care the job was submitted correctly, so that's what we test
        luigi.run(['--local-scheduler',
                   '--no-lock',
                   'DataprocSparkTask',
                   '--gcloud-project-id=' + PROJECT_ID,
                   '--dataproc-cluster-name=' + CLUSTER_NAME,
                   '--main-class=my.MinimalMainClass'])
        response = dataproc.get_dataproc_client().projects().regions().jobs() \
            .list(projectId=PROJECT_ID, region=REGION, clusterName=CLUSTER_NAME).execute()
        lastJob = response['jobs'][0]['sparkJob']
        self.assertEqual(lastJob['mainClass'], "my.MinimalMainClass")

    def test_4_submit_spark_job(self):
        # The job itself will fail because the job files don't exist
        # We don't care, because then we would be testing spark
        # We care the job was submitted correctly, so that's what we test
        luigi.run(['--local-scheduler',
                   '--no-lock',
                   'DataprocSparkTask',
                   '--gcloud-project-id=' + PROJECT_ID,
                   '--dataproc-cluster-name=' + CLUSTER_NAME,
                   '--main-class=my.MainClass',
                   '--jars=one.jar,two.jar',
                   '--job-args=foo,bar'])
        response = dataproc.get_dataproc_client().projects().regions().jobs() \
            .list(projectId=PROJECT_ID, region=REGION, clusterName=CLUSTER_NAME).execute()
        lastJob = response['jobs'][0]['sparkJob']
        self.assertEqual(lastJob['mainClass'], "my.MainClass")
        self.assertEqual(lastJob['jarFileUris'], ["one.jar", "two.jar"])
        self.assertEqual(lastJob['args'], ["foo", "bar"])

    def test_5_submit_pyspark_job(self):
        # The job itself will fail because the job files don't exist
        # We don't care, because then we would be testing pyspark
        # We care the job was submitted correctly, so that's what we test
        luigi.run(['--local-scheduler',
                   '--no-lock',
                   'DataprocPysparkTask',
                   '--gcloud-project-id=' + PROJECT_ID,
                   '--dataproc-cluster-name=' + CLUSTER_NAME,
                   '--job-file=main_job.py',
                   '--extra-files=extra1.py,extra2.py',
                   '--job-args=foo,bar'])
        response = dataproc.get_dataproc_client().projects().regions().jobs()\
            .list(projectId=PROJECT_ID, region=REGION, clusterName=CLUSTER_NAME).execute()
        lastJob = response['jobs'][0]['pysparkJob']
        self.assertEqual(lastJob['mainPythonFileUri'], "main_job.py")
        self.assertEqual(lastJob['pythonFileUris'], ["extra1.py", "extra2.py"])
        self.assertEqual(lastJob['args'], ["foo", "bar"])

    def test_6_delete_cluster(self):
        """The cluster created earlier can be deleted."""
        success = luigi.run(['--local-scheduler',
                             '--no-lock',
                             'DeleteDataprocClusterTask',
                             '--gcloud-project-id=' + PROJECT_ID,
                             '--dataproc-cluster-name=' + CLUSTER_NAME])
        self.assertTrue(success)

    def test_7_delete_cluster_should_return_immediately_if_no_cluster(self):
        """Deleting a non-existent cluster is a fast no-op (< 3 seconds)."""
        job_start = time.time()
        success = luigi.run(['--local-scheduler',
                             '--no-lock',
                             'DeleteDataprocClusterTask',
                             '--gcloud-project-id=' + PROJECT_ID,
                             '--dataproc-cluster-name=' + CLUSTER_NAME])
        self.assertTrue(success)
        self.assertLess(time.time() - job_start, 3)

    def test_8_create_cluster_image_version(self):
        """A cluster can be created with an explicit --image-version."""
        success = luigi.run(['--local-scheduler',
                             '--no-lock',
                             'CreateDataprocClusterTask',
                             '--gcloud-project-id=' + PROJECT_ID,
                             '--dataproc-cluster-name=' + CLUSTER_NAME + '-' + IMAGE_VERSION,
                             '--image-version=1.0'])
        self.assertTrue(success)

    def test_9_delete_cluster_image_version(self):
        """The image-version cluster from test_8 can be deleted."""
        success = luigi.run(['--local-scheduler',
                             '--no-lock',
                             'DeleteDataprocClusterTask',
                             '--gcloud-project-id=' + PROJECT_ID,
                             '--dataproc-cluster-name=' + CLUSTER_NAME + '-' + IMAGE_VERSION])
        self.assertTrue(success)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance WebDAV Client
"""
import time
from oslo_log import log
from six.moves import http_client
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE
LOG = log.getLogger(__name__)
# Hint shown for 502 errors: on the appliance side this usually means the
# HTTP service is down, or cinder.conf points at the wrong HTTPS port.
bad_gateway_err = _('Check the state of the http service. Also ensure that '
                    'the https port number is the same as the one specified '
                    'in cinder.conf.')
# Maps WebDAV HTTP status codes to operator-facing troubleshooting hints
# (looked up by ZFSSAWebDAVClient._lookup_error).
WebDAVHTTPErrors = {
    http_client.UNAUTHORIZED: _('User not authorized to perform WebDAV '
                                'operations.'),
    http_client.BAD_GATEWAY: bad_gateway_err,
    http_client.FORBIDDEN: _('Check access permissions for the ZFS share '
                             'assigned to this driver.'),
    http_client.NOT_FOUND: _('The source volume for this WebDAV operation not '
                             'found.'),
    http_client.INSUFFICIENT_STORAGE: _('Not enough storage space in the ZFS '
                                        'share to perform this operation.')
}
# Non-HTTP failure modes, keyed by symbolic name rather than status code.
WebDAVErrors = {
    'BadStatusLine': _('http service may have been abruptly disabled or put '
                       'to maintenance state in the middle of this '
                       'operation.'),
    'Bad_Gateway': bad_gateway_err
}
# PROPPATCH request-body template; the 'prop_name'/'prop_val' placeholders
# are substituted by ZFSSAWebDAVClient.build_data().
propertyupdate_data = """<?xml version="1.0"?>
<D:propertyupdate xmlns:D="DAV:">
<D:set>
<D:prop>
<D:prop_name>prop_val</D:prop_name>
</D:prop>
</D:set>
</D:propertyupdate>"""
class ZFSSAWebDAVClient(object):
    """Minimal WebDAV client for talking to a ZFS Storage Appliance share.

    Arbitrary WebDAV verbs are issued through request(); property updates
    go through set_file_prop().
    """
    def __init__(self, url, auth_str, **kwargs):
        """Initialize WebDAV Client"""
        # Base URL of the WebDAV share; files are addressed as
        # https_path + '/' + filename.
        self.https_path = url
        # Pre-encoded credential string for the HTTP Basic auth header.
        self.auth_str = auth_str
    def _lookup_error(self, error):
        """Map an HTTP status code or symbolic name to an error message.

        Driver-specific texts in WebDAVHTTPErrors/WebDAVErrors override the
        generic http_client.responses message.
        """
        msg = ''
        if error in http_client.responses:
            msg = http_client.responses[error]
        if error in WebDAVHTTPErrors:
            msg = WebDAVHTTPErrors[error]
        elif error in WebDAVErrors:
            msg = WebDAVErrors[error]
        return msg
    def build_data(self, data, propname, value):
        """Fill the 'prop_name'/'prop_val' placeholders in a body template."""
        res = data.replace('prop_name', propname)
        res = res.replace('prop_val', value)
        return res
    def set_file_prop(self, filename, propname, propval):
        """Set a WebDAV property on filename via a PROPPATCH request."""
        data = self.build_data(propertyupdate_data, propname, propval)
        return self.request(src_file=filename, data=data, method='PROPPATCH')
    def request(self, src_file="", dst_file="", method="", maxretries=10,
                data=""):
        """Issue a WebDAV request, retrying INTERNAL_SERVER_ERROR responses.

        :param src_file: source file name, appended to the base URL.
        :param dst_file: destination file name (COPY/MOVE); sent in the
                         Destination header when non-empty.
        :param method: WebDAV/HTTP verb (e.g. GET, COPY, PROPPATCH).
        :param maxretries: retry budget for HTTP 500 responses.
        :param data: request body.
        :returns: the urllib response object.
        :raises exception.WebDAVClientError: for non-retryable failures.
        """
        retry = 0
        src_url = self.https_path + "/" + src_file
        dst_url = self.https_path + "/" + dst_file
        request = urllib.request.Request(url=src_url, data=data)
        if dst_file != "":
            request.add_header('Destination', dst_url)
        if method == "PROPPATCH":
            request.add_header('Translate', 'F')
        request.add_header("Authorization", "Basic %s" % self.auth_str)
        # urllib only knows GET/POST; force the WebDAV verb instead.
        request.get_method = lambda: method
        LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s',
                  {'method': method, 'src': src_url, 'des': dst_url})
        while retry < maxretries:
            try:
                response = urllib.request.urlopen(request, timeout=None)
            except urllib.error.HTTPError as err:
                LOG.error(_LE('WebDAV returned with %(code)s error during '
                              '%(method)s call.'),
                          {'code': err.code, 'method': method})
                if err.code == http_client.INTERNAL_SERVER_ERROR:
                    LOG.error(_LE('WebDAV operation failed with error code: '
                                  '%(code)s reason: %(reason)s Retry attempt '
                                  '%(retry)s in progress.'),
                              {'code': err.code,
                               'reason': err.reason,
                               'retry': retry})
                    # Only 500s are retried; any other HTTP error falls
                    # through to the raise below.
                    if retry < maxretries:
                        retry += 1
                        time.sleep(1)
                        continue
                msg = self._lookup_error(err.code)
                raise exception.WebDAVClientError(msg=msg, code=err.code,
                                                  src=src_file, dst=dst_file,
                                                  method=method)
            except http_client.BadStatusLine as err:
                msg = self._lookup_error('BadStatusLine')
                code = 'http_client.BadStatusLine'
                raise exception.WebDAVClientError(msg=msg,
                                                  code=code,
                                                  src=src_file, dst=dst_file,
                                                  method=method)
            except urllib.error.URLError as err:
                reason = ''
                # NOTE(review): getattr() without a default raises
                # AttributeError when 'reason' is absent; presumably
                # URLError always carries it -- confirm before relying.
                if getattr(err, 'reason'):
                    reason = err.reason
                msg = self._lookup_error('Bad_Gateway')
                raise exception.WebDAVClientError(msg=msg,
                                                  code=reason, src=src_file,
                                                  dst=dst_file, method=method)
            break
        # NOTE(review): if every attempt hit a 500 the loop can exhaust with
        # 'response' unbound, raising UnboundLocalError here -- TODO confirm.
        return response
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {ChangeDetectionStrategy, Component, input} from '@angular/core';
/** Visual variants supported by the button host component. */
type ButtonType = 'primary' | 'secondary' | 'icon';
/** Size variants; 'standard' maps to no extra size class on the host. */
type ButtonSize = 'standard' | 'mid' | 'compact';
/**
 * Styled button applied through the `button[ng-button]` attribute selector.
 * Projects its content and maps the `btnType`/`size` inputs onto host CSS
 * classes; the 'standard' size adds no size-* class.
 */
@Component({
  selector: 'button[ng-button]',
  template: '<ng-content/>',
  styleUrl: './button.component.scss',
  changeDetection: ChangeDetectionStrategy.OnPush,
  host: {
    class: 'ng-button',
    '[class.type-primary]': `btnType() === 'primary'`,
    '[class.type-secondary]': `btnType() === 'secondary'`,
    '[class.type-icon]': `btnType() === 'icon'`,
    '[class.size-compact]': `size() === 'compact'`,
    '[class.size-mid]': `size() === 'mid'`,
  },
})
export class ButtonComponent {
  // Visual variant; defaults to 'primary'.
  readonly btnType = input<ButtonType>('primary');
  // Size variant; defaults to 'standard'.
  readonly size = input<ButtonSize>('standard');
}
|
typescript
|
github
|
https://github.com/angular/angular
|
devtools/projects/ng-devtools/src/lib/shared/button/button.component.ts
|
# -*- test-case-name: twisted.python.test.test_runtime -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import os
import sys
import time
import imp
import warnings
from twisted.python import compat
# The low-level thread module was renamed between Python 2 ("thread") and
# Python 3 ("_thread"); the chosen name is probed by Platform.supportsThreads.
if compat._PY3:
    _threadModule = "_thread"
else:
    _threadModule = "thread"
def shortPythonVersion():
    """
    Returns the Python version as a dot-separated string.
    """
    major, minor, micro = sys.version_info[:3]
    return "%s.%s.%s" % (major, minor, micro)
# Maps os.name values (plus the Jython os-module name) to Twisted's
# canonical platform type as returned by Platform.getType().
knownPlatforms = {
    'nt': 'win32',
    'ce': 'win32',
    'posix': 'posix',
    'java': 'java',
    'org.python.modules.os': 'java',
    }
# Per-platform wall-clock function; everything falls back to time.time.
_timeFunctions = {
    #'win32': time.clock,
    'win32': time.time,
    }
class Platform:
    """
    Gives us information about the platform we're running on.

    Instances default to describing the running interpreter's platform; the
    constructor arguments let tests describe a different one.
    """
    # Resolved once at class-definition time from os.name; None if unknown.
    type = knownPlatforms.get(os.name)
    # Preferred wall-clock function for this platform type.
    seconds = staticmethod(_timeFunctions.get(type, time.time))
    _platform = sys.platform

    def __init__(self, name=None, platform=None):
        """
        @param name: optional os.name-style value overriding detection.
        @param platform: optional sys.platform-style value overriding
            detection.
        """
        if name is not None:
            self.type = knownPlatforms.get(name)
            self.seconds = _timeFunctions.get(self.type, time.time)
        if platform is not None:
            self._platform = platform

    def isKnown(self):
        """
        Do we know about this platform?

        @return: Boolean indicating whether this is a known platform or not.
        @rtype: C{bool}
        """
        # Fixed idiom: None is a singleton, so identity ('is not'), not
        # equality ('!='), is the correct comparison.
        return self.type is not None

    def getType(self):
        """
        Get platform type.

        @return: Either 'posix', 'win32' or 'java'
        @rtype: C{str}
        """
        return self.type

    def isMacOSX(self):
        """
        Check if current platform is Mac OS X.

        @return: C{True} if the current platform has been detected as OS X.
        @rtype: C{bool}
        """
        return self._platform == "darwin"

    def isWinNT(self):
        """
        Are we running in Windows NT?

        This is deprecated and always returns C{True} on win32 because
        Twisted only supports Windows NT-derived platforms at this point.

        @return: C{True} if the current platform has been detected as
            Windows NT.
        @rtype: C{bool}
        """
        warnings.warn(
            "twisted.python.runtime.Platform.isWinNT was deprecated in "
            "Twisted 13.0. Use Platform.isWindows instead.",
            DeprecationWarning, stacklevel=2)
        return self.isWindows()

    def isWindows(self):
        """
        Are we running in Windows?

        @return: C{True} if the current platform has been detected as
            Windows.
        @rtype: C{bool}
        """
        return self.getType() == 'win32'

    def isVista(self):
        """
        Check if current platform is Windows Vista or Windows Server 2008.

        @return: C{True} if the current platform has been detected as Vista
        @rtype: C{bool}
        """
        # getwindowsversion only exists on Windows builds of Python.
        if getattr(sys, "getwindowsversion", None) is not None:
            return sys.getwindowsversion()[0] == 6
        else:
            return False

    def isLinux(self):
        """
        Check if current platform is Linux.

        @return: C{True} if the current platform has been detected as Linux.
        @rtype: C{bool}
        """
        return self._platform.startswith("linux")

    def supportsThreads(self):
        """
        Can threads be created?

        @return: C{True} if the threads are supported on the current platform.
        @rtype: C{bool}
        """
        try:
            # find_module returns a None file object for builtin modules,
            # which is the case when thread support is compiled in.
            return imp.find_module(_threadModule)[0] is None
        except ImportError:
            return False

    def supportsINotify(self):
        """
        Return C{True} if we can use the inotify API on this platform.

        @since: 10.1
        """
        try:
            from twisted.python._inotify import INotifyError, init
        except ImportError:
            return False
        # Probe the kernel: init() fails (INotifyError) where the inotify
        # syscalls are unavailable.
        try:
            os.close(init())
        except INotifyError:
            return False
        return True
# Shared singleton describing the platform this process is running on,
# plus convenience module-level aliases.
platform = Platform()
platformType = platform.getType()
seconds = platform.seconds
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/hwmon/ti,lm87.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments LM87 Hardware Monitor
maintainers:
- Javier Carrasco <javier.carrasco.cruz@gmail.com>
description: |
The LM87 is a serial interface system hardware monitor
with remote diode temperature sensing.
Datasheets:
https://www.ti.com/product/LM87
properties:
compatible:
enum:
- adi,adm1024
- ti,lm87
reg:
maxItems: 1
has-temp3:
$ref: /schemas/types.yaml#/definitions/flag
description:
This configures pins 18 and 19 to be used as a second
remote temperature sensing channel. By default the pins
are configured as voltage input pins in0 and in5.
has-in6:
$ref: /schemas/types.yaml#/definitions/flag
description:
When set, pin 5 is configured to be used as voltage input
in6. Otherwise the pin is set as FAN1 input.
has-in7:
$ref: /schemas/types.yaml#/definitions/flag
description:
When set, pin 6 is configured to be used as voltage input
in7. Otherwise the pin is set as FAN2 input.
vcc-supply:
description:
Regulator supplying power, can be configured to measure
5.0V power supply. Default is 3.3V.
required:
- compatible
- reg
additionalProperties: false
examples:
- |
i2c {
#address-cells = <1>;
#size-cells = <0>;
hwmon@2e {
compatible = "ti,lm87";
reg = <0x2e>;
has-temp3;
        vcc-supply = <&reg_5v0>;
};
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
|
# -*- test-case-name: twisted.news.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
News server backend implementations.
"""
import getpass, pickle, time, socket
import os
import StringIO
from hashlib import md5
from email.Message import Message
from email.Generator import Generator
from zope.interface import implementer, Interface
from twisted.news.nntp import NNTPError
from twisted.mail import smtp
from twisted.internet import defer
from twisted.enterprise import adbapi
from twisted.persisted import dirdbm
# Error codes handed to defer.fail() by the storage backends below.
ERR_NOGROUP, ERR_NOARTICLE = range(2, 4) # XXX - put NNTP values here (I guess?)
# Header order used for XOVER responses (see Article.overview()).
OVERVIEW_FMT = [
    'Subject', 'From', 'Date', 'Message-ID', 'References',
    'Bytes', 'Lines', 'Xref'
]
def hexdigest(md5): #XXX: argh. 1.5.2 doesn't have this.
    """Return the digest of *md5* as a string of hex byte values.

    NOTE(review): written for Python 2 str digests (ord over characters);
    byte values below 0x10 are emitted without zero-padding.
    """
    return ''.join(hex(ord(ch))[2:] for ch in md5.digest())
class Article:
    def __init__(self, head, body):
        """Parse *head* (a CRLF-separated header block) and store *body*.

        Missing Message-ID, Bytes, Lines and Date headers are synthesized.
        """
        self.body = body
        # Maps lowercased header name -> (original name, value) tuple.
        self.headers = {}
        header = None
        for line in head.split('\r\n'):
            if line[0] in ' \t':
                # Continuation line: fold it into the previous header value.
                i = list(self.headers[header])
                i[1] += '\r\n' + line
            else:
                i = line.split(': ', 1)
                header = i[0].lower()
            self.headers[header] = tuple(i)
        if not self.getHeader('Message-ID'):
            # NOTE(review): md5() requires bytes on Python 3; this path
            # presumably predates Python 3 -- confirm before reuse.
            s = str(time.time()) + self.body
            id = hexdigest(md5(s)) + '@' + socket.gethostname()
            self.putHeader('Message-ID', '<%s>' % id)
        if not self.getHeader('Bytes'):
            self.putHeader('Bytes', str(len(self.body)))
        if not self.getHeader('Lines'):
            self.putHeader('Lines', str(self.body.count('\n')))
        if not self.getHeader('Date'):
            self.putHeader('Date', time.ctime(time.time()))
    def getHeader(self, header):
        """Return the value for *header* (case-insensitive), or ''."""
        h = header.lower()
        if h in self.headers:
            return self.headers[h][1]
        else:
            return ''
    def putHeader(self, header, value):
        """Set *header* to *value*, preserving the caller's capitalization."""
        self.headers[header.lower()] = (header, value)
    def textHeaders(self):
        """Render all headers as a CRLF-terminated text block."""
        headers = []
        for i in self.headers.values():
            headers.append('%s: %s' % i)
        return '\r\n'.join(headers) + '\r\n'
    def overview(self):
        """Return this article's header values in OVERVIEW_FMT order."""
        xover = []
        for i in OVERVIEW_FMT:
            xover.append(self.getHeader(i))
        return xover
class NewsServerError(Exception):
    """Raised when a news-storage operation cannot be completed."""
class INewsStorage(Interface):
    """
    An interface for storing and requesting news articles
    """
    # NOTE: zope.interface method declarations intentionally omit 'self'.
    def listRequest():
        """
        Returns a deferred whose callback will be passed a list of 4-tuples
        containing (name, max index, min index, flags) for each news group
        """
    def subscriptionRequest():
        """
        Returns a deferred whose callback will be passed the list of
        recommended subscription groups for new server users
        """
    def postRequest(message):
        """
        Returns a deferred whose callback will be invoked if 'message'
        is successfully posted to one or more specified groups and
        whose errback will be invoked otherwise.
        """
    def overviewRequest():
        """
        Returns a deferred whose callback will be passed a list of
        headers describing this server's overview format.
        """
    def xoverRequest(group, low, high):
        """
        Returns a deferred whose callback will be passed a list of xover
        headers for the given group over the given range.  If low is None,
        the range starts at the first article.  If high is None, the range
        ends at the last article.
        """
    def xhdrRequest(group, low, high, header):
        """
        Returns a deferred whose callback will be passed a list of XHDR data
        for the given group over the given range.  If low is None,
        the range starts at the first article.  If high is None, the range
        ends at the last article.
        """
    def listGroupRequest(group):
        """
        Returns a deferred whose callback will be passed a two-tuple of
        (group name, [article indices])
        """
    def groupRequest(group):
        """
        Returns a deferred whose callback will be passed a five-tuple of
        (group name, article count, highest index, lowest index, group flags)
        """
    def articleExistsRequest(id):
        """
        Returns a deferred whose callback will be passed with a true value
        if a message with the specified Message-ID exists in the database
        and with a false value otherwise.
        """
    def articleRequest(group, index, id = None):
        """
        Returns a deferred whose callback will be passed a file-like object
        containing the full article text (headers and body) for the article
        of the specified index in the specified group, and whose errback
        will be invoked if the article or group does not exist.  If id is
        not None, index is ignored and the article with the given Message-ID
        will be returned instead, along with its index in the specified
        group.
        """
    def headRequest(group, index):
        """
        Returns a deferred whose callback will be passed the header for
        the article of the specified index in the specified group, and
        whose errback will be invoked if the article or group does not
        exist.
        """
    def bodyRequest(group, index):
        """
        Returns a deferred whose callback will be passed the body for
        the article of the specified index in the specified group, and
        whose errback will be invoked if the article or group does not
        exist.
        """
class NewsStorage:
    """
    Backwards compatibility class -- There is no reason to inherit from this,
    just implement INewsStorage instead.
    """
    def listRequest(self):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def subscriptionRequest(self):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def postRequest(self, message):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def overviewRequest(self):
        """Default overview format, shared by all storages."""
        return defer.succeed(OVERVIEW_FMT)
    def xoverRequest(self, group, low, high):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def xhdrRequest(self, group, low, high, header):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def listGroupRequest(self, group):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def groupRequest(self, group):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def articleExistsRequest(self, id):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def articleRequest(self, group, index, id = None):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def headRequest(self, group, index):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
    def bodyRequest(self, group, index):
        """Unimplemented; subclasses must override."""
        raise NotImplementedError()
class _ModerationMixin:
    """
    Storage implementations can inherit from this class to get the easy-to-use
    C{notifyModerators} method which will take care of sending messages which
    require moderation to a list of moderators.
    """
    # smtp.sendmail exposed as an overridable class attribute.
    sendmail = staticmethod(smtp.sendmail)
    def notifyModerators(self, moderators, article):
        """
        Send an article to a list of group moderators to be moderated.
        @param moderators: A C{list} of C{str} giving RFC 2821 addresses of
            group moderators to notify.
        @param article: The article requiring moderation.
        @type article: L{Article}
        @return: A L{Deferred} which fires with the result of sending the email.
        """
        # Moderated postings go through as long as they have an Approved
        # header, regardless of what the value is
        group = article.getHeader('Newsgroups')
        subject = article.getHeader('Subject')
        if self._sender is None:
            # This case should really go away. This isn't a good default.
            sender = 'twisted-news@' + socket.gethostname()
        else:
            sender = self._sender
        # Wrap the article as a message/rfc822 attachment of a notification
        # mail addressed to all moderators.
        msg = Message()
        msg['Message-ID'] = smtp.messageid()
        msg['From'] = sender
        msg['To'] = ', '.join(moderators)
        msg['Subject'] = 'Moderate new %s message: %s' % (group, subject)
        msg['Content-Type'] = 'message/rfc822'
        payload = Message()
        for header, value in article.headers.values():
            payload.add_header(header, value)
        payload.set_payload(article.body)
        msg.attach(payload)
        # Flatten the MIME tree to a string (no leading unixfrom line).
        out = StringIO.StringIO()
        gen = Generator(out, False)
        gen.flatten(msg)
        msg = out.getvalue()
        return self.sendmail(self._mailhost, sender, moderators, msg)
@implementer(INewsStorage)
class PickleStorage(_ModerationMixin):
    """
    A trivial NewsStorage implementation using pickles

    Contains numerous flaws and is generally unsuitable for any
    real applications. Consider yourself warned!
    """
    # In-memory databases shared between instances, keyed by filename, so
    # several storages backed by the same file share one dict.
    sharedDBs = {}

    def __init__(self, filename, groups=None, moderators=(),
                 mailhost=None, sender=None):
        """
        @param filename: Path of the pickle file backing this storage.
        @param groups: Optional list of group names created on first load.
        @param moderators: Sequence of (group, email) pairs naming the
            moderator of each moderated group.
        @param mailhost: A C{str} giving the mail exchange host which will
            accept moderation emails from this server.  Must accept emails
            destined for any address specified as a moderator.
        @param sender: A C{str} giving the address which will be used as the
            sender of any moderation email generated by this server.
        """
        self.datafile = filename
        self.load(filename, groups, moderators)
        self._mailhost = mailhost
        self._sender = sender

    def getModerators(self, groups):
        """Return the moderator addresses responsible for *groups*."""
        # First see if any groups are moderated.  If so, nothing gets
        # posted; the whole message is forwarded to the moderator address.
        moderators = []
        for group in groups:
            # BUG FIX: the old code did extend(dict.get(group, None)),
            # which raised TypeError for unmoderated groups and spread a
            # moderator address string into individual characters.
            moderator = self.db['moderators'].get(group)
            if moderator:
                moderators.append(moderator)
        # Return a concrete list (not a filter object) so truth-testing in
        # postRequest behaves correctly on Python 3.
        return moderators

    def listRequest(self):
        "Returns a list of 4-tuples: (name, max index, min index, flags)"
        l = self.db['groups']
        r = []
        for i in l:
            if len(self.db[i].keys()):
                low = min(self.db[i].keys())
                high = max(self.db[i].keys()) + 1
            else:
                low = high = 0
            # 'm' flags a moderated group, 'y' an open one.
            if i in self.db['moderators']:
                flags = 'm'
            else:
                flags = 'y'
            r.append((i, high, low, flags))
        return defer.succeed(r)

    def subscriptionRequest(self):
        """Return the recommended subscription list for new users."""
        return defer.succeed(['alt.test'])

    def postRequest(self, message):
        """Store *message* in every carried group named in its Newsgroups
        header, or forward it to the moderators of a moderated group.

        @return: a Deferred firing None on success, failing otherwise.
        """
        cleave = message.find('\r\n\r\n')
        headers, article = message[:cleave], message[cleave + 4:]
        a = Article(headers, article)
        groups = a.getHeader('Newsgroups').split()
        xref = []
        # Check moderated status
        moderators = self.getModerators(groups)
        if moderators and not a.getHeader('Approved'):
            return self.notifyModerators(moderators, a)
        for group in groups:
            if group in self.db:
                if len(self.db[group].keys()):
                    index = max(self.db[group].keys()) + 1
                else:
                    index = 1
                xref.append((group, str(index)))
                self.db[group][index] = a
        if len(xref) == 0:
            return defer.fail(None)
        # BUG FIX: join group:index pairs with spaces -- the Xref header is
        # space-separated (matches NewsShelf.postRequest).
        a.putHeader('Xref', '%s %s' % (
            socket.gethostname().split()[0],
            ' '.join(map(lambda x: ':'.join(x), xref))
        ))
        self.flush()
        return defer.succeed(None)

    def overviewRequest(self):
        """Return the overview header format used by this server."""
        return defer.succeed(OVERVIEW_FMT)

    def xoverRequest(self, group, low, high):
        """Return xover rows for articles of *group* within [low, high]."""
        if group not in self.db:
            return defer.succeed([])
        r = []
        for i in self.db[group].keys():
            if (low is None or i >= low) and (high is None or i <= high):
                r.append([str(i)] + self.db[group][i].overview())
        return defer.succeed(r)

    def xhdrRequest(self, group, low, high, header):
        """Return (index, header value) pairs for articles in the range."""
        if group not in self.db:
            return defer.succeed([])
        r = []
        for i in self.db[group].keys():
            # BUG FIX: the original condition lacked parentheses, so
            # operator precedence broke the range check (compare the
            # correctly parenthesized test in xoverRequest).
            if (low is None or i >= low) and (high is None or i <= high):
                r.append((i, self.db[group][i].getHeader(header)))
        return defer.succeed(r)

    def listGroupRequest(self, group):
        """Return (group name, article indices) or fail for unknown groups."""
        if group in self.db:
            return defer.succeed((group, self.db[group].keys()))
        else:
            return defer.fail(None)

    def groupRequest(self, group):
        """Return (name, count, high, low, flags) or fail with ERR_NOGROUP."""
        if group in self.db:
            if len(self.db[group].keys()):
                num = len(self.db[group].keys())
                low = min(self.db[group].keys())
                high = max(self.db[group].keys())
            else:
                num = low = high = 0
            flags = 'y'
            return defer.succeed((group, num, high, low, flags))
        else:
            return defer.fail(ERR_NOGROUP)

    def articleExistsRequest(self, id):
        """Return 1 if an article with Message-ID *id* exists, else 0."""
        for group in self.db['groups']:
            for a in self.db[group].values():
                if a.getHeader('Message-ID') == id:
                    return defer.succeed(1)
        return defer.succeed(0)

    def articleRequest(self, group, index, id = None):
        """Return (index, Message-ID, file-like article text) for an article."""
        if id is not None:
            raise NotImplementedError
        if group in self.db:
            if index in self.db[group]:
                a = self.db[group][index]
                return defer.succeed((
                    index,
                    a.getHeader('Message-ID'),
                    StringIO.StringIO(a.textHeaders() + '\r\n' + a.body)
                ))
            else:
                return defer.fail(ERR_NOARTICLE)
        else:
            return defer.fail(ERR_NOGROUP)

    def headRequest(self, group, index):
        """Return (index, Message-ID, header text) for an article."""
        if group in self.db:
            if index in self.db[group]:
                a = self.db[group][index]
                return defer.succeed((index, a.getHeader('Message-ID'), a.textHeaders()))
            else:
                return defer.fail(ERR_NOARTICLE)
        else:
            return defer.fail(ERR_NOGROUP)

    def bodyRequest(self, group, index):
        """Return (index, Message-ID, file-like body) for an article."""
        if group in self.db:
            if index in self.db[group]:
                a = self.db[group][index]
                return defer.succeed((index, a.getHeader('Message-ID'), StringIO.StringIO(a.body)))
            else:
                return defer.fail(ERR_NOARTICLE)
        else:
            return defer.fail(ERR_NOGROUP)

    def flush(self):
        """Persist the database to disk."""
        # Pickle is a binary protocol: 'wb'/'rb' are required on Python 3.
        with open(self.datafile, 'wb') as f:
            pickle.dump(self.db, f)

    def load(self, filename, groups = None, moderators = ()):
        """Load (or create) the pickle database backing this storage."""
        if filename in PickleStorage.sharedDBs:
            self.db = PickleStorage.sharedDBs[filename]
        else:
            try:
                with open(filename, 'rb') as f:
                    self.db = pickle.load(f)
                PickleStorage.sharedDBs[filename] = self.db
            except IOError:
                # No existing database: build a fresh one.
                self.db = PickleStorage.sharedDBs[filename] = {}
                self.db['groups'] = groups
                if groups is not None:
                    for i in groups:
                        self.db[i] = {}
                self.db['moderators'] = dict(moderators)
                self.flush()
class Group:
    """A single news group: its name, flags and article index bounds."""
    # Class-level defaults for a freshly created, empty group.
    name = None
    flags = ''
    minArticle = 1
    maxArticle = 0
    articles = None

    def __init__(self, name, flags = 'y'):
        """Create a group called *name* with the given flag string."""
        self.articles = {}
        self.flags = flags
        self.name = name
@implementer(INewsStorage)
class NewsShelf(_ModerationMixin):
    """
    A NewStorage implementation using Twisted's dirdbm persistence module.
    """
    def __init__(self, mailhost, path, sender=None):
        """
        @param mailhost: A C{str} giving the mail exchange host which will
            accept moderation emails from this server.  Must accept emails
            destined for any address specified as a moderator.
        @param sender: A C{str} giving the address which will be used as the
            sender of any moderation email generated by this server.
        """
        self.path = path
        self._mailhost = self.mailhost = mailhost
        self._sender = sender
        if not os.path.exists(path):
            os.mkdir(path)
        self.dbm = dirdbm.Shelf(os.path.join(path, "newsshelf"))
        # An empty shelf means a brand new database: create its structure.
        if not len(self.dbm.keys()):
            self.initialize()
    def initialize(self):
        """Create the on-disk structures for a brand new shelf."""
        # A dictionary of group name/Group instance items
        self.dbm['groups'] = dirdbm.Shelf(os.path.join(self.path, 'groups'))
        # A dictionary of group name/email address
        self.dbm['moderators'] = dirdbm.Shelf(os.path.join(self.path, 'moderators'))
        # A list of group names
        self.dbm['subscriptions'] = []
        # A dictionary of MessageID strings/xref lists
        self.dbm['Message-IDs'] = dirdbm.Shelf(os.path.join(self.path, 'Message-IDs'))
    def addGroup(self, name, flags):
        """Create a new group with the given flag string."""
        self.dbm['groups'][name] = Group(name, flags)
    def addSubscription(self, name):
        """Append *name* to the recommended subscription list."""
        self.dbm['subscriptions'] = self.dbm['subscriptions'] + [name]
    def addModerator(self, group, email):
        """Mark *group* as moderated by *email*."""
        self.dbm['moderators'][group] = email
    def listRequest(self):
        """Return a Deferred firing (name, max, min, flags) per group."""
        result = []
        for g in self.dbm['groups'].values():
            result.append((g.name, g.maxArticle, g.minArticle, g.flags))
        return defer.succeed(result)
    def subscriptionRequest(self):
        """Return a Deferred firing the recommended subscription list."""
        return defer.succeed(self.dbm['subscriptions'])
    def getModerator(self, groups):
        """Return the first moderator address found for *groups*, or None."""
        # first see if any groups are moderated. if so, nothing gets posted,
        # but the whole messages gets forwarded to the moderator address
        for group in groups:
            try:
                return self.dbm['moderators'][group]
            except KeyError:
                pass
        return None
    def notifyModerator(self, moderator, article):
        """
        Notify a single moderator about an article requiring moderation.
        C{notifyModerators} should be preferred.
        """
        return self.notifyModerators([moderator], article)
    def postRequest(self, message):
        """Store *message* in each carried group from its Newsgroups header,
        or forward it to the moderator of a moderated group."""
        cleave = message.find('\r\n\r\n')
        headers, article = message[:cleave], message[cleave + 4:]
        article = Article(headers, article)
        groups = article.getHeader('Newsgroups').split()
        xref = []
        # Check for moderated status
        moderator = self.getModerator(groups)
        if moderator and not article.getHeader('Approved'):
            return self.notifyModerators([moderator], article)
        for group in groups:
            try:
                g = self.dbm['groups'][group]
            except KeyError:
                pass
            else:
                index = g.maxArticle + 1
                g.maxArticle += 1
                g.articles[index] = article
                xref.append((group, str(index)))
                # Re-store the Group so dirdbm persists the mutation.
                self.dbm['groups'][group] = g
        if not xref:
            return defer.fail(NewsServerError("No groups carried: " + ' '.join(groups)))
        article.putHeader('Xref', '%s %s' % (socket.gethostname().split()[0], ' '.join(map(lambda x: ':'.join(x), xref))))
        self.dbm['Message-IDs'][article.getHeader('Message-ID')] = xref
        return defer.succeed(None)
    def overviewRequest(self):
        """Return the overview header format used by this server."""
        return defer.succeed(OVERVIEW_FMT)
    def xoverRequest(self, group, low, high):
        """Return xover rows for articles of *group* within [low, high]."""
        if group not in self.dbm['groups']:
            return defer.succeed([])
        if low is None:
            low = 0
        if high is None:
            high = self.dbm['groups'][group].maxArticle
        r = []
        for i in range(low, high + 1):
            if i in self.dbm['groups'][group].articles:
                r.append([str(i)] + self.dbm['groups'][group].articles[i].overview())
        return defer.succeed(r)
    def xhdrRequest(self, group, low, high, header):
        """Return (index, header value) pairs for articles in the range."""
        if group not in self.dbm['groups']:
            return defer.succeed([])
        if low is None:
            low = 0
        if high is None:
            high = self.dbm['groups'][group].maxArticle
        r = []
        for i in range(low, high + 1):
            if i in self.dbm['groups'][group].articles:
                r.append((i, self.dbm['groups'][group].articles[i].getHeader(header)))
        return defer.succeed(r)
    def listGroupRequest(self, group):
        """Return (group name, article indices) or fail for unknown groups."""
        if group in self.dbm['groups']:
            return defer.succeed((group, self.dbm['groups'][group].articles.keys()))
        return defer.fail(NewsServerError("No such group: " + group))
    def groupRequest(self, group):
        """Return (name, count, high, low, flags) for *group*."""
        try:
            g = self.dbm['groups'][group]
        except KeyError:
            return defer.fail(NewsServerError("No such group: " + group))
        else:
            flags = g.flags
            low = g.minArticle
            high = g.maxArticle
            num = high - low + 1
            return defer.succeed((group, num, high, low, flags))
    def articleExistsRequest(self, id):
        """Return whether an article with Message-ID *id* exists."""
        return defer.succeed(id in self.dbm['Message-IDs'])
    def articleRequest(self, group, index, id = None):
        """Return (index, Message-ID, file-like article text); a non-None
        *id* overrides group/index via the Message-ID xref table."""
        if id is not None:
            try:
                xref = self.dbm['Message-IDs'][id]
            except KeyError:
                return defer.fail(NewsServerError("No such article: " + id))
            else:
                group, index = xref[0]
                index = int(index)
        try:
            a = self.dbm['groups'][group].articles[index]
        except KeyError:
            return defer.fail(NewsServerError("No such group: " + group))
        else:
            return defer.succeed((
                index,
                a.getHeader('Message-ID'),
                StringIO.StringIO(a.textHeaders() + '\r\n' + a.body)
            ))
    def headRequest(self, group, index, id = None):
        """Return (index, Message-ID, header text); *id* overrides
        group/index when given."""
        if id is not None:
            try:
                xref = self.dbm['Message-IDs'][id]
            except KeyError:
                return defer.fail(NewsServerError("No such article: " + id))
            else:
                group, index = xref[0]
                index = int(index)
        try:
            a = self.dbm['groups'][group].articles[index]
        except KeyError:
            return defer.fail(NewsServerError("No such group: " + group))
        else:
            return defer.succeed((index, a.getHeader('Message-ID'), a.textHeaders()))
    def bodyRequest(self, group, index, id = None):
        """Return (index, Message-ID, file-like body); *id* overrides
        group/index when given."""
        if id is not None:
            try:
                xref = self.dbm['Message-IDs'][id]
            except KeyError:
                return defer.fail(NewsServerError("No such article: " + id))
            else:
                group, index = xref[0]
                index = int(index)
        try:
            a = self.dbm['groups'][group].articles[index]
        except KeyError:
            return defer.fail(NewsServerError("No such group: " + group))
        else:
            return defer.succeed((index, a.getHeader('Message-ID'), StringIO.StringIO(a.body)))
@implementer(INewsStorage)
class NewsStorageAugmentation:
"""
A NewsStorage implementation using Twisted's asynchronous DB-API
"""
schema = """
CREATE TABLE groups (
group_id SERIAL,
name VARCHAR(80) NOT NULL,
flags INTEGER DEFAULT 0 NOT NULL
);
CREATE UNIQUE INDEX group_id_index ON groups (group_id);
CREATE UNIQUE INDEX name_id_index ON groups (name);
CREATE TABLE articles (
article_id SERIAL,
message_id TEXT,
header TEXT,
body TEXT
);
CREATE UNIQUE INDEX article_id_index ON articles (article_id);
CREATE UNIQUE INDEX article_message_index ON articles (message_id);
CREATE TABLE postings (
group_id INTEGER,
article_id INTEGER,
article_index INTEGER NOT NULL
);
CREATE UNIQUE INDEX posting_article_index ON postings (article_id);
CREATE TABLE subscriptions (
group_id INTEGER
);
CREATE TABLE overview (
header TEXT
);
"""
    def __init__(self, info):
        """
        @param info: keyword arguments for adbapi.ConnectionPool (driver
            module name, connection parameters); retained so the pool can
            be rebuilt after unpickling.
        """
        self.info = info
        self.dbpool = adbapi.ConnectionPool(**self.info)
    def __setstate__(self, state):
        """Restore from a pickle: re-prompt for the database password and
        rebuild the connection pool (neither survives pickling)."""
        self.__dict__ = state
        self.info['password'] = getpass.getpass('Database password for %s: ' % (self.info['user'],))
        self.dbpool = adbapi.ConnectionPool(**self.info)
        # Drop the password again so it is never written out with self.info.
        del self.info['password']
    def listRequest(self):
        """Return a Deferred firing (name, max, min, flags) rows per group."""
        # COALESCE may not be totally portable
        # it is shorthand for
        # CASE WHEN (first parameter) IS NOT NULL then (first parameter) ELSE (second parameter) END
        sql = """
            SELECT groups.name,
                COALESCE(MAX(postings.article_index), 0),
                COALESCE(MIN(postings.article_index), 0),
                groups.flags
            FROM groups LEFT OUTER JOIN postings
            ON postings.group_id = groups.group_id
            GROUP BY groups.name, groups.flags
            ORDER BY groups.name
        """
        return self.dbpool.runQuery(sql)
    def subscriptionRequest(self):
        """Return a Deferred firing the names of recommended groups."""
        sql = """
        SELECT groups.name FROM groups,subscriptions WHERE groups.group_id = subscriptions.group_id
        """
        return self.dbpool.runQuery(sql)
    def postRequest(self, message):
        """Split *message* into headers/body and post it inside a
        database transaction (see _doPost)."""
        cleave = message.find('\r\n\r\n')
        headers, article = message[:cleave], message[cleave + 4:]
        article = Article(headers, article)
        return self.dbpool.runInteraction(self._doPost, article)
def _doPost(self, transaction, article):
# Get the group ids
groups = article.getHeader('Newsgroups').split()
if not len(groups):
raise NNTPError('Missing Newsgroups header')
sql = """
SELECT name, group_id FROM groups
WHERE name IN (%s)
""" % (', '.join([("'%s'" % (adbapi.safe(group),)) for group in groups]),)
transaction.execute(sql)
result = transaction.fetchall()
# No relevant groups, bye bye!
if not len(result):
raise NNTPError('None of groups in Newsgroup header carried')
# Got some groups, now find the indices this article will have in each
sql = """
SELECT groups.group_id, COALESCE(MAX(postings.article_index), 0) + 1
FROM groups LEFT OUTER JOIN postings
ON postings.group_id = groups.group_id
WHERE groups.group_id IN (%s)
GROUP BY groups.group_id
""" % (', '.join([("%d" % (id,)) for (group, id) in result]),)
transaction.execute(sql)
indices = transaction.fetchall()
if not len(indices):
raise NNTPError('Internal server error - no indices found')
# Associate indices with group names
gidToName = dict([(b, a) for (a, b) in result])
gidToIndex = dict(indices)
nameIndex = []
for i in gidToName:
nameIndex.append((gidToName[i], gidToIndex[i]))
# Build xrefs
xrefs = socket.gethostname().split()[0]
xrefs = xrefs + ' ' + ' '.join([('%s:%d' % (group, id)) for (group, id) in nameIndex])
article.putHeader('Xref', xrefs)
# Hey! The article is ready to be posted! God damn f'in finally.
sql = """
INSERT INTO articles (message_id, header, body)
VALUES ('%s', '%s', '%s')
""" % (
adbapi.safe(article.getHeader('Message-ID')),
adbapi.safe(article.textHeaders()),
adbapi.safe(article.body)
)
transaction.execute(sql)
# Now update the posting to reflect the groups to which this belongs
for gid in gidToName:
sql = """
INSERT INTO postings (group_id, article_id, article_index)
VALUES (%d, (SELECT last_value FROM articles_article_id_seq), %d)
""" % (gid, gidToIndex[gid])
transaction.execute(sql)
return len(nameIndex)
def overviewRequest(self):
sql = """
SELECT header FROM overview
"""
return self.dbpool.runQuery(sql).addCallback(lambda result: [header[0] for header in result])
def xoverRequest(self, group, low, high):
sql = """
SELECT postings.article_index, articles.header
FROM articles,postings,groups
WHERE postings.group_id = groups.group_id
AND groups.name = '%s'
AND postings.article_id = articles.article_id
%s
%s
""" % (
adbapi.safe(group),
low is not None and "AND postings.article_index >= %d" % (low,) or "",
high is not None and "AND postings.article_index <= %d" % (high,) or ""
)
return self.dbpool.runQuery(sql).addCallback(
lambda results: [
[id] + Article(header, None).overview() for (id, header) in results
]
)
def xhdrRequest(self, group, low, high, header):
sql = """
SELECT articles.header
FROM groups,postings,articles
WHERE groups.name = '%s' AND postings.group_id = groups.group_id
AND postings.article_index >= %d
AND postings.article_index <= %d
""" % (adbapi.safe(group), low, high)
return self.dbpool.runQuery(sql).addCallback(
lambda results: [
(i, Article(h, None).getHeader(h)) for (i, h) in results
]
)
def listGroupRequest(self, group):
sql = """
SELECT postings.article_index FROM postings,groups
WHERE postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (adbapi.safe(group),)
return self.dbpool.runQuery(sql).addCallback(
lambda results, group = group: (group, [res[0] for res in results])
)
def groupRequest(self, group):
sql = """
SELECT groups.name,
COUNT(postings.article_index),
COALESCE(MAX(postings.article_index), 0),
COALESCE(MIN(postings.article_index), 0),
groups.flags
FROM groups LEFT OUTER JOIN postings
ON postings.group_id = groups.group_id
WHERE groups.name = '%s'
GROUP BY groups.name, groups.flags
""" % (adbapi.safe(group),)
return self.dbpool.runQuery(sql).addCallback(
lambda results: tuple(results[0])
)
def articleExistsRequest(self, id):
sql = """
SELECT COUNT(message_id) FROM articles
WHERE message_id = '%s'
""" % (adbapi.safe(id),)
return self.dbpool.runQuery(sql).addCallback(
lambda result: bool(result[0][0])
)
def articleRequest(self, group, index, id = None):
if id is not None:
sql = """
SELECT postings.article_index, articles.message_id, articles.header, articles.body
FROM groups,postings LEFT OUTER JOIN articles
ON articles.message_id = '%s'
WHERE groups.name = '%s'
AND groups.group_id = postings.group_id
""" % (adbapi.safe(id), adbapi.safe(group))
else:
sql = """
SELECT postings.article_index, articles.message_id, articles.header, articles.body
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(
lambda result: (
result[0][0],
result[0][1],
StringIO.StringIO(result[0][2] + '\r\n' + result[0][3])
)
)
def headRequest(self, group, index):
sql = """
SELECT postings.article_index, articles.message_id, articles.header
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(lambda result: result[0])
def bodyRequest(self, group, index):
sql = """
SELECT postings.article_index, articles.message_id, articles.body
FROM groups,articles LEFT OUTER JOIN postings
ON postings.article_id = articles.article_id
WHERE postings.article_index = %d
AND postings.group_id = groups.group_id
AND groups.name = '%s'
""" % (index, adbapi.safe(group))
return self.dbpool.runQuery(sql).addCallback(
lambda result: result[0]
).addCallback(
# result is a tuple of (index, id, body)
lambda result: (result[0], result[1], StringIO.StringIO(result[2]))
)
####
#### XXX - make these static methods some day
####
def makeGroupSQL(groups):
    """Return SQL that INSERTs one groups row per name in *groups*."""
    statements = []
    for name in groups:
        statements.append("""\n    INSERT INTO groups (name) VALUES ('%s');\n""" % (adbapi.safe(name),))
    return ''.join(statements)
def makeOverviewSQL():
    """Return SQL that INSERTs one overview row per standard header."""
    statements = []
    for hdr in OVERVIEW_FMT:
        statements.append("""\n    INSERT INTO overview (header) VALUES ('%s');\n""" % (adbapi.safe(hdr),))
    return ''.join(statements)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.conf import settings
from django.conf.urls import patterns, url
from django.views.decorators.cache import cache_page
from regulations.views.about import about
from regulations.views.chrome_breakaway import ChromeSXSView
from regulations.views.chrome import (
ChromeInterpView, ChromeLandingView, ChromeParagraphView,
ChromeRegulationView, ChromeSearchView, ChromeSectionView,
ChromeSubterpView)
from regulations.views.diff import ChromeSectionDiffView
from regulations.views.diff import PartialSectionDiffView
from regulations.views.partial import PartialDefinitionView
from regulations.views.partial import PartialParagraphView
from regulations.views.partial import PartialRegulationView, PartialSectionView
from regulations.views import partial_interp
from regulations.views.partial_search import PartialSearch
from regulations.views.partial_sxs import ParagraphSXSView
from regulations.views.redirect import diff_redirect, redirect_by_date
from regulations.views.redirect import redirect_by_date_get
from regulations.views.sidebar import SideBarView
from regulations.views.universal_landing import universal
# Re-usable URL regex fragments shared by the route table below.
meta_version = r'(?P<%s>[-\d\w_]+)'
# The three version-style capture groups differ only in group name.
version_pattern, newer_version_pattern, notice_pattern = (
    meta_version % name for name in ('version', 'newer_version', 'notice_id'))
reg_pattern = r'(?P<label_id>[\d]+)'
section_pattern = r'(?P<label_id>[\d]+[-][\w]+)'
interp_pattern = r'(?P<label_id>[-\d\w]+[-]Interp)'
paragraph_pattern = r'(?P<label_id>[-\d\w]+)'
subterp_pattern = r'(?P<label_id>[\d]+-(Appendices|Subpart(-[A-Z]+)?)-Interp)'
# Long-term page-cache decorator backed by the eregs_longterm_cache backend.
_longterm_timeout = settings.CACHES['eregs_longterm_cache']['TIMEOUT']
lt_cache = cache_page(_longterm_timeout, cache='eregs_longterm_cache')
# NOTE: pattern order matters — Django resolves top-down, and several
# patterns (section/interp/paragraph) can match overlapping label ids.
urlpatterns = patterns(
    '',
    url(r'^$', universal, name='universal_landing'),
    # About page
    url(r'^about$', about, name='regulations_about'),
    # Redirect to version by date (by GET)
    # Example http://.../regulation_redirect/201-3-v
    url(r'^regulation_redirect/%s$' % paragraph_pattern, redirect_by_date_get,
        name='redirect_by_date_get'),
    # Redirect to a diff based on GET params
    # Example http://.../diff_redirect/201-3/old_version?new_version=new
    url(r'^diff_redirect/%s/%s$' % (section_pattern, version_pattern),
        diff_redirect, name='diff_redirect'),
    # A section-by-section paragraph with chrome
    # Example: http://.../sxs/201-2-g/2011-1738
    url(r'^sxs/%s/%s$' % (paragraph_pattern, notice_pattern),
        lt_cache(ChromeSXSView.as_view()),
        name='chrome_sxs_view'),
    # Search results for non-JS viewers
    # Example: http://.../search?q=term&version=2011-1738
    url(r'^search/%s$' % reg_pattern,
        ChromeSearchView.as_view(),
        name='chrome_search'),
    # Diff view of a section for non-JS viewers (or book markers)
    # Example: http://.../diff/201-4/2011-1738/2013-10704
    url(r'^diff/%s/%s/%s$' %
        (section_pattern, version_pattern, newer_version_pattern),
        lt_cache(ChromeSectionDiffView.as_view()),
        name='chrome_section_diff_view'),
    # Redirect to version by date
    # Example: http://.../201-3-v/1999/11/8
    url(r'^%s/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})$'
        % paragraph_pattern, redirect_by_date, name='redirect_by_date'),
    # A regulation section with chrome
    # Example: http://.../201-4/2013-10704
    url(r'^%s/%s$' % (section_pattern, version_pattern),
        lt_cache(ChromeSectionView.as_view()),
        name='chrome_section_view'),
    # Subterp, interpretations of a whole subpart, emptypart or appendices
    # Example: http://.../201-Subpart-A-Interp/2013-10706
    #          http://.../201-Subpart-Interp/2013-10706
    #          http://.../201-Appendices-Interp/2013-10706
    # NOTE(review): unlike every other route, name= is taken from a class
    # attribute rather than a string literal — presumably it holds the
    # route name; confirm against ChromeSubterpView.
    url(r'^%s/%s$' % (subterp_pattern, version_pattern),
        lt_cache(ChromeSubterpView.as_view()),
        name=ChromeSubterpView.version_switch_view),
    # Interpretation of a section/paragraph or appendix
    # Example: http://.../201-4-Interp/2013-10704
    url(r'^%s/%s$' % (interp_pattern, version_pattern),
        lt_cache(ChromeInterpView.as_view()),
        name='chrome_interp_view'),
    # The whole regulation with chrome
    # Example: http://.../201/2013-10704
    url(r'^%s/%s$' % (reg_pattern, version_pattern),
        lt_cache(ChromeRegulationView.as_view()),
        name='chrome_regulation_view'),
    # A regulation paragraph with chrome
    # Example: http://.../201-2-g/2013-10704
    url(r'^%s/%s$' % (paragraph_pattern, version_pattern),
        lt_cache(ChromeParagraphView.as_view()),
        name='chrome_paragraph_view'),
    # A regulation landing page
    # Example: http://.../201
    url(r'^%s$' % reg_pattern, ChromeLandingView.as_view(),
        name='regulation_landing_view'),
    # Load just the sidebar
    # Example: http://.../partial/sidebar/201-2/2013-10704
    url(r'^partial/sidebar/%s/%s$' % (paragraph_pattern, version_pattern),
        SideBarView.as_view(),
        name='sidebar'),
    # Load just search results
    url(r'^partial/search/%s$' % reg_pattern,
        PartialSearch.as_view(),
        name='partial_search'),
    # A diff view of a section (without chrome)
    url(r'^partial/diff/%s/%s/%s$' % (
        section_pattern, version_pattern, newer_version_pattern),
        lt_cache(PartialSectionDiffView.as_view()),
        name='partial_section_diff_view'),
    # A section-by-section paragraph (without chrome)
    # Example: http://.../partial/sxs/201-2-g/2011-1738
    url(r'^partial/sxs/%s/%s$' % (paragraph_pattern, notice_pattern),
        lt_cache(ParagraphSXSView.as_view()),
        name='paragraph_sxs_view'),
    # A definition templated to be displayed in the sidebar (without chrome)
    # Example: http://.../partial/definition/201-2-g/2011-1738
    url(r'^partial/definition/%s/%s$' % (paragraph_pattern, version_pattern),
        lt_cache(PartialDefinitionView.as_view()),
        name='partial_definition_view'),
    # A regulation section without chrome
    # Example: http://.../partial/201-4/2013-10704
    url(r'^partial/%s/%s$' % (section_pattern, version_pattern),
        lt_cache(PartialSectionView.as_view()),
        name='partial_section_view'),
    # Subterp, interpretations of a whole subpart, emptypart or appendices
    # Example: http://.../partial/201-Subpart-A-Interp/2013-10706
    #          http://.../partial/201-Subpart-Interp/2013-10706
    #          http://.../partial/201-Appendices-Interp/2013-10706
    url(r'^partial/%s/%s$' % (subterp_pattern, version_pattern),
        lt_cache(partial_interp.PartialSubterpView.as_view()),
        name='partial_subterp_view'),
    # An interpretation of a section/paragraph or appendix without chrome.
    # Example: http://.../partial/201-2-Interp/2013-10704
    url(r'^partial/%s/%s$' % (interp_pattern, version_pattern),
        lt_cache(partial_interp.PartialInterpView.as_view()),
        name='partial_interp_view'),
    # The whole regulation without chrome; not too useful; added for symmetry
    # Example: http://.../partial/201/2013-10704
    url(r'^partial/%s/%s$' % (reg_pattern, version_pattern),
        lt_cache(PartialRegulationView.as_view()),
        name='partial_regulation_view'),
    # A regulation paragraph without chrome.
    # Example: http://.../partial/201-2-g/2013-10704
    url(r'^partial/%s/%s$' % (paragraph_pattern, version_pattern),
        lt_cache(PartialParagraphView.as_view()),
        name='partial_paragraph_view'),
)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from numpy._core.strings import *
from numpy._core.strings import __all__, __doc__
|
python
|
github
|
https://github.com/numpy/numpy
|
numpy/strings/__init__.py
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import test
# Tempest's global configuration object (options read from tempest.conf).
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
    """Checks on a single shared server: attributes, listing, validation."""
    disk_config = 'AUTO'

    @classmethod
    def setup_credentials(cls):
        # Network must be prepared before credentials so the server
        # created in resource_setup is reachable for SSH validation.
        cls.prepare_instance_network()
        super(ServersTestJSON, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        super(ServersTestJSON, cls).setup_clients()
        cls.client = cls.servers_client
        cls.network_client = cls.os.network_client
        cls.networks_client = cls.os.networks_client
        cls.subnets_client = cls.os.subnets_client

    @classmethod
    def resource_setup(cls):
        """Boot one validatable, ACTIVE server shared by all tests."""
        cls.set_validation_resources()
        super(ServersTestJSON, cls).resource_setup()
        cls.meta = {'hello': 'world'}
        cls.accessIPv4 = '1.1.1.1'
        # Deliberately non-canonical IPv6 (see test_verify_server_details).
        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
        cls.name = data_utils.rand_name('server')
        cls.password = data_utils.rand_password()
        disk_config = cls.disk_config
        cls.server_initial = cls.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            name=cls.name,
            metadata=cls.meta,
            accessIPv4=cls.accessIPv4,
            accessIPv6=cls.accessIPv6,
            disk_config=disk_config,
            adminPass=cls.password)
        cls.server = (cls.client.show_server(cls.server_initial['id'])
                      ['server'])

    def _create_net_subnet_ret_net_from_cidr(self, cidr):
        """Create a network + IPv4 subnet for *cidr*; return the network.

        Both resources are scheduled for cleanup.
        """
        name_net = data_utils.rand_name(self.__class__.__name__)
        net = self.networks_client.create_network(name=name_net)
        self.addCleanup(self.networks_client.delete_network,
                        net['network']['id'])
        subnet = self.subnets_client.create_subnet(
            network_id=net['network']['id'],
            cidr=cidr,
            ip_version=4)
        self.addCleanup(self.subnets_client.delete_subnet,
                        subnet['subnet']['id'])
        return net

    @test.attr(type='smoke')
    @test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
    def test_verify_server_details(self):
        # Verify the specified server attributes are set correctly
        self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
        # NOTE(maurosr): See http://tools.ietf.org/html/rfc5952 (section 4)
        # Here we compare directly with the canonicalized format.
        self.assertEqual(self.server['accessIPv6'],
                         str(netaddr.IPAddress(self.accessIPv6)))
        self.assertEqual(self.name, self.server['name'])
        self.assertEqual(self.image_ref, self.server['image']['id'])
        self.assertEqual(self.flavor_ref, self.server['flavor']['id'])
        self.assertEqual(self.meta, self.server['metadata'])

    @test.attr(type='smoke')
    @test.idempotent_id('9a438d88-10c6-4bcd-8b5b-5b6e25e1346f')
    def test_list_servers(self):
        # The created server should be in the list of all servers
        body = self.client.list_servers()
        servers = body['servers']
        # Generator predicate instead of building a throwaway list in any().
        found = any(i['id'] == self.server['id'] for i in servers)
        self.assertTrue(found)

    @test.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
    def test_list_servers_with_detail(self):
        # The created server should be in the detailed list of all servers
        body = self.client.list_servers(detail=True)
        servers = body['servers']
        found = any(i['id'] == self.server['id'] for i in servers)
        self.assertTrue(found)

    @test.idempotent_id('cbc0f52f-05aa-492b-bdc1-84b575ca294b')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_verify_created_server_vcpus(self):
        # Verify that the number of vcpus reported by the instance matches
        # the amount stated by the flavor
        flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(self.server),
            self.ssh_user,
            self.password,
            self.validation_resources['keypair']['private_key'])
        self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())

    @test.idempotent_id('ac1ad47f-984b-4441-9274-c9079b7a0666')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_host_name_is_same_as_server_name(self):
        # Verify the instance host name is the same as the server name
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(self.server),
            self.ssh_user,
            self.password,
            self.validation_resources['keypair']['private_key'])
        self.assertTrue(linux_client.hostname_equals_servername(self.name))

    @test.idempotent_id('ed20d3fb-9d1f-4329-b160-543fbd5d9811')
    def test_create_server_with_scheduler_hint_group(self):
        # Create a server with the scheduler hint "group".
        name = data_utils.rand_name('server_group')
        policies = ['affinity']
        body = self.server_groups_client.create_server_group(
            name=name, policies=policies)['server_group']
        group_id = body['id']
        self.addCleanup(self.server_groups_client.delete_server_group,
                        group_id)
        hints = {'group': group_id}
        server = self.create_test_server(scheduler_hints=hints,
                                         wait_until='ACTIVE')
        # Check a server is in the group
        server_group = (self.server_groups_client.show_server_group(group_id)
                        ['server_group'])
        self.assertIn(server['id'], server_group['members'])

    @test.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
    @testtools.skipUnless(CONF.service_available.neutron,
                          'Neutron service must be available.')
    def test_verify_multiple_nics_order(self):
        # Verify that the networks order given at the server creation is
        # preserved within the server.
        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
        networks = [{'uuid': net1['network']['id']},
                    {'uuid': net2['network']['id']}]
        server_multi_nics = self.create_test_server(
            networks=networks, wait_until='ACTIVE')
        # Cleanup server; this is needed in the test case because with the LIFO
        # nature of the cleanups, if we don't delete the server first, the port
        # will still be part of the subnet and we'll get a 409 from Neutron
        # when trying to delete the subnet. The tear down in the base class
        # will try to delete the server and get a 404 but it's ignored so
        # we're OK.
        def cleanup_server():
            self.client.delete_server(server_multi_nics['id'])
            waiters.wait_for_server_termination(self.client,
                                                server_multi_nics['id'])
        self.addCleanup(cleanup_server)
        addresses = (self.client.list_addresses(server_multi_nics['id'])
                     ['addresses'])
        # We can't predict the ip addresses assigned to the server on networks.
        # Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
        # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
        # address is in first network, similarly second address is in second
        # network.
        addr = [addresses[net1['network']['name']][0]['addr'],
                addresses[net2['network']['name']][0]['addr']]
        networks = [netaddr.IPNetwork('19.80.0.0/24'),
                    netaddr.IPNetwork('19.86.0.0/24')]
        for address, network in zip(addr, networks):
            self.assertIn(address, network)

    @test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
    @testtools.skipUnless(CONF.service_available.neutron,
                          'Neutron service must be available.')
    # The below skipUnless should be removed once Kilo-eol happens.
    @testtools.skipUnless(CONF.compute_feature_enabled.
                          allow_duplicate_networks,
                          'Duplicate networks must be allowed')
    def test_verify_duplicate_network_nics(self):
        # Verify that server creation does not fail when more than one nic
        # is created on the same network.
        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
        networks = [{'uuid': net1['network']['id']},
                    {'uuid': net2['network']['id']},
                    {'uuid': net1['network']['id']}]
        server_multi_nics = self.create_test_server(
            networks=networks, wait_until='ACTIVE')
        # Delete the server before the subnets (see LIFO cleanup note in
        # test_verify_multiple_nics_order).
        def cleanup_server():
            self.client.delete_server(server_multi_nics['id'])
            waiters.wait_for_server_termination(self.client,
                                                server_multi_nics['id'])
        self.addCleanup(cleanup_server)
        addresses = (self.client.list_addresses(server_multi_nics['id'])
                     ['addresses'])
        addr = [addresses[net1['network']['name']][0]['addr'],
                addresses[net2['network']['name']][0]['addr'],
                addresses[net1['network']['name']][1]['addr']]
        networks = [netaddr.IPNetwork('19.80.0.0/24'),
                    netaddr.IPNetwork('19.86.0.0/24'),
                    netaddr.IPNetwork('19.80.0.0/24')]
        for address, network in zip(addr, networks):
            self.assertIn(address, network)
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
    """Server checks that require admin-created flavors (extra specs)."""
    disk_config = 'AUTO'
    @classmethod
    def setup_credentials(cls):
        # Network must exist before credentials so the server is reachable
        # for the SSH-based validation below.
        cls.prepare_instance_network()
        super(ServersWithSpecificFlavorTestJSON, cls).setup_credentials()
    @classmethod
    def setup_clients(cls):
        super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
        # Flavor creation needs admin credentials; server ops do not.
        cls.flavor_client = cls.os_adm.flavors_client
        cls.client = cls.servers_client
    @classmethod
    def resource_setup(cls):
        cls.set_validation_resources()
        super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
    @test.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_verify_created_server_ephemeral_disk(self):
        """Boot servers with and without an ephemeral disk and compare
        the guest's partition count (expects exactly one extra partition
        for the ephemeral disk — TODO confirm for multi-disk images)."""
        # Verify that the ephemeral disk is created when creating server
        flavor_base = self.flavors_client.show_flavor(
            self.flavor_ref)['flavor']
        def create_flavor_with_extra_specs():
            # Clone the base flavor's ram/vcpus/disk, adding ephemeral=1.
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']
            # Create a flavor with extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_with_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_with_eph_disk_id,
                                    ephemeral=1))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])
            return flavor['id']
        def create_flavor_without_extra_specs():
            # Clone the base flavor's ram/vcpus/disk with no ephemeral disk.
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']
            # Create a flavor without extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_no_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_no_eph_disk_id))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])
            return flavor['id']
        def flavor_clean_up(flavor_id):
            # Delete and wait: a flavor cannot be removed while in use.
            self.flavor_client.delete_flavor(flavor_id)
            self.flavor_client.wait_for_resource_deletion(flavor_id)
        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        flavor_no_eph_disk_id = create_flavor_without_extra_specs()
        admin_pass = self.image_ssh_password
        server_no_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id)
        # Get partition number of server without extra specs.
        server_no_eph_disk = self.client.show_server(
            server_no_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_no_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'])
        partition_num = len(linux_client.get_partitions().split('\n'))
        # Explicit server deletion necessary for Juno compatibility
        self.client.delete_server(server_no_eph_disk['id'])
        server_with_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id)
        server_with_eph_disk = self.client.show_server(
            server_with_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_with_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'])
        partition_num_emph = len(linux_client.get_partitions().split('\n'))
        self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
    """Re-run the ServersTestJSON suite with MANUAL disk partitioning."""
    disk_config = 'MANUAL'

    @classmethod
    def skip_checks(cls):
        super(ServersTestManualDisk, cls).skip_checks()
        # The whole class is meaningless without the DiskConfig extension.
        if not CONF.compute_feature_enabled.disk_config:
            raise cls.skipException("DiskConfig extension not enabled.")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import hashlib
import hmac
import time
import urlparse
import unittest
from lxml import objectify
import odoo
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment.tests.common import PaymentAcquirerCommon
from odoo.addons.payment_authorize.controllers.main import AuthorizeController
from odoo.tools import mute_logger
@odoo.tests.common.at_install(True)
@odoo.tests.common.post_install(True)
class AuthorizeCommon(PaymentAcquirerCommon):
    """Shared fixtures for the Authorize.net acquirer test cases."""

    def setUp(self):
        super(AuthorizeCommon, self).setUp()
        # Authorize.net's test environment only supports USD.
        usd = self.env['res.currency'].search([('name', '=', 'USD')], limit=1)
        self.currency_usd = usd[0]
        # Acquirer record shipped with the payment module.
        self.authorize = self.env.ref('payment.payment_acquirer_authorize')
        # Capture payments when the sales order is confirmed
        # (the original comment called this 'capture' mode).
        self.authorize.auto_confirm = 'confirm_so'
@odoo.tests.common.at_install(True)
@odoo.tests.common.post_install(True)
class AuthorizeForm(AuthorizeCommon):
def _authorize_generate_hashing(self, values):
data = '^'.join([
values['x_login'],
values['x_fp_sequence'],
values['x_fp_timestamp'],
values['x_amount'],
]) + '^'
return hmac.new(str(values['x_trans_key']), data, hashlib.md5).hexdigest()
def test_10_Authorize_form_render(self):
self.assertEqual(self.authorize.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering
# ----------------------------------------
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
form_values = {
'x_login': self.authorize.authorize_login,
'x_trans_key': self.authorize.authorize_transaction_key,
'x_amount': '320.0',
'x_show_form': 'PAYMENT_FORM',
'x_type': 'AUTH_CAPTURE',
'x_method': 'CC',
'x_fp_sequence': '%s%s' % (self.authorize.id, int(time.time())),
'x_version': '3.1',
'x_relay_response': 'TRUE',
'x_fp_timestamp': str(int(time.time())),
'x_relay_url': '%s' % urlparse.urljoin(base_url, AuthorizeController._return_url),
'x_cancel_url': '%s' % urlparse.urljoin(base_url, AuthorizeController._cancel_url),
'return_url': None,
'x_currency_code': 'USD',
'x_invoice_num': 'SO004',
'x_first_name': 'Norbert',
'x_last_name': 'Buyer',
'x_address': 'Huge Street 2/543',
'x_city': 'Sin City',
'x_zip': '1000',
'x_country': 'Belgium',
'x_phone': '0032 12 34 56 78',
'x_email': 'norbert.buyer@example.com',
'x_state': None,
'x_ship_to_first_name': 'Norbert',
'x_ship_to_last_name': 'Buyer',
'x_ship_to_address': 'Huge Street 2/543',
'x_ship_to_city': 'Sin City',
'x_ship_to_zip': '1000',
'x_ship_to_country': 'Belgium',
'x_ship_to_phone': '0032 12 34 56 78',
'x_ship_to_email': 'norbert.buyer@example.com',
'x_ship_to_state': None,
}
form_values['x_fp_hash'] = self._authorize_generate_hashing(form_values)
# render the button
res = self.authorize.render('SO004', 320.0, self.currency_usd.id, values=self.buyer_values)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://test.authorize.net/gateway/transact.dll', 'Authorize: wrong form POST url')
for el in tree.iterfind('input'):
values = el.values()
if values[1] in ['submit', 'x_fp_hash', 'return_url', 'x_state', 'x_ship_to_state']:
continue
self.assertEqual(
unicode(values[2], "utf-8"),
form_values[values[1]],
'Authorize: wrong value for input %s: received %s instead of %s' % (values[1], values[2], form_values[values[1]])
)
@mute_logger('odoo.addons.payment_authorize.models.payment', 'ValidationError')
def test_20_authorize_form_management(self):
# be sure not to do stupid thing
self.assertEqual(self.authorize.environment, 'test', 'test without test environment')
# typical data posted by authorize after client has successfully paid
authorize_post_data = {
'return_url': u'/shop/payment/validate',
'x_MD5_Hash': u'7934485E1C105940BE854208D10FAB4F',
'x_account_number': u'XXXX0027',
'x_address': u'Huge Street 2/543',
'x_amount': u'320.00',
'x_auth_code': u'E4W7IU',
'x_avs_code': u'Y',
'x_card_type': u'Visa',
'x_cavv_response': u'2',
'x_city': u'Sun City',
'x_company': u'',
'x_country': u'Belgium',
'x_cust_id': u'',
'x_cvv2_resp_code': u'',
'x_description': u'',
'x_duty': u'0.00',
'x_email': u'norbert.buyer@example.com',
'x_fax': u'',
'x_first_name': u'Norbert',
'x_freight': u'0.00',
'x_invoice_num': u'SO004',
'x_last_name': u'Buyer',
'x_method': u'CC',
'x_phone': u'0032 12 34 56 78',
'x_po_num': u'',
'x_response_code': u'1',
'x_response_reason_code': u'1',
'x_response_reason_text': u'This transaction has been approved.',
'x_ship_to_address': u'Huge Street 2/543',
'x_ship_to_city': u'Sun City',
'x_ship_to_company': u'',
'x_ship_to_country': u'Belgium',
'x_ship_to_first_name': u'Norbert',
'x_ship_to_last_name': u'Buyer',
'x_ship_to_state': u'',
'x_ship_to_zip': u'1000',
'x_state': u'',
'x_tax': u'0.00',
'x_tax_exempt': u'FALSE',
'x_test_request': u'false',
'x_trans_id': u'2217460311',
'x_type': u'auth_capture',
'x_zip': u'1000'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.env['payment.transaction'].form_feedback(authorize_post_data, 'authorize')
tx = self.env['payment.transaction'].create({
'amount': 320.0,
'acquirer_id': self.authorize.id,
'currency_id': self.currency_usd.id,
'reference': 'SO004',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france.id})
# validate it
self.env['payment.transaction'].form_feedback(authorize_post_data, 'authorize')
# check state
self.assertEqual(tx.state, 'done', 'Authorize: validation did not put tx into done state')
self.assertEqual(tx.acquirer_reference, authorize_post_data.get('x_trans_id'), 'Authorize: validation did not update tx payid')
# reset tx
tx.write({'state': 'draft', 'date_validate': False, 'acquirer_reference': False})
# simulate an error
authorize_post_data['x_response_code'] = u'3'
self.env['payment.transaction'].form_feedback(authorize_post_data, 'authorize')
# check state
self.assertEqual(tx.state, 'error', 'Authorize: erroneous validation did not put tx into error state')
@unittest.skip("Authorize s2s test disabled: We do not want to overload Authorize.net with runbot's requests")
def test_30_authorize_s2s(self):
# be sure not to do stupid thing
authorize = self.authorize
self.assertEqual(authorize.environment, 'test', 'test without test environment')
# add credential
# FIXME: put this test in master-nightly on odoo/odoo + create sandbox account
authorize.write({
'authorize_transaction_key': '',
'authorize_login': '',
})
self.assertTrue(authorize.authorize_test_credentials, 'Authorize.net: s2s authentication failed')
# create payment meethod
payment_token = self.env['payment.token'].create({
'acquirer_id': authorize.id,
'partner_id': self.buyer_id,
'cc_number': '4111 1111 1111 1111',
'cc_expiry': '02 / 26',
'cc_brand': 'visa',
'cc_cvc': '111',
'cc_holder_name': 'test',
})
# create normal s2s transaction
transaction = self.env['payment.transaction'].create({
'amount': 500,
'acquirer_id': authorize.id,
'type': 'server2server',
'currency_id': self.currency_usd.id,
'reference': 'test_ref_%s' % odoo.fields.Date.today(),
'payment_token_id': payment_token.id,
'partner_id': self.buyer_id,
})
transaction.authorize_s2s_do_transaction()
self.assertEqual(transaction.state, 'done',)
# switch to 'authorize only'
# create authorize only s2s transaction & capture it
self.authorize.auto_confirm = 'authorize'
transaction = self.env['payment.transaction'].create({
'amount': 500,
'acquirer_id': authorize.id,
'type': 'server2server',
'currency_id': self.currency_usd.id,
'reference': 'test_%s' % int(time.time()),
'payment_token_id': payment_token.id,
'partner_id': self.buyer_id,
})
transaction.authorize_s2s_do_transaction()
self.assertEqual(transaction.state, 'authorized')
transaction.action_capture()
self.assertEqual(transaction.state, 'done')
# create authorize only s2s transaction & void it
self.authorize.auto_confirm = 'authorize'
transaction = self.env['payment.transaction'].create({
'amount': 500,
'acquirer_id': authorize.id,
'type': 'server2server',
'currency_id': self.currency_usd.id,
'reference': 'test_%s' % int(time.time()),
'payment_token_id': payment_token.id,
'partner_id': self.buyer_id,
})
transaction.authorize_s2s_do_transaction()
self.assertEqual(transaction.state, 'authorized')
transaction.action_void()
self.assertEqual(transaction.state, 'cancel')
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright 2015-2015 breakwa11
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
def create_obfs(method):
    """Factory used by obfs_map entries: build the pass-through plugin."""
    instance = plain(method)
    return instance
# Registry mapping an obfs method name to a (factory,) tuple; both 'plain'
# and 'origin' resolve to the same pass-through implementation.
obfs_map = {
    'plain': (create_obfs,),
    'origin': (create_obfs,),
}
class plain(object):
    """No-op obfuscation plugin: every hook passes its buffer through
    unchanged.

    Used for the 'plain' and 'origin' obfs methods, where traffic is
    forwarded without any additional framing.
    """
    def __init__(self, method):
        # Name of the obfs method this instance was created for.
        self.method = method
        # Populated later through set_server_info().
        self.server_info = None
    def init_data(self):
        """Return the (empty) shared initialisation data for this plugin."""
        return b''
    def set_server_info(self, server_info):
        """Remember the server configuration object for later use."""
        self.server_info = server_info
    def client_pre_encrypt(self, buf):
        """Client-side hook before encryption: identity."""
        return buf
    def client_encode(self, buf):
        """Client-side hook after encryption: identity."""
        return buf
    def client_decode(self, buf):
        """Client-side hook on received data.

        Returns (buffer_to_recv, is_need_to_encode_and_send_back).
        """
        return buf, False
    def client_post_decrypt(self, buf):
        """Client-side hook after decryption: identity."""
        return buf
    def server_pre_encrypt(self, buf):
        """Server-side hook before encryption: identity."""
        return buf
    def server_encode(self, buf):
        """Server-side hook after encryption: identity."""
        return buf
    def server_decode(self, buf):
        """Server-side hook on received data.

        Returns (buffer_to_recv, is_need_decrypt,
        is_need_to_encode_and_send_back).
        """
        return buf, True, False
    def server_post_decrypt(self, buf):
        """Server-side hook after decryption: identity."""
        return buf
    def dispose(self):
        """Nothing to clean up for the pass-through plugin."""
        pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\BrowserKit;
use Symfony\Component\BrowserKit\Exception\BadMethodCallException;
use Symfony\Component\BrowserKit\Exception\InvalidArgumentException;
use Symfony\Component\BrowserKit\Exception\LogicException;
use Symfony\Component\BrowserKit\Exception\RuntimeException;
use Symfony\Component\DomCrawler\Crawler;
use Symfony\Component\DomCrawler\Form;
use Symfony\Component\DomCrawler\Link;
use Symfony\Component\Process\PhpProcess;
use Symfony\Component\Process\Process;
/**
* Simulates a browser.
*
* To make the actual request, you need to implement the doRequest() method.
*
* If you want to be able to run requests in their own process (insulated flag),
* you need to also implement the getScript() method.
*
* @author Fabien Potencier <fabien@symfony.com>
*
* @template TRequest of object
* @template TResponse of object
*/
abstract class AbstractBrowser
{
    protected History $history;
    protected CookieJar $cookieJar;
    protected array $server = [];
    protected Request $internalRequest;
    /** @psalm-var TRequest */
    protected object $request;
    protected Response $internalResponse;
    /** @psalm-var TResponse */
    protected object $response;
    protected Crawler $crawler;
    // sprintf() pattern applied to response content before crawling, or false.
    protected string|false $wrapContentPattern = false;
    protected bool $insulated = false;
    // Location of the pending redirect, or null when the last response was not a redirect.
    protected ?string $redirect;
    protected bool $followRedirects = true;
    protected bool $followMetaRefresh = false;
    private int $maxRedirects = -1;
    private int $redirectCount = 0;
    // Serialized requests that triggered a redirect; back()/forward() skip these history entries.
    private array $redirects = [];
    // False while request() is re-entered from followRedirect(), so redirectCount keeps accumulating.
    private bool $isMainRequest = true;
    /**
     * @param array $server The server parameters (equivalent of $_SERVER)
     */
    public function __construct(array $server = [], ?History $history = null, ?CookieJar $cookieJar = null)
    {
        $this->setServerParameters($server);
        $this->history = $history ?? new History();
        $this->cookieJar = $cookieJar ?? new CookieJar();
    }
    /**
     * Sets whether to automatically follow redirects or not.
     */
    public function followRedirects(bool $followRedirects = true): void
    {
        $this->followRedirects = $followRedirects;
    }
    /**
     * Sets whether to automatically follow meta refresh redirects or not.
     */
    public function followMetaRefresh(bool $followMetaRefresh = true): void
    {
        $this->followMetaRefresh = $followMetaRefresh;
    }
    /**
     * Returns whether client automatically follows redirects or not.
     */
    public function isFollowingRedirects(): bool
    {
        return $this->followRedirects;
    }
    /**
     * Sets the maximum number of redirects that crawler can follow.
     *
     * A negative value means "unlimited" and is normalized to -1; passing a
     * negative value also disables redirect following entirely.
     */
    public function setMaxRedirects(int $maxRedirects): void
    {
        $this->maxRedirects = $maxRedirects < 0 ? -1 : $maxRedirects;
        $this->followRedirects = -1 !== $this->maxRedirects;
    }
    /**
     * Returns the maximum number of redirects that crawler can follow.
     */
    public function getMaxRedirects(): int
    {
        return $this->maxRedirects;
    }
    /**
     * Sets the insulated flag.
     *
     * @throws LogicException When Symfony Process Component is not installed
     */
    public function insulate(bool $insulated = true): void
    {
        if ($insulated && !class_exists(Process::class)) {
            throw new LogicException('Unable to isolate requests as the Symfony Process Component is not installed. Try running "composer require symfony/process".');
        }
        $this->insulated = $insulated;
    }
    /**
     * Sets server parameters.
     *
     * A default User-Agent is always provided unless overridden by $server.
     */
    public function setServerParameters(array $server): void
    {
        $this->server = array_merge([
            'HTTP_USER_AGENT' => 'Symfony BrowserKit',
        ], $server);
    }
    /**
     * Sets single server parameter.
     */
    public function setServerParameter(string $key, string $value): void
    {
        $this->server[$key] = $value;
    }
    /**
     * Gets single server parameter for specified key.
     */
    public function getServerParameter(string $key, mixed $default = ''): mixed
    {
        return $this->server[$key] ?? $default;
    }
    public function xmlHttpRequest(string $method, string $uri, array $parameters = [], array $files = [], array $server = [], ?string $content = null, bool $changeHistory = true): Crawler
    {
        // The AJAX marker header only applies to this one request; the
        // finally block removes it even if request() throws.
        $this->setServerParameter('HTTP_X_REQUESTED_WITH', 'XMLHttpRequest');
        try {
            return $this->request($method, $uri, $parameters, $files, $server, $content, $changeHistory);
        } finally {
            unset($this->server['HTTP_X_REQUESTED_WITH']);
        }
    }
    /**
     * Converts the request parameters into a JSON string and uses it as request content.
     */
    public function jsonRequest(string $method, string $uri, array $parameters = [], array $server = [], bool $changeHistory = true): Crawler
    {
        $content = json_encode($parameters, \JSON_PRESERVE_ZERO_FRACTION);
        // JSON headers are scoped to this request only, like in xmlHttpRequest().
        $this->setServerParameter('CONTENT_TYPE', 'application/json');
        $this->setServerParameter('HTTP_ACCEPT', 'application/json');
        try {
            return $this->request($method, $uri, [], [], $server, $content, $changeHistory);
        } finally {
            unset($this->server['CONTENT_TYPE']);
            unset($this->server['HTTP_ACCEPT']);
        }
    }
    /**
     * Returns the History instance.
     */
    public function getHistory(): History
    {
        return $this->history;
    }
    /**
     * Returns the CookieJar instance.
     */
    public function getCookieJar(): CookieJar
    {
        return $this->cookieJar;
    }
    /**
     * Returns the current Crawler instance.
     *
     * @throws BadMethodCallException When no request has been performed yet
     */
    public function getCrawler(): Crawler
    {
        return $this->crawler ?? throw new BadMethodCallException(\sprintf('The "request()" method must be called before "%s()".', __METHOD__));
    }
    /**
     * Sets the content wrapper format.
     *
     * @example <table>%s</table>
     */
    public function wrapContent(false|string $pattern): void
    {
        $this->wrapContentPattern = $pattern;
    }
    /**
     * Returns the current BrowserKit Response instance.
     *
     * @throws BadMethodCallException When no request has been performed yet
     */
    public function getInternalResponse(): Response
    {
        return $this->internalResponse ?? throw new BadMethodCallException(\sprintf('The "request()" method must be called before "%s()".', __METHOD__));
    }
    /**
     * Returns the current origin response instance.
     *
     * The origin response is the response instance that is returned
     * by the code that handles requests.
     *
     * @psalm-return TResponse
     *
     * @see doRequest()
     */
    public function getResponse(): object
    {
        return $this->response ?? throw new BadMethodCallException(\sprintf('The "request()" method must be called before "%s()".', __METHOD__));
    }
    /**
     * Returns the current BrowserKit Request instance.
     *
     * @throws BadMethodCallException When no request has been performed yet
     */
    public function getInternalRequest(): Request
    {
        return $this->internalRequest ?? throw new BadMethodCallException(\sprintf('The "request()" method must be called before "%s()".', __METHOD__));
    }
    /**
     * Returns the current origin Request instance.
     *
     * The origin request is the request instance that is sent
     * to the code that handles requests.
     *
     * @psalm-return TRequest
     *
     * @see doRequest()
     */
    public function getRequest(): object
    {
        return $this->request ?? throw new BadMethodCallException(\sprintf('The "request()" method must be called before "%s()".', __METHOD__));
    }
    /**
     * Clicks on a given link.
     *
     * @param array $serverParameters An array of server parameters
     */
    public function click(Link $link, array $serverParameters = []): Crawler
    {
        // A Form is also a Link; submitting it is the closer analogue of a click.
        if ($link instanceof Form) {
            return $this->submit($link, [], $serverParameters);
        }
        return $this->request($link->getMethod(), $link->getUri(), [], [], $serverParameters);
    }
    /**
     * Clicks the first link (or clickable image) that contains the given text.
     *
     * @param string $linkText The text of the link or the alt attribute of the clickable image
     * @param array $serverParameters An array of server parameters
     */
    public function clickLink(string $linkText, array $serverParameters = []): Crawler
    {
        $crawler = $this->crawler ?? throw new BadMethodCallException(\sprintf('The "request()" method must be called before "%s()".', __METHOD__));
        return $this->click($crawler->selectLink($linkText)->link(), $serverParameters);
    }
    /**
     * Submits a form.
     *
     * @param array $values An array of form field values
     * @param array $serverParameters An array of server parameters
     */
    public function submit(Form $form, array $values = [], array $serverParameters = []): Crawler
    {
        $form->setValues($values);
        return $this->request($form->getMethod(), $form->getUri(), $form->getPhpValues(), $form->getPhpFiles(), $serverParameters);
    }
    /**
     * Finds the first form that contains a button with the given content and
     * uses it to submit the given form field values.
     *
     * @param string $button The text content, id, value or name of the form <button> or <input type="submit">
     * @param array $fieldValues Use this syntax: ['my_form[name]' => '...', 'my_form[email]' => '...']
     * @param string $method The HTTP method used to submit the form
     * @param array $serverParameters These values override the ones stored in $_SERVER (HTTP headers must include an HTTP_ prefix as PHP does)
     *
     * @throws InvalidArgumentException When no button matching $button exists
     */
    public function submitForm(string $button, array $fieldValues = [], string $method = 'POST', array $serverParameters = []): Crawler
    {
        $crawler = $this->crawler ?? throw new BadMethodCallException(\sprintf('The "request()" method must be called before "%s()".', __METHOD__));
        $buttonNode = $crawler->selectButton($button);
        if (0 === $buttonNode->count()) {
            throw new InvalidArgumentException(\sprintf('There is no button with "%s" as its content, id, value or name.', $button));
        }
        $form = $buttonNode->form($fieldValues, $method);
        return $this->submit($form, [], $serverParameters);
    }
    /**
     * Calls a URI.
     *
     * @param string $method The request method
     * @param string $uri The URI to fetch
     * @param array $parameters The Request parameters
     * @param array $files The files
     * @param array $server The server parameters (HTTP headers are referenced with an HTTP_ prefix as PHP does)
     * @param string $content The raw body data
     * @param bool $changeHistory Whether to update the history or not (only used internally for back(), forward(), and reload())
     */
    public function request(string $method, string $uri, array $parameters = [], array $files = [], array $server = [], ?string $content = null, bool $changeHistory = true): Crawler
    {
        // Only redirect hops (followRedirect() re-entry) increment the count;
        // a fresh top-level request resets it.
        if ($this->isMainRequest) {
            $this->redirectCount = 0;
        } else {
            ++$this->redirectCount;
        }
        $originalUri = $uri;
        $uri = $this->getAbsoluteUri($uri);
        $server = array_merge($this->server, $server);
        // If the caller gave a relative URI but forces an HTTP_HOST header,
        // rewrite the host part of the absolutized URI to match it.
        if (!empty($server['HTTP_HOST']) && !parse_url($originalUri, \PHP_URL_HOST)) {
            $uri = preg_replace('{^(https?\://)'.preg_quote($this->extractHost($uri)).'}', '${1}'.$server['HTTP_HOST'], $uri);
        }
        // Same idea for the scheme: an explicit HTTPS server flag wins over
        // the scheme inherited from history when the caller's URI had none.
        if (isset($server['HTTPS']) && !parse_url($originalUri, \PHP_URL_SCHEME)) {
            $uri = preg_replace('{^'.parse_url($uri, \PHP_URL_SCHEME).'}', $server['HTTPS'] ? 'https' : 'http', $uri);
        }
        // Mimic a browser: previous page becomes the Referer unless set explicitly.
        if (!isset($server['HTTP_REFERER']) && !$this->history->isEmpty()) {
            $server['HTTP_REFERER'] = $this->history->current()->getUri();
        }
        if (empty($server['HTTP_HOST'])) {
            $server['HTTP_HOST'] = $this->extractHost($uri);
        }
        $server['HTTPS'] = 'https' === parse_url($uri, \PHP_URL_SCHEME);
        $this->internalRequest = new Request($uri, $method, $parameters, $files, $this->cookieJar->allValues($uri), $server, $content);
        $this->request = $this->filterRequest($this->internalRequest);
        if (true === $changeHistory) {
            $this->history->add($this->internalRequest);
        }
        if ($this->insulated) {
            $this->response = $this->doRequestInProcess($this->request);
        } else {
            $this->response = $this->doRequest($this->request);
        }
        $this->internalResponse = $this->filterResponse($this->response);
        $this->cookieJar->updateFromResponse($this->internalResponse, $uri);
        $status = $this->internalResponse->getStatusCode();
        // Any 3xx response arms the pending redirect for followRedirect().
        if ($status >= 300 && $status < 400) {
            $this->redirect = $this->internalResponse->getHeader('Location');
        } else {
            $this->redirect = null;
        }
        if ($this->followRedirects && $this->redirect) {
            // Remember that this history entry was a redirect so that
            // back()/forward() skip over it.
            $this->redirects[serialize($this->history->current())] = true;
            return $this->crawler = $this->followRedirect();
        }
        $responseContent = $this->internalResponse->getContent();
        if ($this->wrapContentPattern) {
            $responseContent = \sprintf($this->wrapContentPattern, $responseContent);
        }
        $this->crawler = $this->createCrawlerFromContent($this->internalRequest->getUri(), $responseContent, $this->internalResponse->getHeader('Content-Type') ?? '');
        // Check for meta refresh redirect
        if ($this->followMetaRefresh && null !== $redirect = $this->getMetaRefreshUrl()) {
            $this->redirect = $redirect;
            $this->redirects[serialize($this->history->current())] = true;
            $this->crawler = $this->followRedirect();
        }
        return $this->crawler;
    }
    /**
     * Makes a request in another process.
     *
     * @psalm-param TRequest $request
     *
     * @psalm-return TResponse
     *
     * @throws \RuntimeException When processing returns exit code
     */
    protected function doRequestInProcess(object $request): object
    {
        // Deprecations triggered inside the child process are serialized to a
        // temp file so they can be re-triggered in the parent afterwards.
        $deprecationsFile = tempnam(sys_get_temp_dir(), 'deprec');
        putenv('SYMFONY_DEPRECATIONS_SERIALIZE='.$deprecationsFile);
        $_ENV['SYMFONY_DEPRECATIONS_SERIALIZE'] = $deprecationsFile;
        $process = new PhpProcess($this->getScript($request), null, null);
        $process->run();
        if (file_exists($deprecationsFile)) {
            $deprecations = file_get_contents($deprecationsFile);
            unlink($deprecationsFile);
            foreach ($deprecations ? unserialize($deprecations) : [] as $deprecation) {
                if ($deprecation[0]) {
                    // unsilenced on purpose
                    trigger_error($deprecation[1], \E_USER_DEPRECATED);
                } else {
                    @trigger_error($deprecation[1], \E_USER_DEPRECATED);
                }
            }
        }
        // The child script must print a serialized object ("O:<len>:..."); any
        // other output means it crashed.
        if (!$process->isSuccessful() || !preg_match('/^O\:\d+\:/', $process->getOutput())) {
            throw new RuntimeException(\sprintf('OUTPUT: %s ERROR OUTPUT: %s.', $process->getOutput(), $process->getErrorOutput()));
        }
        return unserialize($process->getOutput());
    }
    /**
     * Makes a request.
     *
     * @psalm-param TRequest $request
     *
     * @psalm-return TResponse
     */
    abstract protected function doRequest(object $request): object;
    /**
     * Returns the script to execute when the request must be insulated.
     *
     * @param object $request An origin request instance
     *
     * @psalm-param TRequest $request
     *
     * @throws LogicException When this abstract class is not implemented
     */
    protected function getScript(object $request): string
    {
        throw new LogicException('To insulate requests, you need to override the getScript() method.');
    }
    /**
     * Filters the BrowserKit request to the origin one.
     *
     * @psalm-return TRequest
     */
    protected function filterRequest(Request $request): object
    {
        return $request;
    }
    /**
     * Filters the origin response to the BrowserKit one.
     *
     * @psalm-param TResponse $response
     */
    protected function filterResponse(object $response): Response
    {
        return $response;
    }
    /**
     * Creates a crawler.
     *
     * This method returns null if the DomCrawler component is not available.
     */
    protected function createCrawlerFromContent(string $uri, string $content, string $type): ?Crawler
    {
        if (!class_exists(Crawler::class)) {
            return null;
        }
        $crawler = new Crawler(null, $uri, null);
        $crawler->addContent($content, $type);
        return $crawler;
    }
    /**
     * Goes back in the browser history.
     */
    public function back(): Crawler
    {
        // Skip history entries that were redirects, like a real browser does.
        do {
            $request = $this->history->back();
        } while (\array_key_exists(serialize($request), $this->redirects));
        return $this->requestFromRequest($request, false);
    }
    /**
     * Goes forward in the browser history.
     */
    public function forward(): Crawler
    {
        // Skip history entries that were redirects, like a real browser does.
        do {
            $request = $this->history->forward();
        } while (\array_key_exists(serialize($request), $this->redirects));
        return $this->requestFromRequest($request, false);
    }
    /**
     * Reloads the current browser.
     */
    public function reload(): Crawler
    {
        return $this->requestFromRequest($this->history->current(), false);
    }
    /**
     * Follow redirects?
     *
     * @throws LogicException If request was not a redirect
     */
    public function followRedirect(): Crawler
    {
        if (!isset($this->redirect)) {
            throw new LogicException('The request was not redirected.');
        }
        if (-1 !== $this->maxRedirects) {
            if ($this->redirectCount > $this->maxRedirects) {
                $this->redirectCount = 0;
                throw new LogicException(\sprintf('The maximum number (%d) of redirections was reached.', $this->maxRedirects));
            }
        }
        $request = $this->internalRequest;
        // For 301/302/303 the redirect is replayed as a GET without body or
        // files, matching common browser behaviour; 307/308 keep everything.
        if (\in_array($this->internalResponse->getStatusCode(), [301, 302, 303], true)) {
            $method = 'GET';
            $files = [];
            $content = null;
        } else {
            $method = $request->getMethod();
            $files = $request->getFiles();
            $content = $request->getContent();
        }
        if ('GET' === strtoupper($method)) {
            // Don't forward parameters for GET request as it should reach the redirection URI
            $parameters = [];
        } else {
            $parameters = $request->getParameters();
        }
        $server = $request->getServer();
        $server = $this->updateServerFromUri($server, $this->redirect);
        // Mark the nested request() call as a redirect hop (see request()).
        $this->isMainRequest = false;
        $response = $this->request($method, $this->redirect, $parameters, $files, $server, $content);
        $this->isMainRequest = true;
        return $response;
    }
    /**
     * @see https://dev.w3.org/html5/spec-preview/the-meta-element.html#attr-meta-http-equiv-refresh
     */
    private function getMetaRefreshUrl(): ?string
    {
        $metaRefresh = $this->getCrawler()->filter('head meta[http-equiv="refresh"]');
        foreach ($metaRefresh->extract(['content']) as $content) {
            // Only immediate ("0;URL=...") refreshes are treated as redirects;
            // the URL may be single-quoted, double-quoted or bare.
            if (preg_match('/^\s*0\s*;\s*URL\s*=\s*(?|\'([^\']++)|"([^"]++)|([^\'"].*))/i', $content, $m)) {
                return str_replace("\t\r\n", '', rtrim($m[1]));
            }
        }
        return null;
    }
    /**
     * Restarts the client.
     *
     * It flushes history and all cookies.
     */
    public function restart(): void
    {
        $this->cookieJar->clear();
        $this->history->clear();
    }
    /**
     * Takes a URI and converts it to absolute if it is not already absolute.
     */
    protected function getAbsoluteUri(string $uri): string
    {
        // already absolute?
        if (str_starts_with($uri, 'http://') || str_starts_with($uri, 'https://')) {
            return $uri;
        }
        if (!$this->history->isEmpty()) {
            $currentUri = $this->history->current()->getUri();
        } else {
            // No page visited yet: resolve against a synthetic base built from
            // the configured server parameters.
            $currentUri = \sprintf('http%s://%s/',
                isset($this->server['HTTPS']) ? 's' : '',
                $this->server['HTTP_HOST'] ?? 'localhost'
            );
        }
        // protocol relative URL
        if ('' !== trim($uri, '/') && str_starts_with($uri, '//')) {
            return parse_url($currentUri, \PHP_URL_SCHEME).':'.$uri;
        }
        // anchor or query string parameters?
        if (!$uri || '#' === $uri[0] || '?' === $uri[0]) {
            return preg_replace('/[#?].*?$/', '', $currentUri).$uri;
        }
        if ('/' !== $uri[0]) {
            // Relative path: resolve against the directory of the current URI.
            $path = parse_url($currentUri, \PHP_URL_PATH);
            if (!str_ends_with($path, '/')) {
                $path = substr($path, 0, strrpos($path, '/') + 1);
            }
            $uri = $path.$uri;
        }
        return preg_replace('#^(.*?//[^/?]+)[/?].*$#', '$1', $currentUri).$uri;
    }
    /**
     * Makes a request from a Request object directly.
     *
     * @param bool $changeHistory Whether to update the history or not (only used internally for back(), forward(), and reload())
     */
    protected function requestFromRequest(Request $request, bool $changeHistory = true): Crawler
    {
        return $this->request($request->getMethod(), $request->getUri(), $request->getParameters(), $request->getFiles(), $request->getServer(), $request->getContent(), $changeHistory);
    }
    private function updateServerFromUri(array $server, string $uri): array
    {
        $server['HTTP_HOST'] = $this->extractHost($uri);
        $scheme = parse_url($uri, \PHP_URL_SCHEME);
        $server['HTTPS'] = null === $scheme ? $server['HTTPS'] : 'https' === $scheme;
        // Conditional-request headers must not leak onto the redirect target.
        unset($server['HTTP_IF_NONE_MATCH'], $server['HTTP_IF_MODIFIED_SINCE']);
        return $server;
    }
    private function extractHost(string $uri): ?string
    {
        $host = parse_url($uri, \PHP_URL_HOST);
        // Keep a non-default port as part of the host, as HTTP_HOST would.
        if ($port = parse_url($uri, \PHP_URL_PORT)) {
            return $host.':'.$port;
        }
        return $host;
    }
}
// @php-cs-fixer-ignore error_suppression This file is explicitly expected to not silence each of trigger_error calls
|
php
|
github
|
https://github.com/symfony/symfony
|
src/Symfony/Component/BrowserKit/AbstractBrowser.php
|
"""This file contains the TestCommandLog class."""
import unittest
import mock
import pygame
from src.CommandLog import CommandLog
from src.CustomEvent import CustomEvent
from src.Point import Point
class TestCommandLog(unittest.TestCase):
    """Unit tests for the CommandLog class."""
    def setUp(self):
        """Create an empty CommandLog and an example BeeBot memory."""
        # Icons require fonts, so pygame must be initialised first.
        pygame.init()
        self.test_command_log = CommandLog(Point(0, 0), (25, 5))
        moves = (CustomEvent.MOVE_BEEBOT_UP,
                 CustomEvent.MOVE_BEEBOT_LEFT,
                 CustomEvent.MOVE_BEEBOT_RIGHT,
                 CustomEvent.MOVE_BEEBOT_DOWN)
        self.beebot_mem = [pygame.event.Event(move) for move in moves]
    def test_update(self):
        """Test update() constructs a CommandLog correctly."""
        self.test_command_log.update(self.beebot_mem)
        for position, icon in self.test_command_log.icons.items():
            # Each Icon is 5x5 and each subsequent Icon sits 5 pixels to
            # the left of the previous one.
            self.assertEqual(icon.size, (5, 5))
            self.assertEqual(icon.screen_location, Point(position * 5, 0))
    # Patch so we can tell whether removal_all is called.
    @mock.patch('src.IconGroup.IconGroup.removal_all')
    def test_update_do_nothing(self, removal_all_mock):
        """Test update() when no update to the BeeBot memory has occured."""
        # We assume the first update works fine ...
        self.test_command_log.update(self.beebot_mem)
        # ... and the second, identical one should do nothing.
        self.test_command_log.update(self.beebot_mem)
        # removal_all must have run exactly once (during the first update()).
        self.assertEqual(removal_all_mock.call_count, 1)
if __name__ == '__main__':
    # Allow the test suite to be executed directly as a script.
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
# Register the legacy v2 extension-list option so _get_flags() below can
# read and extend it.
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.legacy_v2.extensions')
class NetworksAssociateJsonTests(api_sample_base.ApiSampleTestBaseV21):
    """API sample tests for the os-networks-associate extension.

    Each test POSTs an action template to ``os-networks/1/action`` and
    expects an empty 202 response; the duplicated request/assert logic is
    factored into _check_network_action().
    """
    ADMIN_API = True
    extension_name = "os-networks-associate"
    extra_extensions_to_load = ["os-networks"]
    # Unique placeholder to detect "argument not supplied" in the stub below.
    _sentinel = object()
    def _get_flags(self):
        """Enable the legacy v2 extensions this test relies on."""
        f = super(NetworksAssociateJsonTests, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Networks_associate requires Networks to be update
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.os_networks.Os_networks')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.networks_associate.'
            'Networks_associate')
        return f
    def setUp(self):
        super(NetworksAssociateJsonTests, self).setUp()
        # Stub out the network API so no real (dis)association happens;
        # the sentinel defaults let the stub accept any call signature.
        def fake_associate(self, context, network_id,
                           host=NetworksAssociateJsonTests._sentinel,
                           project=NetworksAssociateJsonTests._sentinel):
            return True
        self.stub_out("nova.network.api.API.associate", fake_associate)
    def _check_network_action(self, template, subs=None):
        """POST *template* to os-networks/1/action; expect 202, empty body."""
        response = self._do_post('os-networks/1/action', template, subs or {})
        self.assertEqual(202, response.status_code)
        self.assertEqual("", response.content)
    def test_disassociate(self):
        self._check_network_action('network-disassociate-req')
    def test_disassociate_host(self):
        self._check_network_action('network-disassociate-host-req')
    def test_disassociate_project(self):
        self._check_network_action('network-disassociate-project-req')
    def test_associate_host(self):
        self._check_network_action('network-associate-host-req',
                                   {"host": "testHost"})
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from enum_ext import *
>>> identity(color.red) # in case of duplicated enums it always take the last enum
enum_ext.color.blood
>>> identity(color.green)
enum_ext.color.green
>>> identity(color.blue)
enum_ext.color.blue
>>> identity(color(1)) # in case of duplicated enums it always take the last enum
enum_ext.color.blood
>>> identity(color(2))
enum_ext.color.green
>>> identity(color(3))
enum_ext.color(3)
>>> identity(color(4))
enum_ext.color.blue
--- check export to scope ---
>>> identity(red)
enum_ext.color.blood
>>> identity(green)
enum_ext.color.green
>>> identity(blue)
enum_ext.color.blue
>>> try: identity(1)
... except TypeError: pass
... else: print 'expected a TypeError'
>>> c = colorized()
>>> c.x
enum_ext.color.blood
>>> c.x = green
>>> c.x
enum_ext.color.green
>>> red == blood
True
>>> red == green
False
>>> hash(red) == hash(blood)
True
>>> hash(red) == hash(green)
False
'''
# pickling of enums only works with Python 2.3 or higher
# Extra doctest block that run() appends to the module docstring when the
# running interpreter supports pickle.HIGHEST_PROTOCOL.
exercise_pickling = '''
>>> import pickle
>>> p = pickle.dumps(color.green, pickle.HIGHEST_PROTOCOL)
>>> l = pickle.loads(p)
>>> identity(l)
enum_ext.color.green
'''
def run(args = None):
    # Execute the doctests embedded in this module's docstring and return
    # doctest.testmod()'s (failure_count, test_count) result. If *args* is
    # given it replaces sys.argv (allows an external driver to pass flags).
    import sys
    import doctest
    import pickle
    if args is not None:
        sys.argv = args
    self = sys.modules.get(__name__)
    # Only exercise enum pickling when the interpreter supports
    # pickle.HIGHEST_PROTOCOL (Python 2.3+).
    if (hasattr(pickle, "HIGHEST_PROTOCOL")):
        self.__doc__ += exercise_pickling
    return doctest.testmod(self)
if __name__ == '__main__':
    # NOTE: Python 2 print statements -- this Boost.Python test file predates
    # Python 3 and must be run with a Python 2 interpreter.
    print "running..."
    import sys
    # Exit with the doctest failure count so the build system sees failures.
    status = run()[0]
    if (status == 0): print "Done."
    sys.exit(status)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"strings"
"testing"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/metrics/testutil"
)
// TestKubeletCredentialProviderPluginErrors verifies that a single error
// increment for a plugin is exposed through the plugin-errors counter.
func TestKubeletCredentialProviderPluginErrors(t *testing.T) {
	// Exposition-format text the registry must serve after one increment.
	expectedValue := `
	# HELP kubelet_credential_provider_plugin_errors_total [ALPHA] Number of errors from credential provider plugin
	# TYPE kubelet_credential_provider_plugin_errors_total counter
	kubelet_credential_provider_plugin_errors_total{plugin_name="test-plugin"} 1
	`
	metricNames := []string{
		"kubelet_credential_provider_plugin_errors_total",
	}
	// Reset first so state from other tests cannot leak into the comparison;
	// registerMetrics() appears safe to call from every test (presumably
	// guarded internally -- confirm in the metrics registration code).
	kubeletCredentialProviderPluginErrors.Reset()
	registerMetrics()
	kubeletCredentialProviderPluginErrors.WithLabelValues("test-plugin").Inc()
	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expectedValue), metricNames...); err != nil {
		t.Fatal(err)
	}
}
// TestKubeletCredentialProviderPluginDuration verifies that a single 0.3s
// observation lands in the expected histogram buckets (first non-zero at
// le="0.5") with the correct sum and count.
func TestKubeletCredentialProviderPluginDuration(t *testing.T) {
	expectedValue := `
	# HELP kubelet_credential_provider_plugin_duration [ALPHA] Duration of execution in seconds for credential provider plugin
	# TYPE kubelet_credential_provider_plugin_duration histogram
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="0.005"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="0.01"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="0.025"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="0.05"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="0.1"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="0.25"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="0.5"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="1"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="2.5"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="5"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="10"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="test-plugin",le="+Inf"} 1
	kubelet_credential_provider_plugin_duration_sum{plugin_name="test-plugin"} 0.3
	kubelet_credential_provider_plugin_duration_count{plugin_name="test-plugin"} 1
	`
	metricNames := []string{
		"kubelet_credential_provider_plugin_duration",
	}
	// Reset to isolate this test from earlier observations.
	kubeletCredentialProviderPluginDuration.Reset()
	registerMetrics()
	kubeletCredentialProviderPluginDuration.WithLabelValues("test-plugin").Observe(0.3)
	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expectedValue), metricNames...); err != nil {
		t.Fatal(err)
	}
}
// TestKubeletCredentialProviderConfigInfo verifies that recording a credential
// provider configuration hash exposes it as the hash label of the
// kubelet_credential_provider_config_info gauge with value 1.
func TestKubeletCredentialProviderConfigInfo(t *testing.T) {
	expectedValue := `
	# HELP kubelet_credential_provider_config_info [ALPHA] Information about the last applied credential provider configuration with hash as label
	# TYPE kubelet_credential_provider_config_info gauge
	kubelet_credential_provider_config_info{hash="sha256:abcd1234"} 1
	`
	metricNames := []string{
		"kubelet_credential_provider_config_info",
	}
	// Reset the per-plugin metrics so state from earlier tests cannot interfere
	// with this gather.
	kubeletCredentialProviderPluginErrors.Reset()
	kubeletCredentialProviderPluginDuration.Reset()
	registerMetrics()
	recordCredentialProviderConfigHash("sha256:abcd1234")
	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expectedValue), metricNames...); err != nil {
		t.Fatal(err)
	}
}
// TestMultiplePluginErrors verifies that the error counter tracks plugins
// independently: two increments for plugin-a and one for plugin-b must show
// up under their own plugin_name labels.
func TestMultiplePluginErrors(t *testing.T) {
	want := `
	# HELP kubelet_credential_provider_plugin_errors_total [ALPHA] Number of errors from credential provider plugin
	# TYPE kubelet_credential_provider_plugin_errors_total counter
	kubelet_credential_provider_plugin_errors_total{plugin_name="plugin-a"} 2
	kubelet_credential_provider_plugin_errors_total{plugin_name="plugin-b"} 1
	`
	kubeletCredentialProviderPluginErrors.Reset()
	registerMetrics()
	for _, plugin := range []string{"plugin-a", "plugin-a", "plugin-b"} {
		kubeletCredentialProviderPluginErrors.WithLabelValues(plugin).Inc()
	}
	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(want), "kubelet_credential_provider_plugin_errors_total"); err != nil {
		t.Fatal(err)
	}
}
// TestMultiplePluginDurations verifies that duration observations for two
// different plugins produce two independent histogram series: fast-plugin's
// 1ms observation lands in the smallest bucket while slow-plugin's 2s
// observation only appears from the le="2.5" bucket upward.
func TestMultiplePluginDurations(t *testing.T) {
	expectedValue := `
	# HELP kubelet_credential_provider_plugin_duration [ALPHA] Duration of execution in seconds for credential provider plugin
	# TYPE kubelet_credential_provider_plugin_duration histogram
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="0.005"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="0.01"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="0.025"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="0.05"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="0.1"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="0.25"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="0.5"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="1"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="2.5"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="5"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="10"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="fast-plugin",le="+Inf"} 1
	kubelet_credential_provider_plugin_duration_sum{plugin_name="fast-plugin"} 0.001
	kubelet_credential_provider_plugin_duration_count{plugin_name="fast-plugin"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="0.005"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="0.01"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="0.025"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="0.05"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="0.1"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="0.25"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="0.5"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="1"} 0
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="2.5"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="5"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="10"} 1
	kubelet_credential_provider_plugin_duration_bucket{plugin_name="slow-plugin",le="+Inf"} 1
	kubelet_credential_provider_plugin_duration_sum{plugin_name="slow-plugin"} 2
	kubelet_credential_provider_plugin_duration_count{plugin_name="slow-plugin"} 1
	`
	metricNames := []string{
		"kubelet_credential_provider_plugin_duration",
	}
	// Reset first so observations left over from other tests cannot leak in.
	kubeletCredentialProviderPluginDuration.Reset()
	registerMetrics()
	kubeletCredentialProviderPluginDuration.WithLabelValues("fast-plugin").Observe(0.001)
	kubeletCredentialProviderPluginDuration.WithLabelValues("slow-plugin").Observe(2.0)
	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expectedValue), metricNames...); err != nil {
		t.Fatal(err)
	}
}
// TestCredentialProviderConfigInfoWithDifferentHashes verifies that recording
// two config hashes in sequence exposes only the most recently recorded one:
// the config_info gauge reflects the current configuration, not a history.
func TestCredentialProviderConfigInfoWithDifferentHashes(t *testing.T) {
	expectedValue := `
	# HELP kubelet_credential_provider_config_info [ALPHA] Information about the last applied credential provider configuration with hash as label
	# TYPE kubelet_credential_provider_config_info gauge
	kubelet_credential_provider_config_info{hash="sha256:config2"} 1
	`
	metricNames := []string{
		"kubelet_credential_provider_config_info",
	}
	registerMetrics()
	// With custom collector, only the last hash is shown (current state)
	recordCredentialProviderConfigHash("sha256:config1")
	recordCredentialProviderConfigHash("sha256:config2")
	if err := testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(expectedValue), metricNames...); err != nil {
		t.Fatal(err)
	}
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/credentialprovider/plugin/metrics_test.go
|
"""Sanity test for symlinks in the bin directory."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ... import types as t
from . import (
SanityVersionNeutral,
SanityMessage,
SanityFailure,
SanitySuccess,
)
from ...config import (
SanityConfig,
)
from ...data import (
data_context,
)
from ...payload import (
ANSIBLE_BIN_SYMLINK_MAP,
__file__ as symlink_map_full_path,
)
from ...util import (
ANSIBLE_BIN_PATH,
ANSIBLE_TEST_DATA_ROOT,
)
class BinSymlinksTest(SanityVersionNeutral):
    """Sanity test for symlinks in the bin directory."""
    # This test only applies to the ansible core repository, never to collections.
    ansible_only = True
    @property
    def can_ignore(self):  # type: () -> bool
        """True if the test supports ignore entries."""
        return False
    @property
    def no_targets(self):  # type: () -> bool
        """True if the test does not use test targets. Mutually exclusive with all_targets."""
        return True
    # noinspection PyUnusedLocal
    def test(self, args, targets):  # pylint: disable=locally-disabled, unused-argument
        """
        Check every entry in bin/ against ANSIBLE_BIN_SYMLINK_MAP and report
        any mismatch as a sanity failure.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        bin_root = ANSIBLE_BIN_PATH
        bin_names = os.listdir(bin_root)
        bin_paths = sorted(os.path.join(bin_root, path) for path in bin_names)
        injector_root = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector')
        injector_names = os.listdir(injector_root)
        # Collected as (path, message) pairs; converted to SanityMessage below.
        errors = []  # type: t.List[t.Tuple[str, str]]
        symlink_map_path = os.path.relpath(symlink_map_full_path, data_context().content.root)
        # Validate each existing bin/ entry: it must be an executable symlink
        # whose target matches the entry in ANSIBLE_BIN_SYMLINK_MAP.
        for bin_path in bin_paths:
            if not os.path.islink(bin_path):
                errors.append((bin_path, 'not a symbolic link'))
                continue
            dest = os.readlink(bin_path)
            # os.path.exists() follows the link, so this detects dangling symlinks.
            if not os.path.exists(bin_path):
                errors.append((bin_path, 'points to non-existent path "%s"' % dest))
                continue
            if not os.path.isfile(bin_path):
                errors.append((bin_path, 'points to non-file "%s"' % dest))
                continue
            map_dest = ANSIBLE_BIN_SYMLINK_MAP.get(os.path.basename(bin_path))
            if not map_dest:
                errors.append((bin_path, 'missing from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % symlink_map_path))
                continue
            if dest != map_dest:
                errors.append((bin_path, 'points to "%s" instead of "%s" from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, map_dest, symlink_map_path)))
                continue
            if not os.access(bin_path, os.X_OK):
                errors.append((bin_path, 'points to non-executable file "%s"' % dest))
                continue
        # Check the reverse direction: every mapped name must exist both in
        # bin/ and in the injector directory.
        for bin_name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
            if bin_name not in bin_names:
                bin_path = os.path.join(bin_root, bin_name)
                errors.append((bin_path, 'missing symlink to "%s" defined in ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, symlink_map_path)))
            if bin_name not in injector_names:
                injector_path = os.path.join(injector_root, bin_name)
                errors.append((injector_path, 'missing symlink to "python.py"'))
        messages = [SanityMessage(message=message, path=os.path.relpath(path, data_context().content.root), confidence=100) for path, message in errors]
        if errors:
            return SanityFailure(self.name, messages=messages)
        return SanitySuccess(self.name)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Many-to-many relationships
To define a many-to-many relationship, use ``ManyToManyField()``.
In this example, an ``Article`` can be published in multiple ``Publication``
objects, and a ``Publication`` has multiple ``Article`` objects.
"""
from django.db import models
class Publication(models.Model):
    # Target model for the many-to-many relations defined on Article.
    title = models.CharField(max_length=30)
    class Meta:
        ordering = ("title",)
    def __str__(self):
        return self.title
class Tag(models.Model):
    # Explicit BigAutoField primary key to exercise non-default pk types
    # through M2M intermediary tables.
    id = models.BigAutoField(primary_key=True)
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
class NoDeletedArticleManager(models.Manager):
    """Default manager that hides articles marked as deleted via their headline."""

    def get_queryset(self):
        base_queryset = super().get_queryset()
        return base_queryset.exclude(headline="deleted")
class Article(models.Model):
    headline = models.CharField(max_length=100)
    # Assign a string as name to make sure the intermediary model is
    # correctly created. Refs #20207
    publications = models.ManyToManyField(Publication, name="publications")
    tags = models.ManyToManyField(Tag, related_name="tags")
    # M2M through an explicit intermediary model (UserArticle below).
    authors = models.ManyToManyField("User", through="UserArticle")
    # Custom default manager that filters out "deleted" articles.
    objects = NoDeletedArticleManager()
    class Meta:
        ordering = ("headline",)
    def __str__(self):
        return self.headline
class User(models.Model):
    # Unique so it can serve as the to_field target of UserArticle.user.
    username = models.CharField(max_length=20, unique=True)
    def __str__(self):
        return self.username
class UserArticle(models.Model):
    # Intermediary model for Article.authors; joins on username rather than pk.
    user = models.ForeignKey(User, models.CASCADE, to_field="username")
    article = models.ForeignKey(Article, models.CASCADE)
# Models to test correct related_name inheritance
class AbstractArticle(models.Model):
    class Meta:
        abstract = True
    # related_name="+" disables the reverse relation; concrete subclasses
    # inherit this field.
    publications = models.ManyToManyField(
        Publication, name="publications", related_name="+"
    )
class InheritedArticleA(AbstractArticle):
    # Concrete subclass inheriting the M2M field from AbstractArticle.
    pass
class InheritedArticleB(AbstractArticle):
    # Second concrete subclass inheriting the M2M field from AbstractArticle.
    pass
class NullableTargetArticle(models.Model):
    headline = models.CharField(max_length=100)
    # M2M whose through model allows a NULL publication foreign key.
    publications = models.ManyToManyField(
        Publication, through="NullablePublicationThrough"
    )
class NullablePublicationThrough(models.Model):
    article = models.ForeignKey(NullableTargetArticle, models.CASCADE)
    # Nullable side of the through relation.
    publication = models.ForeignKey(Publication, models.CASCADE, null=True)
|
python
|
github
|
https://github.com/django/django
|
tests/many_to_many/models.py
|
/* MIT License
*
* Copyright (c) 1998 Massachusetts Institute of Technology
* Copyright (c) The c-ares project and its contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_private.h"
#ifdef HAVE_NETINET_IN_H
# include <netinet/in.h>
#endif
#ifdef HAVE_NETDB_H
# include <netdb.h>
#endif
#ifdef HAVE_ARPA_INET_H
# include <arpa/inet.h>
#endif
#include "ares_nameser.h"
#include "ares_inet_net_pton.h"
/* Per-request state carried across the chain of lookups performed for a
 * single ares_gethostbyaddr() call. */
struct addr_query {
  /* Arguments passed to ares_gethostbyaddr() */
  ares_channel_t *channel;
  struct ares_addr addr;
  ares_host_callback callback;
  void *arg;
  char *lookups; /* duplicate memory from channel for ares_reinit() */
  const char *remaining_lookups; /* cursor into lookups: next sources to try */
  size_t timeouts; /* accumulated query timeouts, reported to the callback */
};
/* Forward declarations for the lookup state machine below. */
static void next_lookup(struct addr_query *aquery);
static void addr_callback(void *arg, ares_status_t status, size_t timeouts,
                          const ares_dns_record_t *dnsrec);
static void end_aquery(struct addr_query *aquery, ares_status_t status,
                       struct hostent *host);
static ares_status_t file_lookup(ares_channel_t *channel,
                                 const struct ares_addr *addr,
                                 struct hostent **host);
/* Start a reverse lookup for a binary address; the caller must already hold
 * the channel lock.  Validates the family and address length, allocates the
 * per-request state, duplicates the channel's lookup-order string (see the
 * ares_reinit() note on struct addr_query), and kicks off the first lookup.
 * On any early failure the callback is invoked immediately with an error. */
void ares_gethostbyaddr_nolock(ares_channel_t *channel, const void *addr,
                               int addrlen, int family,
                               ares_host_callback callback, void *arg)
{
  struct addr_query *aquery;
  /* Only IPv4 and IPv6 reverse lookups are supported. */
  if (family != AF_INET && family != AF_INET6) {
    callback(arg, ARES_ENOTIMP, 0, NULL);
    return;
  }
  /* The caller-supplied length must match the family exactly. */
  if ((family == AF_INET && addrlen != sizeof(aquery->addr.addr.addr4)) ||
      (family == AF_INET6 && addrlen != sizeof(aquery->addr.addr.addr6))) {
    callback(arg, ARES_ENOTIMP, 0, NULL);
    return;
  }
  aquery = ares_malloc(sizeof(struct addr_query));
  if (!aquery) {
    callback(arg, ARES_ENOMEM, 0, NULL);
    return;
  }
  aquery->lookups = ares_strdup(channel->lookups);
  if (aquery->lookups == NULL) {
    /* LCOV_EXCL_START: OutOfMemory */
    ares_free(aquery);
    callback(arg, ARES_ENOMEM, 0, NULL);
    return;
    /* LCOV_EXCL_STOP */
  }
  aquery->channel = channel;
  /* Copy the binary address into the request state. */
  if (family == AF_INET) {
    memcpy(&aquery->addr.addr.addr4, addr, sizeof(aquery->addr.addr.addr4));
  } else {
    memcpy(&aquery->addr.addr.addr6, addr, sizeof(aquery->addr.addr.addr6));
  }
  aquery->addr.family = family;
  aquery->callback = callback;
  aquery->arg = arg;
  aquery->remaining_lookups = aquery->lookups;
  aquery->timeouts = 0;
  next_lookup(aquery);
}
/* Public entry point: takes the channel lock around the _nolock variant.
 * A NULL channel is silently ignored. */
void ares_gethostbyaddr(ares_channel_t *channel, const void *addr, int addrlen,
                        int family, ares_host_callback callback, void *arg)
{
  if (channel == NULL) {
    return;
  }
  ares_channel_lock(channel);
  ares_gethostbyaddr_nolock(channel, addr, addrlen, family, callback, arg);
  ares_channel_unlock(channel);
}
/* Advance the lookup state machine: walk the remaining lookup-order string,
 * where 'b' means a DNS PTR query and 'f' means the hosts file.  A DNS query
 * returns control to addr_callback(); a successful file lookup finishes the
 * request immediately.  If every source is exhausted, finish with
 * ARES_ENOTFOUND. */
static void next_lookup(struct addr_query *aquery)
{
  const char *p;
  ares_status_t status;
  struct hostent *host = NULL;
  char *name;
  for (p = aquery->remaining_lookups; *p; p++) {
    switch (*p) {
      case 'b':
        /* DNS: build the reverse (PTR) name for the address and query it. */
        name = ares_dns_addr_to_ptr(&aquery->addr);
        if (name == NULL) {
          end_aquery(aquery, ARES_ENOMEM,
                     NULL); /* LCOV_EXCL_LINE: OutOfMemory */
          return;           /* LCOV_EXCL_LINE: OutOfMemory */
        }
        /* Remember where to resume if the DNS lookup fails. */
        aquery->remaining_lookups = p + 1;
        ares_query_nolock(aquery->channel, name, ARES_CLASS_IN,
                          ARES_REC_TYPE_PTR, addr_callback, aquery, NULL);
        ares_free(name);
        return;
      case 'f':
        status = file_lookup(aquery->channel, &aquery->addr, &host);
        /* this status check below previously checked for !ARES_ENOTFOUND,
           but we should not assume that this single error code is the one
           that can occur, as that is in fact no longer the case */
        if (status == ARES_SUCCESS) {
          end_aquery(aquery, status, host);
          return;
        }
        break;
      default:
        break;
    }
  }
  end_aquery(aquery, ARES_ENOTFOUND, NULL);
}
/* Completion callback for the DNS PTR query issued by next_lookup().
 * On success, parses the PTR reply into a hostent and finishes the request;
 * on shutdown/cancellation it finishes with that status; on any other error
 * it falls through to the next lookup source. */
static void addr_callback(void *arg, ares_status_t status, size_t timeouts,
                          const ares_dns_record_t *dnsrec)
{
  struct addr_query *aquery = (struct addr_query *)arg;
  struct hostent *host;
  size_t addrlen;
  /* Accumulate timeouts across all lookups for the final callback. */
  aquery->timeouts += timeouts;
  if (status == ARES_SUCCESS) {
    if (aquery->addr.family == AF_INET) {
      addrlen = sizeof(aquery->addr.addr.addr4);
      status = ares_parse_ptr_reply_dnsrec(dnsrec, &aquery->addr.addr.addr4,
                                           (int)addrlen, AF_INET, &host);
    } else {
      addrlen = sizeof(aquery->addr.addr.addr6);
      status = ares_parse_ptr_reply_dnsrec(dnsrec, &aquery->addr.addr.addr6,
                                           (int)addrlen, AF_INET6, &host);
    }
    end_aquery(aquery, status, host);
  } else if (status == ARES_EDESTRUCTION || status == ARES_ECANCELLED) {
    /* Channel is being destroyed or the query was cancelled: stop here. */
    end_aquery(aquery, status, NULL);
  } else {
    /* DNS failed; try the next source in the lookup-order string. */
    next_lookup(aquery);
  }
}
/* Finish the request: deliver the result to the user callback, then release
 * the hostent (if any) and all per-request state. */
static void end_aquery(struct addr_query *aquery, ares_status_t status,
                       struct hostent *host)
{
  aquery->callback(aquery->arg, (int)status, (int)aquery->timeouts, host);
  if (host) {
    ares_free_hostent(host);
  }
  ares_free(aquery->lookups);
  ares_free(aquery);
}
/* Look up the address in the hosts file.  Formats the binary address as a
 * text IP, searches the channel's hosts data for it, and on a hit converts
 * the entry to a hostent in *host.  Returns ARES_SUCCESS or a failure code
 * (e.g. ARES_ENOTFOUND). */
static ares_status_t file_lookup(ares_channel_t *channel,
                                 const struct ares_addr *addr,
                                 struct hostent **host)
{
  char ipaddr[INET6_ADDRSTRLEN];
  const void *ptr = NULL;
  const ares_hosts_entry_t *entry;
  ares_status_t status;
  if (addr->family == AF_INET) {
    ptr = &addr->addr.addr4;
  } else if (addr->family == AF_INET6) {
    ptr = &addr->addr.addr6;
  }
  /* Unknown family: nothing to search for. */
  if (ptr == NULL) {
    return ARES_ENOTFOUND;
  }
  if (!ares_inet_ntop(addr->family, ptr, ipaddr, sizeof(ipaddr))) {
    return ARES_ENOTFOUND;
  }
  status = ares_hosts_search_ipaddr(channel, ARES_FALSE, ipaddr, &entry);
  if (status != ARES_SUCCESS) {
    return status;
  }
  status = ares_hosts_entry_to_hostent(entry, addr->family, host);
  if (status != ARES_SUCCESS) {
    return status; /* LCOV_EXCL_LINE: OutOfMemory */
  }
  return ARES_SUCCESS;
}
|
c
|
github
|
https://github.com/nodejs/node
|
deps/cares/src/lib/ares_gethostbyaddr.c
|
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
import copy
from typing import Any, Callable, Collection, Dict, Iterable, Optional, Sequence, Set
from django.conf import settings
from django.utils.translation import gettext as _
from version import API_FEATURE_LEVEL, ZULIP_VERSION
from zerver.lib.actions import (
default_stream_groups_to_dicts_sorted,
do_get_streams,
gather_subscriptions_helper,
get_available_notification_sounds,
get_default_streams_for_realm,
get_owned_bot_dicts,
get_web_public_streams,
get_web_public_subs,
streams_to_dicts_sorted,
)
from zerver.lib.alert_words import user_alert_words
from zerver.lib.avatar import avatar_url
from zerver.lib.bot_config import load_bot_config_template
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.integrations import EMBEDDED_BOTS, WEBHOOK_INTEGRATIONS
from zerver.lib.message import (
aggregate_unread_data,
apply_unread_message_event,
extract_unread_data_from_um_rows,
get_raw_unread_data,
get_recent_conversations_recipient_id,
get_recent_private_conversations,
get_starred_message_ids,
remove_message_id_from_unread_mgs,
)
from zerver.lib.narrow import check_supported_events_narrow_filter, read_stop_words
from zerver.lib.presence import get_presence_for_user, get_presences_for_realm
from zerver.lib.push_notifications import push_notifications_enabled
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_source, get_realm_logo_url
from zerver.lib.request import JsonableError
from zerver.lib.soft_deactivation import reactivate_user_if_soft_deactivated
from zerver.lib.stream_subscription import handle_stream_notifications_compatibility
from zerver.lib.topic import TOPIC_NAME
from zerver.lib.topic_mutes import get_topic_mutes
from zerver.lib.user_groups import user_groups_in_realm_serialized
from zerver.lib.user_mutes import get_user_mutes
from zerver.lib.user_status import get_user_info_dict
from zerver.lib.users import get_cross_realm_dicts, get_raw_user_data, is_administrator_role
from zerver.models import (
MAX_MESSAGE_LENGTH,
MAX_TOPIC_NAME_LENGTH,
Client,
CustomProfileField,
Message,
Realm,
Stream,
UserMessage,
UserProfile,
custom_profile_fields_for_realm,
get_default_stream_groups,
get_realm_domains,
get_realm_playgrounds,
linkifiers_for_realm,
realm_filters_for_realm,
)
from zerver.tornado.django_api import get_user_events, request_event_queue
from zproject.backends import email_auth_enabled, password_auth_enabled
class RestartEventException(Exception):
    """
    Special error for handling restart events in apply_events.

    Raised by apply_events when it encounters an event of type "restart".
    """
def add_realm_logo_fields(state: Dict[str, Any], realm: Realm) -> None:
    """Populate `state` with the realm's day/night logo URL and source fields,
    plus the maximum logo upload size."""
    for night, key_prefix in ((False, "realm_logo"), (True, "realm_night_logo")):
        state[key_prefix + "_url"] = get_realm_logo_url(realm, night=night)
        state[key_prefix + "_source"] = get_realm_logo_source(realm, night=night)
    state["max_logo_file_size"] = settings.MAX_LOGO_FILE_SIZE
def always_want(msg_type: str) -> bool:
    """Event-type predicate that accepts every event type.

    Used as a helper by fetch_initial_state_data when the caller passes
    None for event_types, meaning data for every event type is wanted.
    Defined at module level to make it easier to mock.
    """
    return True
def fetch_initial_state_data(
    user_profile: Optional[UserProfile],
    *,
    realm: Optional[Realm] = None,
    event_types: Optional[Iterable[str]] = None,
    queue_id: Optional[str] = "",
    client_gravatar: bool = False,
    user_avatar_url_field_optional: bool = False,
    slim_presence: bool = False,
    include_subscribers: bool = True,
    include_streams: bool = True,
) -> Dict[str, Any]:
    """When `event_types` is None, fetches the core data powering the
    webapp's `page_params` and `/api/v1/register` (for mobile/terminal
    apps). Can also fetch a subset as determined by `event_types`.
    The user_profile=None code path is used for logged-out public
    access to streams with is_web_public=True.
    Whenever you add new code to this function, you should also add
    corresponding events for changes in the data structures and new
    code to apply_events (and add a test in test_events.py).
    """
    if realm is None:
        assert user_profile is not None
        realm = user_profile.realm
    # Everything below conditionally populates this dict; each section is
    # gated on want(<event type>).
    state: Dict[str, Any] = {"queue_id": queue_id}
    if event_types is None:
        # return True always
        want: Callable[[str], bool] = always_want
    else:
        # Membership test against the explicitly requested event types.
        want = set(event_types).__contains__
    # Show the version info unconditionally.
    state["zulip_version"] = ZULIP_VERSION
    state["zulip_feature_level"] = API_FEATURE_LEVEL
    if want("alert_words"):
        state["alert_words"] = [] if user_profile is None else user_alert_words(user_profile)
    if want("custom_profile_fields"):
        fields = custom_profile_fields_for_realm(realm.id)
        state["custom_profile_fields"] = [f.as_dict() for f in fields]
        state["custom_profile_field_types"] = {
            item[4]: {"id": item[0], "name": str(item[1])}
            for item in CustomProfileField.ALL_FIELD_TYPES
        }
    if want("hotspots"):
        # Even if we offered special hotspots for guests without an
        # account, we'd maybe need to store their state using cookies
        # or local storage, rather than in the database.
        state["hotspots"] = [] if user_profile is None else get_next_hotspots(user_profile)
    if want("message"):
        # Since the introduction of `anchor="latest"` in the API,
        # `max_message_id` is primarily used for generating `local_id`
        # values that are higher than this. We likely can eventually
        # remove this parameter from the API.
        user_messages = []
        if user_profile is not None:
            user_messages = (
                UserMessage.objects.filter(user_profile=user_profile)
                .order_by("-message_id")
                .values("message_id")[:1]
            )
        if user_messages:
            state["max_message_id"] = user_messages[0]["message_id"]
        else:
            # -1 signals "no messages yet" (also the logged-out case).
            state["max_message_id"] = -1
    if want("muted_topics"):
        state["muted_topics"] = [] if user_profile is None else get_topic_mutes(user_profile)
    if want("muted_users"):
        state["muted_users"] = [] if user_profile is None else get_user_mutes(user_profile)
    if want("presence"):
        state["presences"] = (
            {} if user_profile is None else get_presences_for_realm(realm, slim_presence)
        )
    if want("realm"):
        for property_name in Realm.property_types:
            state["realm_" + property_name] = getattr(realm, property_name)
        # Most state is handled via the property_types framework;
        # these manual entries are for those realm settings that don't
        # fit into that framework.
        state["realm_authentication_methods"] = realm.authentication_methods_dict()
        # We pretend these features are disabled because guests can't
        # access them. In the future, we may want to move this logic
        # to the frontends, so that we can correctly display what
        # these fields are in the settings.
        state["realm_allow_message_editing"] = (
            False if user_profile is None else realm.allow_message_editing
        )
        state["realm_allow_community_topic_editing"] = (
            False if user_profile is None else realm.allow_community_topic_editing
        )
        state["realm_allow_message_deleting"] = (
            False if user_profile is None else realm.allow_message_deleting
        )
        state["realm_message_content_edit_limit_seconds"] = realm.message_content_edit_limit_seconds
        state[
            "realm_message_content_delete_limit_seconds"
        ] = realm.message_content_delete_limit_seconds
        state[
            "realm_community_topic_editing_limit_seconds"
        ] = Realm.DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS
        # This setting determines whether to send presence and also
        # whether to display of users list in the right sidebar; we
        # want both behaviors for logged-out users. We may in the
        # future choose to move this logic to the frontend.
        state["realm_presence_disabled"] = True if user_profile is None else realm.presence_disabled
        state["realm_icon_url"] = realm_icon_url(realm)
        state["realm_icon_source"] = realm.icon_source
        state["max_icon_file_size"] = settings.MAX_ICON_FILE_SIZE
        add_realm_logo_fields(state, realm)
        state["realm_bot_domain"] = realm.get_bot_domain()
        state["realm_uri"] = realm.uri
        state["realm_available_video_chat_providers"] = realm.VIDEO_CHAT_PROVIDERS
        state["settings_send_digest_emails"] = settings.SEND_DIGEST_EMAILS
        state["realm_digest_emails_enabled"] = (
            realm.digest_emails_enabled and settings.SEND_DIGEST_EMAILS
        )
        state["realm_is_zephyr_mirror_realm"] = realm.is_zephyr_mirror_realm
        state["realm_email_auth_enabled"] = email_auth_enabled(realm)
        state["realm_password_auth_enabled"] = password_auth_enabled(realm)
        state["realm_push_notifications_enabled"] = push_notifications_enabled()
        state["realm_upload_quota"] = realm.upload_quota_bytes()
        state["realm_plan_type"] = realm.plan_type
        state["zulip_plan_is_not_limited"] = realm.plan_type != Realm.LIMITED
        state["upgrade_text_for_wide_organization_logo"] = str(Realm.UPGRADE_TEXT_STANDARD)
        state["realm_default_external_accounts"] = DEFAULT_EXTERNAL_ACCOUNTS
        state["jitsi_server_url"] = settings.JITSI_SERVER_URL.rstrip("/")
        state["development_environment"] = settings.DEVELOPMENT
        state["server_generation"] = settings.SERVER_GENERATION
        state["password_min_length"] = settings.PASSWORD_MIN_LENGTH
        state["password_min_guesses"] = settings.PASSWORD_MIN_GUESSES
        state["max_file_upload_size_mib"] = settings.MAX_FILE_UPLOAD_SIZE
        state["max_avatar_file_size_mib"] = settings.MAX_AVATAR_FILE_SIZE
        state["server_inline_image_preview"] = settings.INLINE_IMAGE_PREVIEW
        state["server_inline_url_embed_preview"] = settings.INLINE_URL_EMBED_PREVIEW
        state["server_avatar_changes_disabled"] = settings.AVATAR_CHANGES_DISABLED
        state["server_name_changes_disabled"] = settings.NAME_CHANGES_DISABLED
        state["giphy_rating_options"] = realm.GIPHY_RATING_OPTIONS
        # -1 is used below as the "no such stream configured" sentinel.
        if realm.notifications_stream and not realm.notifications_stream.deactivated:
            notifications_stream = realm.notifications_stream
            state["realm_notifications_stream_id"] = notifications_stream.id
        else:
            state["realm_notifications_stream_id"] = -1
        signup_notifications_stream = realm.get_signup_notifications_stream()
        if signup_notifications_stream:
            state["realm_signup_notifications_stream_id"] = signup_notifications_stream.id
        else:
            state["realm_signup_notifications_stream_id"] = -1
        state["max_stream_name_length"] = Stream.MAX_NAME_LENGTH
        state["max_stream_description_length"] = Stream.MAX_DESCRIPTION_LENGTH
        state["max_topic_length"] = MAX_TOPIC_NAME_LENGTH
        state["max_message_length"] = MAX_MESSAGE_LENGTH
    if want("realm_domains"):
        state["realm_domains"] = get_realm_domains(realm)
    if want("realm_emoji"):
        state["realm_emoji"] = realm.get_emoji()
    if want("realm_linkifiers"):
        state["realm_linkifiers"] = linkifiers_for_realm(realm.id)
    # Backwards compatibility code.
    if want("realm_filters"):
        state["realm_filters"] = realm_filters_for_realm(realm.id)
    if want("realm_playgrounds"):
        state["realm_playgrounds"] = get_realm_playgrounds(realm)
    if want("realm_user_groups"):
        state["realm_user_groups"] = user_groups_in_realm_serialized(realm)
    if user_profile is not None:
        settings_user = user_profile
    else:
        # When UserProfile=None, we want to serve the values for various
        # settings as the defaults. Instead of copying the default values
        # from models.py here, we access these default values from a
        # temporary UserProfile object that will not be saved to the database.
        #
        # We also can set various fields to avoid duplicating code
        # unnecessarily.
        settings_user = UserProfile(
            full_name="Anonymous User",
            email="username@example.com",
            delivery_email="username@example.com",
            realm=realm,
            # We tag logged-out users as guests because most guest
            # restrictions apply to these users as well, and it lets
            # us avoid unnecessary conditionals.
            role=UserProfile.ROLE_GUEST,
            avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
            # ID=0 is not used in real Zulip databases, ensuring this is unique.
            id=0,
        )
    if want("realm_user"):
        state["raw_users"] = get_raw_user_data(
            realm,
            user_profile,
            client_gravatar=client_gravatar,
            user_avatar_url_field_optional=user_avatar_url_field_optional,
        )
        state["cross_realm_bots"] = list(get_cross_realm_dicts())
        # For the user's own avatar URL, we force
        # client_gravatar=False, since that saves some unnecessary
        # client-side code for handing medium-size avatars. See #8253
        # for details.
        state["avatar_source"] = settings_user.avatar_source
        state["avatar_url_medium"] = avatar_url(
            settings_user,
            medium=True,
            client_gravatar=False,
        )
        state["avatar_url"] = avatar_url(
            settings_user,
            medium=False,
            client_gravatar=False,
        )
        state["can_create_streams"] = settings_user.can_create_streams()
        state["can_subscribe_other_users"] = settings_user.can_subscribe_other_users()
        state["can_invite_others_to_realm"] = settings_user.can_invite_others_to_realm()
        state["is_admin"] = settings_user.is_realm_admin
        state["is_owner"] = settings_user.is_realm_owner
        state["is_moderator"] = settings_user.is_moderator
        state["is_guest"] = settings_user.is_guest
        state["user_id"] = settings_user.id
        state["enter_sends"] = settings_user.enter_sends
        state["email"] = settings_user.email
        state["delivery_email"] = settings_user.delivery_email
        state["full_name"] = settings_user.full_name
    if want("realm_bot"):
        state["realm_bots"] = [] if user_profile is None else get_owned_bot_dicts(user_profile)
    # This does not yet have an apply_event counterpart, since currently,
    # new entries for EMBEDDED_BOTS can only be added directly in the codebase.
    if want("realm_embedded_bots"):
        realm_embedded_bots = []
        for bot in EMBEDDED_BOTS:
            realm_embedded_bots.append(
                {"name": bot.name, "config": load_bot_config_template(bot.name)}
            )
        state["realm_embedded_bots"] = realm_embedded_bots
    # This does not have an apply_events counterpart either since
    # this data is mostly static.
    if want("realm_incoming_webhook_bots"):
        realm_incoming_webhook_bots = []
        for integration in WEBHOOK_INTEGRATIONS:
            realm_incoming_webhook_bots.append(
                {
                    "name": integration.name,
                    "config": {c[1]: c[0] for c in integration.config_options},
                }
            )
        state["realm_incoming_webhook_bots"] = realm_incoming_webhook_bots
    if want("recent_private_conversations"):
        # A data structure containing records of this form:
        #
        # [{'max_message_id': 700175, 'user_ids': [801]}]
        #
        # for all recent private message conversations, ordered by the
        # highest message ID in the conversation. The user_ids list
        # is the list of users other than the current user in the
        # private message conversation (so it is [] for PMs to self).
        # Note that raw_recent_private_conversations is an
        # intermediate form as a dictionary keyed by recipient_id,
        # which is more efficient to update, and is rewritten to the
        # final format in post_process_state.
        state["raw_recent_private_conversations"] = (
            {} if user_profile is None else get_recent_private_conversations(user_profile)
        )
    if want("subscription"):
        if user_profile is not None:
            sub_info = gather_subscriptions_helper(
                user_profile,
                include_subscribers=include_subscribers,
            )
        else:
            # Logged-out visitors only see web-public streams.
            sub_info = get_web_public_subs(realm)
        state["subscriptions"] = sub_info.subscriptions
        state["unsubscribed"] = sub_info.unsubscribed
        state["never_subscribed"] = sub_info.never_subscribed
    if want("update_message_flags") and want("message"):
        # Keeping unread_msgs updated requires both message flag updates and
        # message updates. This is due to the fact that new messages will not
        # generate a flag update so we need to use the flags field in the
        # message event.
        if user_profile is not None:
            state["raw_unread_msgs"] = get_raw_unread_data(user_profile)
        else:
            # For logged-out visitors, we treat all messages as read;
            # calling this helper lets us return empty objects in the
            # appropriate format.
            state["raw_unread_msgs"] = extract_unread_data_from_um_rows([], user_profile)
    if want("starred_messages"):
        state["starred_messages"] = (
            [] if user_profile is None else get_starred_message_ids(user_profile)
        )
    if want("stream"):
        if include_streams:
            # The webapp doesn't use the data from here; instead,
            # it uses data from state["subscriptions"] and other
            # places.
            if user_profile is not None:
                state["streams"] = do_get_streams(
                    user_profile, include_all_active=user_profile.is_realm_admin
                )
            else:
                # TODO: This line isn't used by the webapp because it
                # gets these data via the `subscriptions` key; it will
                # be used when the mobile apps support logged-out
                # access.
                state["streams"] = get_web_public_streams(realm)  # nocoverage
    if want("default_streams"):
        if settings_user.is_guest:
            # Guest users and logged-out users don't have access to
            # all default streams, so we pretend the organization
            # doesn't have any.
            state["realm_default_streams"] = []
        else:
            state["realm_default_streams"] = streams_to_dicts_sorted(
                get_default_streams_for_realm(realm.id)
            )
    if want("default_stream_groups"):
        if settings_user.is_guest:
            state["realm_default_stream_groups"] = []
        else:
            state["realm_default_stream_groups"] = default_stream_groups_to_dicts_sorted(
                get_default_stream_groups(realm)
            )
    if want("stop_words"):
        state["stop_words"] = read_stop_words()
    if want("update_display_settings"):
        # Copy each display setting straight off the (possibly synthetic)
        # settings_user.
        for prop in UserProfile.property_types:
            state[prop] = getattr(settings_user, prop)
        state["emojiset_choices"] = UserProfile.emojiset_choices()
    if want("update_global_notifications"):
        for notification in UserProfile.notification_setting_types:
            state[notification] = getattr(settings_user, notification)
        state["available_notification_sounds"] = get_available_notification_sounds()
    if want("user_status"):
        # We require creating an account to access statuses.
        state["user_status"] = {} if user_profile is None else get_user_info_dict(realm_id=realm.id)
    if want("video_calls"):
        state["has_zoom_token"] = settings_user.zoom_token is not None
    if want("giphy"):
        # Normally, it would be a nasty security bug to send a
        # server's API key to end users. However, GIPHY's API key
        # security model is precisely to do that; every service
        # publishes its API key (and GIPHY's client-side JS libraries
        # require the API key to work). This security model makes
        # sense because GIPHY API keys are all essentially equivalent
        # in letting one search for GIFs; GIPHY only requires API keys
        # to exist at all so that they can deactivate them in cases of
        # abuse.
        state["giphy_api_key"] = settings.GIPHY_API_KEY if settings.GIPHY_API_KEY else ""
    return state
def apply_events(
    user_profile: UserProfile,
    *,
    state: Dict[str, Any],
    events: Iterable[Dict[str, Any]],
    fetch_event_types: Optional[Collection[str]],
    client_gravatar: bool,
    slim_presence: bool,
    include_subscribers: bool,
) -> None:
    """Apply each event, in order, to `state` via apply_event.

    Raises RestartEventException on a "restart" event so the caller can
    re-fetch from scratch; events whose type is not in fetch_event_types
    (when given) are skipped.
    """
    for event in events:
        event_type = event["type"]
        if event_type == "restart":
            raise RestartEventException()
        if fetch_event_types is not None and event_type not in fetch_event_types:
            # TODO: Skipping here is not, most precisely, correct: an
            # event of one type (e.g. `realm_user`) could in theory
            # modify state that doesn't come from that
            # `fetch_event_types` value (e.g. the `our_person` part of
            # that code path). That should be extremely rare, and
            # fixing it would require a nontrivial refactor of
            # `apply_event`; for now, be careful in your choice of
            # `fetch_event_types`.
            continue
        apply_event(
            user_profile,
            state=state,
            event=event,
            client_gravatar=client_gravatar,
            slim_presence=slim_presence,
            include_subscribers=include_subscribers,
        )
def apply_event(
    user_profile: UserProfile,
    *,
    state: Dict[str, Any],
    event: Dict[str, Any],
    client_gravatar: bool,
    slim_presence: bool,
    include_subscribers: bool,
) -> None:
    """Mutate `state` in place to reflect a single server event.

    Dispatches on event["type"] (and, for most types, event["op"]).
    Updates are guarded by key-presence checks (e.g. `"streams" in
    state`) so that callers who fetched only a subset of the initial
    state are handled correctly.  Unknown type/op combinations raise
    AssertionError.
    """
    if event["type"] == "message":
        # Track the highest message ID seen so far.
        state["max_message_id"] = max(state["max_message_id"], event["message"]["id"])
        if "raw_unread_msgs" in state:
            apply_unread_message_event(
                user_profile,
                state["raw_unread_msgs"],
                event["message"],
                event["flags"],
            )
        if event["message"]["type"] != "stream":
            if "raw_recent_private_conversations" in state:
                # Handle maintaining the recent_private_conversations data structure.
                conversations = state["raw_recent_private_conversations"]
                recipient_id = get_recent_conversations_recipient_id(
                    user_profile, event["message"]["recipient_id"], event["message"]["sender_id"]
                )
                if recipient_id not in conversations:
                    conversations[recipient_id] = dict(
                        user_ids=sorted(
                            user_dict["id"]
                            for user_dict in event["message"]["display_recipient"]
                            if user_dict["id"] != user_profile.id
                        ),
                    )
                conversations[recipient_id]["max_message_id"] = event["message"]["id"]
            return
        # Below, we handle maintaining first_message_id.
        for sub_dict in state.get("subscriptions", []):
            if event["message"]["stream_id"] == sub_dict["stream_id"]:
                if sub_dict["first_message_id"] is None:
                    sub_dict["first_message_id"] = event["message"]["id"]
        for stream_dict in state.get("streams", []):
            if event["message"]["stream_id"] == stream_dict["stream_id"]:
                if stream_dict["first_message_id"] is None:
                    stream_dict["first_message_id"] = event["message"]["id"]
    elif event["type"] == "hotspots":
        state["hotspots"] = event["hotspots"]
    elif event["type"] == "custom_profile_fields":
        state["custom_profile_fields"] = event["fields"]
        custom_profile_field_ids = {field["id"] for field in state["custom_profile_fields"]}
        # Drop per-user profile data for fields that no longer exist.
        if "raw_users" in state:
            for user_dict in state["raw_users"].values():
                if "profile_data" not in user_dict:
                    continue
                profile_data = user_dict["profile_data"]
                for (field_id, field_data) in list(profile_data.items()):
                    if int(field_id) not in custom_profile_field_ids:
                        del profile_data[field_id]
    elif event["type"] == "realm_user":
        person = event["person"]
        person_user_id = person["user_id"]
        if event["op"] == "add":
            # Deep-copy so we never mutate the caller's event payload.
            person = copy.deepcopy(person)
            if client_gravatar:
                if person["avatar_url"].startswith("https://secure.gravatar.com"):
                    person["avatar_url"] = None
            person["is_active"] = True
            if not person["is_bot"]:
                person["profile_data"] = {}
            state["raw_users"][person_user_id] = person
        elif event["op"] == "remove":
            state["raw_users"][person_user_id]["is_active"] = False
        elif event["op"] == "update":
            is_me = person_user_id == user_profile.id
            if is_me:
                if "avatar_url" in person and "avatar_url" in state:
                    state["avatar_source"] = person["avatar_source"]
                    state["avatar_url"] = person["avatar_url"]
                    state["avatar_url_medium"] = person["avatar_url_medium"]
                if "role" in person:
                    state["is_admin"] = is_administrator_role(person["role"])
                    state["is_owner"] = person["role"] == UserProfile.ROLE_REALM_OWNER
                    state["is_moderator"] = person["role"] == UserProfile.ROLE_MODERATOR
                    state["is_guest"] = person["role"] == UserProfile.ROLE_GUEST
                    # Recompute properties based on is_admin/is_guest
                    state["can_create_streams"] = user_profile.can_create_streams()
                    state["can_subscribe_other_users"] = user_profile.can_subscribe_other_users()
                    state["can_invite_others_to_realm"] = user_profile.can_invite_others_to_realm()
                    # TODO: Probably rather than writing the perfect
                    # live-update code for the case of racing with the
                    # current user changing roles, we should just do a
                    # full refetch.
                    if "never_subscribed" in state:
                        sub_info = gather_subscriptions_helper(
                            user_profile,
                            include_subscribers=include_subscribers,
                        )
                        state["subscriptions"] = sub_info.subscriptions
                        state["unsubscribed"] = sub_info.unsubscribed
                        state["never_subscribed"] = sub_info.never_subscribed
                    if "streams" in state:
                        state["streams"] = do_get_streams(
                            user_profile, include_all_active=user_profile.is_realm_admin
                        )
                for field in ["delivery_email", "email", "full_name"]:
                    if field in person and field in state:
                        state[field] = person[field]
                # In the unlikely event that the current user
                # just changed to/from being an admin, we need
                # to add/remove the data on all bots in the
                # realm. This is ugly and probably better
                # solved by removing the all-realm-bots data
                # given to admin users from this flow.
                if "role" in person and "realm_bots" in state:
                    prev_state = state["raw_users"][user_profile.id]
                    was_admin = prev_state["is_admin"]
                    now_admin = is_administrator_role(person["role"])
                    if was_admin and not now_admin:
                        state["realm_bots"] = []
                    if not was_admin and now_admin:
                        state["realm_bots"] = get_owned_bot_dicts(user_profile)
            if client_gravatar and "avatar_url" in person:
                # Respect the client_gravatar setting in the `users` data.
                if person["avatar_url"].startswith("https://secure.gravatar.com"):
                    person["avatar_url"] = None
                    person["avatar_url_medium"] = None
            if person_user_id in state["raw_users"]:
                p = state["raw_users"][person_user_id]
                # Only copy over fields the cached dict already has; the
                # event may carry keys we don't track per-user.
                for field in p:
                    if field in person:
                        p[field] = person[field]
                if "role" in person:
                    p["is_admin"] = is_administrator_role(person["role"])
                    p["is_owner"] = person["role"] == UserProfile.ROLE_REALM_OWNER
                    p["is_guest"] = person["role"] == UserProfile.ROLE_GUEST
                    # NOTE(review): unlike the is_me branch above, this
                    # does not update p["is_moderator"] on a role change;
                    # confirm whether that omission is intentional.
                if "custom_profile_field" in person:
                    custom_field_id = person["custom_profile_field"]["id"]
                    custom_field_new_value = person["custom_profile_field"]["value"]
                    if "rendered_value" in person["custom_profile_field"]:
                        p["profile_data"][str(custom_field_id)] = {
                            "value": custom_field_new_value,
                            "rendered_value": person["custom_profile_field"]["rendered_value"],
                        }
                    else:
                        p["profile_data"][str(custom_field_id)] = {
                            "value": custom_field_new_value,
                        }
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "realm_bot":
        if event["op"] == "add":
            state["realm_bots"].append(event["bot"])
        elif event["op"] == "remove":
            user_id = event["bot"]["user_id"]
            for bot in state["realm_bots"]:
                if bot["user_id"] == user_id:
                    bot["is_active"] = False
        elif event["op"] == "delete":
            state["realm_bots"] = [
                item for item in state["realm_bots"] if item["user_id"] != event["bot"]["user_id"]
            ]
        elif event["op"] == "update":
            for bot in state["realm_bots"]:
                if bot["user_id"] == event["bot"]["user_id"]:
                    if "owner_id" in event["bot"]:
                        bot_owner_id = event["bot"]["owner_id"]
                        bot["owner_id"] = bot_owner_id
                    else:
                        bot.update(event["bot"])
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "stream":
        if event["op"] == "create":
            for stream in event["streams"]:
                if not stream["invite_only"]:
                    stream_data = copy.deepcopy(stream)
                    if include_subscribers:
                        stream_data["subscribers"] = []
                    # We know the stream has no traffic, and this
                    # field is not present in the event.
                    #
                    # TODO: Probably this should just be added to the event.
                    stream_data["stream_weekly_traffic"] = None
                    # Add stream to never_subscribed (if not invite_only)
                    state["never_subscribed"].append(stream_data)
                if "streams" in state:
                    state["streams"].append(stream)
            if "streams" in state:
                state["streams"].sort(key=lambda elt: elt["name"])
        if event["op"] == "delete":
            deleted_stream_ids = {stream["stream_id"] for stream in event["streams"]}
            if "streams" in state:
                state["streams"] = [
                    s for s in state["streams"] if s["stream_id"] not in deleted_stream_ids
                ]
            state["never_subscribed"] = [
                stream
                for stream in state["never_subscribed"]
                if stream["stream_id"] not in deleted_stream_ids
            ]
        if event["op"] == "update":
            # For legacy reasons, we call stream data 'subscriptions' in
            # the state var here, for the benefit of the JS code.
            for obj in state["subscriptions"]:
                if obj["name"].lower() == event["name"].lower():
                    obj[event["property"]] = event["value"]
                    if event["property"] == "description":
                        obj["rendered_description"] = event["rendered_description"]
            # Also update the pure streams data
            if "streams" in state:
                for stream in state["streams"]:
                    if stream["name"].lower() == event["name"].lower():
                        prop = event["property"]
                        if prop in stream:
                            stream[prop] = event["value"]
                            if prop == "description":
                                stream["rendered_description"] = event["rendered_description"]
    elif event["type"] == "default_streams":
        state["realm_default_streams"] = event["default_streams"]
    elif event["type"] == "default_stream_groups":
        state["realm_default_stream_groups"] = event["default_stream_groups"]
    elif event["type"] == "realm":
        if event["op"] == "update":
            field = "realm_" + event["property"]
            state[field] = event["value"]
            if event["property"] == "plan_type":
                # Then there are some extra fields that also need to be set.
                state["zulip_plan_is_not_limited"] = event["value"] != Realm.LIMITED
                state["realm_upload_quota"] = event["extra_data"]["upload_quota"]
            policy_permission_dict = {
                "create_stream_policy": "can_create_streams",
                "invite_to_stream_policy": "can_subscribe_other_users",
                "invite_to_realm_policy": "can_invite_others_to_realm",
            }
            # Tricky interaction: Whether we can create streams and can subscribe other users
            # can get changed here.
            if field == "realm_waiting_period_threshold":
                for policy, permission in policy_permission_dict.items():
                    if permission in state:
                        state[permission] = user_profile.has_permission(policy)
            if event["property"] in policy_permission_dict.keys():
                if policy_permission_dict[event["property"]] in state:
                    state[policy_permission_dict[event["property"]]] = user_profile.has_permission(
                        event["property"]
                    )
        elif event["op"] == "update_dict":
            for key, value in event["data"].items():
                state["realm_" + key] = value
                # It's a bit messy, but this is where we need to
                # update the state for whether password authentication
                # is enabled on this server.
                if key == "authentication_methods":
                    state["realm_password_auth_enabled"] = value["Email"] or value["LDAP"]
                    state["realm_email_auth_enabled"] = value["Email"]
        elif event["op"] == "deactivated":
            # The realm has just been deactivated. If our request had
            # arrived a moment later, we'd have rendered the
            # deactivation UI; if it'd been a moment sooner, we've
            # have rendered the app and then immediately got this
            # event (or actually, more likely, an auth error on GET
            # /events) and immediately reloaded into the same
            # deactivation UI. Passing achieves the same result.
            pass
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "subscription":
        if event["op"] == "add":
            added_stream_ids = {sub["stream_id"] for sub in event["subscriptions"]}
            was_added = lambda s: s["stream_id"] in added_stream_ids
            existing_stream_ids = {sub["stream_id"] for sub in state["subscriptions"]}
            # add the new subscriptions
            for sub in event["subscriptions"]:
                if sub["stream_id"] not in existing_stream_ids:
                    if "subscribers" in sub and not include_subscribers:
                        sub = copy.deepcopy(sub)
                        del sub["subscribers"]
                    state["subscriptions"].append(sub)
            # remove them from unsubscribed if they had been there
            state["unsubscribed"] = [s for s in state["unsubscribed"] if not was_added(s)]
            # remove them from never_subscribed if they had been there
            state["never_subscribed"] = [s for s in state["never_subscribed"] if not was_added(s)]
        elif event["op"] == "remove":
            removed_stream_ids = {sub["stream_id"] for sub in event["subscriptions"]}
            was_removed = lambda s: s["stream_id"] in removed_stream_ids
            # Find the subs we are affecting.
            removed_subs = list(filter(was_removed, state["subscriptions"]))
            # Remove our user from the subscribers of the removed subscriptions.
            if include_subscribers:
                for sub in removed_subs:
                    sub["subscribers"].remove(user_profile.id)
            state["unsubscribed"] += removed_subs
            # Now filter out the removed subscriptions from subscriptions.
            state["subscriptions"] = [s for s in state["subscriptions"] if not was_removed(s)]
        elif event["op"] == "update":
            for sub in state["subscriptions"]:
                if sub["stream_id"] == event["stream_id"]:
                    sub[event["property"]] = event["value"]
        elif event["op"] == "peer_add":
            if include_subscribers:
                stream_ids = set(event["stream_ids"])
                user_ids = set(event["user_ids"])
                for sub_dict in [
                    state["subscriptions"],
                    state["unsubscribed"],
                    state["never_subscribed"],
                ]:
                    for sub in sub_dict:
                        if sub["stream_id"] in stream_ids:
                            subscribers = set(sub["subscribers"]) | user_ids
                            sub["subscribers"] = sorted(list(subscribers))
        elif event["op"] == "peer_remove":
            if include_subscribers:
                stream_ids = set(event["stream_ids"])
                user_ids = set(event["user_ids"])
                for sub_dict in [
                    state["subscriptions"],
                    state["unsubscribed"],
                    state["never_subscribed"],
                ]:
                    for sub in sub_dict:
                        if sub["stream_id"] in stream_ids:
                            subscribers = set(sub["subscribers"]) - user_ids
                            sub["subscribers"] = sorted(list(subscribers))
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "presence":
        # Presence dicts are keyed by user ID (slim) or email (legacy).
        if slim_presence:
            user_key = str(event["user_id"])
        else:
            user_key = event["email"]
        state["presences"][user_key] = get_presence_for_user(event["user_id"], slim_presence)[
            user_key
        ]
    elif event["type"] == "update_message":
        # We don't return messages in /register, so we don't need to
        # do anything for content updates, but we may need to update
        # the unread_msgs data if the topic of an unread message changed.
        if "new_stream_id" in event:
            stream_dict = state["raw_unread_msgs"]["stream_dict"]
            stream_id = event["new_stream_id"]
            for message_id in event["message_ids"]:
                if message_id in stream_dict:
                    stream_dict[message_id]["stream_id"] = stream_id
        if TOPIC_NAME in event:
            stream_dict = state["raw_unread_msgs"]["stream_dict"]
            topic = event[TOPIC_NAME]
            for message_id in event["message_ids"]:
                if message_id in stream_dict:
                    stream_dict[message_id]["topic"] = topic
    elif event["type"] == "delete_message":
        if "message_id" in event:
            message_ids = [event["message_id"]]
        else:
            message_ids = event["message_ids"]  # nocoverage
        # Recompute max_message_id from scratch, since the deleted
        # message(s) may have included the current maximum.
        max_message = (
            Message.objects.filter(usermessage__user_profile=user_profile).order_by("-id").first()
        )
        if max_message:
            state["max_message_id"] = max_message.id
        else:
            state["max_message_id"] = -1
        if "raw_unread_msgs" in state:
            for remove_id in message_ids:
                remove_message_id_from_unread_mgs(state["raw_unread_msgs"], remove_id)
        # The remainder of this block is about maintaining recent_private_conversations
        if "raw_recent_private_conversations" not in state or event["message_type"] != "private":
            return
        recipient_id = get_recent_conversations_recipient_id(
            user_profile, event["recipient_id"], event["sender_id"]
        )
        # Ideally, we'd have test coverage for these two blocks.  To
        # do that, we'll need a test where we delete not-the-latest
        # messages or delete a private message not in
        # recent_private_conversations.
        if recipient_id not in state["raw_recent_private_conversations"]:  # nocoverage
            return
        old_max_message_id = state["raw_recent_private_conversations"][recipient_id][
            "max_message_id"
        ]
        if old_max_message_id not in message_ids:  # nocoverage
            return
        # OK, we just deleted what had been the max_message_id for
        # this recent conversation; we need to recompute that value
        # from scratch.  Definitely don't need to re-query everything,
        # but this case is likely rare enough that it's reasonable to do so.
        state["raw_recent_private_conversations"] = get_recent_private_conversations(user_profile)
    elif event["type"] == "reaction":
        # The client will get the message with the reactions directly
        pass
    elif event["type"] == "submessage":
        # The client will get submessages with their messages
        pass
    elif event["type"] == "typing":
        # Typing notification events are transient and thus ignored
        pass
    elif event["type"] == "attachment":
        # Attachment events are just for updating the "uploads" UI;
        # they are not sent directly.
        pass
    elif event["type"] == "update_message_flags":
        # We don't return messages in `/register`, so most flags we
        # can ignore, but we do need to update the unread_msgs data if
        # unread state is changed.
        if "raw_unread_msgs" in state and event["flag"] == "read" and event["op"] == "add":
            for remove_id in event["messages"]:
                remove_message_id_from_unread_mgs(state["raw_unread_msgs"], remove_id)
        if event["flag"] == "starred" and "starred_messages" in state:
            if event["op"] == "add":
                state["starred_messages"] += event["messages"]
            if event["op"] == "remove":
                state["starred_messages"] = [
                    message
                    for message in state["starred_messages"]
                    if not (message in event["messages"])
                ]
    elif event["type"] == "realm_domains":
        if event["op"] == "add":
            state["realm_domains"].append(event["realm_domain"])
        elif event["op"] == "change":
            for realm_domain in state["realm_domains"]:
                if realm_domain["domain"] == event["realm_domain"]["domain"]:
                    realm_domain["allow_subdomains"] = event["realm_domain"]["allow_subdomains"]
        elif event["op"] == "remove":
            state["realm_domains"] = [
                realm_domain
                for realm_domain in state["realm_domains"]
                if realm_domain["domain"] != event["domain"]
            ]
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "realm_emoji":
        state["realm_emoji"] = event["realm_emoji"]
    elif event["type"] == "realm_export":
        # These realm export events are only available to
        # administrators, and aren't included in page_params.
        pass
    elif event["type"] == "alert_words":
        state["alert_words"] = event["alert_words"]
    elif event["type"] == "muted_topics":
        state["muted_topics"] = event["muted_topics"]
    elif event["type"] == "muted_users":
        state["muted_users"] = event["muted_users"]
    elif event["type"] == "realm_filters":
        state["realm_filters"] = event["realm_filters"]
    elif event["type"] == "realm_linkifiers":
        state["realm_linkifiers"] = event["realm_linkifiers"]
    elif event["type"] == "realm_playgrounds":
        state["realm_playgrounds"] = event["realm_playgrounds"]
    elif event["type"] == "update_display_settings":
        assert event["setting_name"] in UserProfile.property_types
        state[event["setting_name"]] = event["setting"]
    elif event["type"] == "update_global_notifications":
        assert event["notification_name"] in UserProfile.notification_setting_types
        state[event["notification_name"]] = event["setting"]
    elif event["type"] == "invites_changed":
        pass
    elif event["type"] == "user_group":
        if event["op"] == "add":
            state["realm_user_groups"].append(event["group"])
            state["realm_user_groups"].sort(key=lambda group: group["id"])
        elif event["op"] == "update":
            for user_group in state["realm_user_groups"]:
                if user_group["id"] == event["group_id"]:
                    user_group.update(event["data"])
        elif event["op"] == "add_members":
            for user_group in state["realm_user_groups"]:
                if user_group["id"] == event["group_id"]:
                    user_group["members"].extend(event["user_ids"])
                    user_group["members"].sort()
        elif event["op"] == "remove_members":
            for user_group in state["realm_user_groups"]:
                if user_group["id"] == event["group_id"]:
                    members = set(user_group["members"])
                    user_group["members"] = list(members - set(event["user_ids"]))
                    user_group["members"].sort()
        elif event["op"] == "remove":
            state["realm_user_groups"] = [
                ug for ug in state["realm_user_groups"] if ug["id"] != event["group_id"]
            ]
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "user_status":
        user_id_str = str(event["user_id"])
        user_status = state["user_status"]
        away = event.get("away")
        status_text = event.get("status_text")
        if user_id_str not in user_status:
            user_status[user_id_str] = {}
        if away is not None:
            if away:
                user_status[user_id_str]["away"] = True
            else:
                user_status[user_id_str].pop("away", None)
        if status_text is not None:
            if status_text == "":
                user_status[user_id_str].pop("status_text", None)
            else:
                user_status[user_id_str]["status_text"] = status_text
        # Drop users whose status became entirely empty.
        if not user_status[user_id_str]:
            user_status.pop(user_id_str, None)
        state["user_status"] = user_status
    elif event["type"] == "has_zoom_token":
        state["has_zoom_token"] = event["value"]
    else:
        raise AssertionError("Unexpected event type {}".format(event["type"]))
def do_events_register(
    user_profile: UserProfile,
    user_client: Client,
    apply_markdown: bool = True,
    client_gravatar: bool = False,
    slim_presence: bool = False,
    event_types: Optional[Sequence[str]] = None,
    queue_lifespan_secs: int = 0,
    all_public_streams: bool = False,
    include_subscribers: bool = True,
    include_streams: bool = True,
    client_capabilities: Optional[Dict[str, bool]] = None,
    narrow: Optional[Collection[Sequence[str]]] = None,
    fetch_event_types: Optional[Collection[str]] = None,
) -> Dict[str, Any]:
    """Register a Tornado event queue and return the client's initial state.

    Allocates the queue, fetches initial state, applies any events that
    arrived while fetching, and retries from scratch if Tornado restarted
    in the middle (RestartEventException).  Returns the state dict with
    "queue_id" and "last_event_id" set for the client.
    """
    # Avoid the shared-mutable-default-argument pitfall: the old `={}`
    # and `=[]` defaults are now None sentinels, normalized here.  This
    # is backward-compatible for all callers.
    if client_capabilities is None:
        client_capabilities = {}
    if narrow is None:
        narrow = []
    # Technically we don't need to check this here because
    # build_narrow_filter will check it, but it's nicer from an error
    # handling perspective to do it before contacting Tornado
    check_supported_events_narrow_filter(narrow)
    notification_settings_null = client_capabilities.get("notification_settings_null", False)
    bulk_message_deletion = client_capabilities.get("bulk_message_deletion", False)
    user_avatar_url_field_optional = client_capabilities.get(
        "user_avatar_url_field_optional", False
    )
    stream_typing_notifications = client_capabilities.get("stream_typing_notifications", False)
    if user_profile.realm.email_address_visibility != Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
        # If real email addresses are not available to the user, their
        # clients cannot compute gravatars, so we force-set it to false.
        client_gravatar = False
    if fetch_event_types is not None:
        event_types_set: Optional[Set[str]] = set(fetch_event_types)
    elif event_types is not None:
        event_types_set = set(event_types)
    else:
        event_types_set = None
    # Fill up the UserMessage rows if a soft-deactivated user has returned
    reactivate_user_if_soft_deactivated(user_profile)
    while True:
        # Note that we pass event_types, not fetch_event_types here, since
        # that's what controls which future events are sent.
        queue_id = request_event_queue(
            user_profile,
            user_client,
            apply_markdown,
            client_gravatar,
            slim_presence,
            queue_lifespan_secs,
            event_types,
            all_public_streams,
            narrow=narrow,
            bulk_message_deletion=bulk_message_deletion,
            stream_typing_notifications=stream_typing_notifications,
        )
        if queue_id is None:
            raise JsonableError(_("Could not allocate event queue"))
        ret = fetch_initial_state_data(
            user_profile,
            event_types=event_types_set,
            queue_id=queue_id,
            client_gravatar=client_gravatar,
            user_avatar_url_field_optional=user_avatar_url_field_optional,
            slim_presence=slim_presence,
            include_subscribers=include_subscribers,
            include_streams=include_streams,
        )
        # Apply events that came in while we were fetching initial data
        events = get_user_events(user_profile, queue_id, -1)
        try:
            apply_events(
                user_profile,
                state=ret,
                events=events,
                fetch_event_types=fetch_event_types,
                client_gravatar=client_gravatar,
                slim_presence=slim_presence,
                include_subscribers=include_subscribers,
            )
        except RestartEventException:
            # This represents a rare race condition, where Tornado
            # restarted (and sent `restart` events) while we were waiting
            # for fetch_initial_state_data to return. To avoid the client
            # needing to reload shortly after loading, we recursively call
            # do_events_register here.
            continue
        else:
            break
    post_process_state(user_profile, ret, notification_settings_null)
    if len(events) > 0:
        ret["last_event_id"] = events[-1]["id"]
    else:
        ret["last_event_id"] = -1
    return ret
def post_process_state(
    user_profile: Optional["UserProfile"], ret: Dict[str, Any], notification_settings_null: bool
) -> None:
    """Convert intermediate `raw_*` state structures into their client form.

    Initial state is kept in O(1)-updatable intermediate structures (e.g.
    `raw_unread_msgs`, `raw_users`) while events are applied; this runs
    once at the end to reshape them for the client, deleting each `raw_*`
    key as it goes.  Mutates `ret` in place.
    """
    if "raw_unread_msgs" in ret:
        ret["unread_msgs"] = aggregate_unread_data(ret["raw_unread_msgs"])
        del ret["raw_unread_msgs"]
    if "raw_users" in ret:
        user_dicts = sorted(ret["raw_users"].values(), key=lambda x: x["user_id"])
        ret["realm_users"] = [d for d in user_dicts if d["is_active"]]
        ret["realm_non_active_users"] = [d for d in user_dicts if not d["is_active"]]
        # Intentional aliasing: the two lists above share dict objects
        # with user_dicts, so popping here strips `is_active` from both.
        # The field is implied by which list a user landed in; sending it
        # would invite clients to "trust" a field they don't live-update,
        # and wastes bandwidth.
        for d in user_dicts:
            d.pop("is_active")
        del ret["raw_users"]
    if "raw_recent_private_conversations" in ret:
        # Reformat recent_private_conversations as a list of (copied)
        # dicts sorted by most recent message, rather than a dict keyed
        # by recipient_id.  (Was `.items()` + `dict(**value)`; the keys
        # were unused, and `dict(value)` copies without requiring
        # string keys.)
        ret["recent_private_conversations"] = sorted(
            (dict(value) for value in ret["raw_recent_private_conversations"].values()),
            key=lambda x: -x["max_message_id"],
        )
        del ret["raw_recent_private_conversations"]
    if not notification_settings_null and "subscriptions" in ret:
        for stream_dict in ret["subscriptions"] + ret["unsubscribed"]:
            handle_stream_notifications_compatibility(
                user_profile, stream_dict, notification_settings_null
            )
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright IBM Corp. 2016
# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/
DOCUMENTATION = '''
---
module: vmware_local_user_manager
short_description: Manage local users on an ESXi host
description:
- Manage local users on an ESXi host
version_added: "2.2"
author: Andreas Nafpliotis
notes:
- Tested on ESXi 6.0
- Be sure that the ESXi user used for login, has the appropriate rights to create / delete / edit users
requirements:
- "python >= 2.6"
- PyVmomi installed
options:
local_user_name:
description:
- The local user name to be changed
required: True
local_user_password:
description:
- The password to be set
required: False
local_user_description:
description:
- Description for the user
required: False
state:
description:
- Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
choices: ['present', 'absent']
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_local_user_manager command from Ansible Playbooks
- name: Add local user to ESXi
local_action:
module: vmware_local_user_manager
hostname: esxi_hostname
username: root
password: vmware
local_user_name: foo
'''
# This module registers no return values beyond the standard ones.
RETURN = '''# '''
# pyVmomi is an optional third-party dependency; record its availability
# so main() can fail with a clear message instead of an ImportError.
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
class VMwareLocalUserManager(object):
    """Idempotently create, update, or remove a local user on an ESXi host.

    Dispatches on (desired state from the playbook, current state on the
    host) and reports the result through the Ansible module's
    exit_json/fail_json.
    """

    def __init__(self, module):
        self.module = module
        self.content = connect_to_api(self.module)
        self.local_user_name = self.module.params['local_user_name']
        self.local_user_password = self.module.params['local_user_password']
        self.local_user_description = self.module.params['local_user_description']
        self.state = self.module.params['state']

    def process_state(self):
        """Run the handler matching (desired state, current state)."""
        try:
            local_account_manager_states = {
                'absent': {
                    'present': self.state_remove_user,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'present': self.state_update_user,
                    'absent': self.state_create_user,
                },
            }
            local_account_manager_states[self.state][self.check_local_user_manager_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def check_local_user_manager_state(self):
        """Return 'present' if the user exists on the host, else 'absent'."""
        return 'present' if self.find_user_account() else 'absent'

    def find_user_account(self):
        """Look up the user (exact name match) via the host's user directory."""
        searchStr = self.local_user_name
        exactMatch = True
        findUsers = True
        findGroups = False
        return self.content.userDirectory.RetrieveUserGroups(
            None, searchStr, None, None, exactMatch, findUsers, findGroups
        )

    def create_account_spec(self):
        """Build the AccountSpecification from the module parameters."""
        account_spec = vim.host.LocalAccountManager.AccountSpecification()
        account_spec.id = self.local_user_name
        account_spec.password = self.local_user_password
        account_spec.description = self.local_user_description
        return account_spec

    def _apply_account_operation(self, operation):
        # Shared wrapper for the three state-change handlers: run the
        # account operation, report a change, and translate API faults.
        # (Previously this try/except block was duplicated three times,
        # each binding an unused `task` local.)
        try:
            operation()
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_create_user(self):
        """Create the local user from the module parameters."""
        account_spec = self.create_account_spec()
        self._apply_account_operation(
            lambda: self.content.accountManager.CreateUser(account_spec)
        )

    def state_update_user(self):
        """Update the existing local user's password/description."""
        account_spec = self.create_account_spec()
        self._apply_account_operation(
            lambda: self.content.accountManager.UpdateUser(account_spec)
        )

    def state_remove_user(self):
        """Remove the local user from the host."""
        self._apply_account_operation(
            lambda: self.content.accountManager.RemoveUser(self.local_user_name)
        )

    def state_exit_unchanged(self):
        """Report that no change was required."""
        self.module.exit_json(changed=False)
def main():
    """Module entry point: build the argument spec and drive the state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            local_user_name=dict(required=True, type='str'),
            local_user_password=dict(required=False, type='str', no_log=True),
            local_user_description=dict(required=False, type='str'),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    VMwareLocalUserManager(module).process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from http.client import HTTPConnection
import json
import re
import base64
import sys
import os
import os.path
# Global script configuration: RPC credentials are filled in by
# get_rpc_cookie(); the rest (host, port, heights, datadir, ...) is
# presumably populated from the settings file in the __main__ block below
# (cut off in this view) — and is also passed explicitly to
# get_block_hashes().
settings = {}
def hex_switchEndian(s):
    """Switch the endianness of a hex string, treating it as pairs of hex chars."""
    pairs = [s[i:i + 2] for i in range(0, len(s), 2)]
    return ''.join(reversed(pairs))
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for bitcoind over HTTP basic auth."""

    def __init__(self, host, port, username, password):
        # The connection is lazy; nothing is contacted until execute().
        credentials = ("%s:%s" % (username, password)).encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(credentials)
        self.conn = HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        """POST `obj` as JSON and return the decoded response, or None on failure."""
        headers = {
            'Authorization': self.authhdr,
            'Content-type': 'application/json',
        }
        try:
            self.conn.request('POST', '/', json.dumps(obj), headers)
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None
        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None
        return json.loads(resp.read().decode('utf-8'))

    @staticmethod
    def build_request(idx, method, params):
        """Build a JSON-RPC request object with id `idx`."""
        request = {'version': '1.1', 'method': method, 'id': idx}
        request['params'] = [] if params is None else params
        return request

    @staticmethod
    def response_is_error(resp_obj):
        """True when the response carries a non-null 'error' member."""
        return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print the hash of every block from min_height to max_height inclusive.

    Requests are batched (at most *max_blocks_per_call* per RPC round-trip).
    Returns None early if the RPC connection fails; exits on RPC errors.
    """
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])
    reverse_bytes = settings['rev_hash_bytes'] == 'true'
    top = settings['max_height'] + 1
    height = settings['min_height']
    while height < top:
        count = min(top - height, max_blocks_per_call)
        batch = [rpc.build_request(i, 'getblockhash', [height + i])
                 for i in range(count)]
        reply = rpc.execute(batch)
        if reply is None:
            print('Cannot continue. Program will halt.')
            return None
        for i, resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height + i, ': ',
                      resp_obj['error'], file=sys.stderr)
                sys.exit(1)
            assert resp_obj['id'] == i  # assume replies are in-sequence
            block_hash = resp_obj['result']
            if reverse_bytes:
                block_hash = hex_switchEndian(block_hash)
            print(block_hash)
        height += count
def get_rpc_cookie():
    """Load RPC credentials from the node's ``.cookie`` file into ``settings``.

    The cookie file holds a single ``user:password`` line. The line is
    stripped so a trailing newline cannot leak into the password, and
    ``partition`` keeps any ':' inside the password itself intact.
    """
    cookie_path = os.path.join(os.path.expanduser(settings['datadir']), '.cookie')
    with open(cookie_path, 'r', encoding="ascii") as f:
        combined = f.readline().strip()
    user, _, password = combined.partition(":")
    settings['rpcuser'] = user
    settings['rpcpassword'] = password
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)
    # Parse the key=value config file. A context manager guarantees the
    # handle is closed even if parsing raises (the original leaked it then).
    with open(sys.argv[1], encoding="utf8") as f:
        for line in f:
            # skip comment lines
            if re.search(r'^\s*#', line):
                continue
            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)
    # Fill in defaults for any setting the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    # Credentials: explicit rpcuser/rpcpassword take precedence; otherwise
    # fall back to the cookie file found in the datadir.
    use_userpass = True
    use_datadir = False
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        use_userpass = False
    if 'datadir' in settings and not use_userpass:
        use_datadir = True
    if not use_userpass and not use_datadir:
        print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)
    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])
    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
    # Get the rpc user and pass from the cookie if the datadir is set
    if use_datadir:
        get_rpc_cookie()
    get_block_hashes(settings)
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
title: Route Module
order: 3
---
# Route Module
[MODES: framework]
## Introduction
The files referenced in `routes.ts` are called Route Modules.
```tsx filename=app/routes.ts
route("teams/:teamId", "./team.tsx"),
// route module ^^^^^^^^
```
Route modules are the foundation of React Router's framework features; they define:
- automatic code-splitting
- data loading
- actions
- revalidation
- error boundaries
- and more
This guide is a quick overview of every route module feature. The rest of the getting started guides will cover these features in more detail.
## Component (`default`)
The `default` export in a route module defines the component that will render when the route matches.
```tsx filename=app/routes/my-route.tsx
export default function MyRouteComponent() {
return (
<div>
<h1>Look ma!</h1>
<p>
I'm still using React Router after like 10 years.
</p>
</div>
);
}
```
### Props passed to the Component
When the component is rendered, it is provided the props defined in `Route.ComponentProps` that React Router will automatically generate for you. These props include:
1. `loaderData`: The data returned from the `loader` function in this route module
2. `actionData`: The data returned from the `action` function in this route module
3. `params`: An object containing the route parameters (if any).
4. `matches`: An array of all the matches in the current route tree.
You can use these props in place of hooks like `useLoaderData` or `useParams`. This may be preferable because they will be automatically typed correctly for the route.
### Using props
```tsx filename=app/routes/my-route-with-default-params.tsx
import type { Route } from "./+types/route-name";
export default function MyRouteComponent({
loaderData,
actionData,
params,
matches,
}: Route.ComponentProps) {
return (
<div>
<h1>Welcome to My Route with Props!</h1>
<p>Loader Data: {JSON.stringify(loaderData)}</p>
<p>Action Data: {JSON.stringify(actionData)}</p>
<p>Route Parameters: {JSON.stringify(params)}</p>
<p>Matched Routes: {JSON.stringify(matches)}</p>
</div>
);
}
```
## `middleware`
Route [middleware][middleware] runs sequentially on the server before and after document and
data requests. This gives you a singular place to do things like logging,
authentication, and post-processing of responses. The `next` function continues down the chain, and on the leaf route the `next` function executes the loaders/actions for the navigation.
Here's an example middleware to log requests on the server:
```tsx filename=root.tsx
async function loggingMiddleware(
{ request, context },
next,
) {
console.log(
`${new Date().toISOString()} ${request.method} ${request.url}`,
);
const start = performance.now();
const response = await next();
const duration = performance.now() - start;
console.log(
`${new Date().toISOString()} Response ${response.status} (${duration}ms)`,
);
return response;
}
export const middleware = [loggingMiddleware];
```
Here's an example middleware to check for logged in users and set the user in
`context` you can then access from loaders:
```tsx filename=routes/_auth.tsx
async function authMiddleware({ request, context }) {
const session = await getSession(request);
const userId = session.get("userId");
if (!userId) {
throw redirect("/login");
}
const user = await getUserById(userId);
context.set(userContext, user);
}
export const middleware = [authMiddleware];
```
<docs-warning>Please make sure you understand [when middleware runs][when-middleware-runs] to make sure your application will behave the way you intend when adding middleware to your routes.</docs-warning>
See also:
- [`middleware` params][middleware-params]
- [Middleware][middleware]
## `clientMiddleware`
This is the client-side equivalent of `middleware` and runs in the browser during client navigations. The only difference from server middleware is that client middleware doesn't return Responses because they're not wrapping an HTTP request on the server.
Here's an example middleware to log requests on the client:
```tsx filename=root.tsx
async function loggingMiddleware(
{ request, context },
next,
) {
console.log(
`${new Date().toISOString()} ${request.method} ${request.url}`,
);
const start = performance.now();
await next(); // 👈 No Response returned
const duration = performance.now() - start;
console.log(
`${new Date().toISOString()} (${duration}ms)`,
);
// ✅ No need to return anything
}
export const clientMiddleware = [loggingMiddleware];
```
See also:
- [Middleware][middleware]
- [Client Data][client-data]
## `loader`
Route loaders provide data to route components before they are rendered. They are only called on the server when server rendering or during the build with pre-rendering.
```tsx
export async function loader() {
return { message: "Hello, world!" };
}
export default function MyRoute({ loaderData }) {
return <h1>{loaderData.message}</h1>;
}
```
See also:
- [`loader` params][loader-params]
## `clientLoader`
Called only in the browser, route client loaders provide data to route components in addition to, or in place of, route loaders.
```tsx
export async function clientLoader({ serverLoader }) {
// call the server loader
const serverData = await serverLoader();
// And/or fetch data on the client
const data = getDataFromClient();
// Return the data to expose through useLoaderData()
return data;
}
```
Client loaders can participate in initial page load hydration of server rendered pages by setting the `hydrate` property on the function:
```tsx
export async function clientLoader() {
// ...
}
clientLoader.hydrate = true as const;
```
<docs-info>
By using `as const`, TypeScript will infer that the type for `clientLoader.hydrate` is `true` instead of `boolean`.
That way, React Router can derive types for `loaderData` based on the value of `clientLoader.hydrate`.
</docs-info>
See also:
- [`clientLoader` params][client-loader-params]
- [Client Data][client-data]
## `action`
Route actions allow server-side data mutations with automatic revalidation of all loader data on the page when called from `<Form>`, `useFetcher`, and `useSubmit`.
```tsx
// route("/list", "./list.tsx")
import { Form } from "react-router";
import { TodoList } from "~/components/TodoList";
// this data will be loaded after the action completes...
export async function loader() {
const items = await fakeDb.getItems();
return { items };
}
// ...so that the list here is updated automatically
export default function Items({ loaderData }) {
return (
<div>
<List items={loaderData.items} />
<Form method="post" navigate={false} action="/list">
<input type="text" name="title" />
<button type="submit">Create Todo</button>
</Form>
</div>
);
}
export async function action({ request }) {
const data = await request.formData();
const todo = await fakeDb.addItem({
title: data.get("title"),
});
return { ok: true };
}
```
See also:
- [`action` params][action-params]
## `clientAction`
Like route actions but only called in the browser.
```tsx
export async function clientAction({ serverAction }) {
fakeInvalidateClientSideCache();
// can still call the server action if needed
const data = await serverAction();
return data;
}
```
See also:
- [`clientAction` params][client-action-params]
- [Client Data][client-data]
## `ErrorBoundary`
When other route module APIs throw, the route module `ErrorBoundary` will render instead of the route component.
```tsx
import {
isRouteErrorResponse,
useRouteError,
} from "react-router";
export function ErrorBoundary() {
const error = useRouteError();
if (isRouteErrorResponse(error)) {
return (
<div>
<h1>
{error.status} {error.statusText}
</h1>
<p>{error.data}</p>
</div>
);
} else if (error instanceof Error) {
return (
<div>
<h1>Error</h1>
<p>{error.message}</p>
<p>The stack trace is:</p>
<pre>{error.stack}</pre>
</div>
);
} else {
return <h1>Unknown Error</h1>;
}
}
```
See also:
- [`useRouteError`][use-route-error]
- [`isRouteErrorResponse`][is-route-error-response]
## `HydrateFallback`
On initial page load, the route component renders only after the client loader is finished. If exported, a `HydrateFallback` can render immediately in place of the route component.
```tsx filename=routes/client-only-route.tsx
export async function clientLoader() {
const data = await fakeLoadLocalGameData();
return data;
}
export function HydrateFallback() {
return <p>Loading Game...</p>;
}
export default function Component({ loaderData }) {
return <Game data={loaderData} />;
}
```
## `headers`
The route `headers` function defines the HTTP headers to be sent with the response when server rendering.
```tsx
export function headers() {
return {
"X-Stretchy-Pants": "its for fun",
"Cache-Control": "max-age=300, s-maxage=3600",
};
}
```
See also:
- [`Headers`][headers]
## `handle`
Route handle allows apps to add anything to a route match in `useMatches` to create abstractions (like breadcrumbs, etc.).
```tsx
export const handle = {
its: "all yours",
};
```
See also:
- [`useMatches`][use-matches]
## `links`
Route links define [`<link>` element][link-element]s to be rendered in the document `<head>`.
```tsx
export function links() {
return [
{
rel: "icon",
href: "/favicon.png",
type: "image/png",
},
{
rel: "stylesheet",
href: "https://example.com/some/styles.css",
},
{
rel: "preload",
href: "/images/banner.jpg",
as: "image",
},
];
}
```
All routes links will be aggregated and rendered through the `<Links />` component, usually rendered in your app root:
```tsx
import { Links } from "react-router";
export default function Root() {
return (
<html>
<head>
<Links />
</head>
<body />
</html>
);
}
```
## `meta`
Route meta defines [meta tags][meta-element] to be rendered in the `<Meta />` component, usually placed in the `<head>`.
<docs-warning>
Since React 19, [using the built-in `<meta>` element](https://react.dev/reference/react-dom/components/meta) is recommended over the use of the route module's `meta` export.
Here is an example of how to use it and the `<title>` element:
```tsx
export default function MyRoute() {
return (
<div>
<title>Very cool app</title>
<meta property="og:title" content="Very cool app" />
<meta
name="description"
content="This app is the best"
/>
{/* The rest of your route content... */}
</div>
);
}
```
</docs-warning>
```tsx filename=app/product.tsx
export function meta() {
return [
{ title: "Very cool app" },
{
property: "og:title",
content: "Very cool app",
},
{
name: "description",
content: "This app is the best",
},
];
}
```
```tsx filename=app/root.tsx
import { Meta } from "react-router";
export default function Root() {
return (
<html>
<head>
<Meta />
</head>
<body />
</html>
);
}
```
The meta of the last matching route is used, allowing you to override parent routes' meta. It's important to note that the entire meta descriptor array is replaced, not merged. This gives you the flexibility to build your own meta composition logic across pages at different levels.
**See also**
- [`meta` params][meta-params]
- [`meta` function return types][meta-function]
## `shouldRevalidate`
In framework mode with SSR, route loaders are automatically revalidated after all navigations and form submissions (this is different from [Data Mode][data-mode-should-revalidate]). This enables middleware and loaders to share a request context and optimize in different ways than they would in Data Mode.
Defining this function allows you to opt out of revalidation for a route loader for navigations and form submissions.
```tsx
import type { ShouldRevalidateFunctionArgs } from "react-router";
export function shouldRevalidate(
arg: ShouldRevalidateFunctionArgs,
) {
return true;
}
```
When using [SPA Mode][spa-mode], there are no server loaders to call on navigations, so `shouldRevalidate` behaves the same as it does in [Data Mode][data-mode-should-revalidate].
[`ShouldRevalidateFunctionArgs` Reference Documentation ↗](https://api.reactrouter.com/v7/interfaces/react-router.ShouldRevalidateFunctionArgs.html)
---
Next: [Rendering Strategies](./rendering)
[middleware-params]: https://api.reactrouter.com/v7/types/react-router.MiddlewareFunction.html
[middleware]: ../../how-to/middleware
[when-middleware-runs]: ../../how-to/middleware#when-middleware-runs
[loader-params]: https://api.reactrouter.com/v7/interfaces/react-router.LoaderFunctionArgs
[client-loader-params]: https://api.reactrouter.com/v7/types/react-router.ClientLoaderFunctionArgs
[action-params]: https://api.reactrouter.com/v7/interfaces/react-router.ActionFunctionArgs
[client-action-params]: https://api.reactrouter.com/v7/types/react-router.ClientActionFunctionArgs
[use-route-error]: ../../api/hooks/useRouteError
[is-route-error-response]: ../../api/utils/isRouteErrorResponse
[headers]: https://developer.mozilla.org/en-US/docs/Web/API/Response/headers
[use-matches]: ../../api/hooks/useMatches
[link-element]: https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link
[meta-element]: https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta
[meta-params]: https://api.reactrouter.com/v7/interfaces/react-router.MetaArgs
[meta-function]: https://api.reactrouter.com/v7/types/react-router.MetaDescriptor.html
[data-mode-should-revalidate]: ../data/route-object#shouldrevalidate
[spa-mode]: ../../how-to/spa
[client-data]: ../../how-to/client-data
|
unknown
|
github
|
https://github.com/remix-run/react-router
|
docs/start/framework/route-module.md
|
"""This file contains tests for the technicals module of the DJ Scrooge backtesting API.
Copyright (C) 2012 James Adam Cataldo
This file is part of Pengoe.
Pengoe is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pengoe. If not, see <http://www.gnu.org/licenses/>.
"""
from djscrooge.technicals import simple_moving_average, accumulate, \
channel_breakout, channel_normalization, on_balance_volume, \
accumulation_distribution_volume, money_flow, negative_volume_index, \
advance_decline_ratio, net_volume_ratio, high_low_ratio, capm
from proboscis import test
from proboscis.asserts import assert_equal, assert_true
@test
def test_simple_moving_average():
    """Test the simple_moving_average function."""
    window = 2
    result = simple_moving_average(range(0, 6), window)
    assert_equal(result, [0.0, 0.5, 1.5, 2.5, 3.5, 4.5])
@test
def test_accumulate():
    """Test the accumulate function."""
    result = accumulate([1, 2, 3, 4])
    assert_equal(result, [1, 3, 6, 10])
@test
def test_channel_breakout():
    """Test the channel_breakout function."""
    series = [0, 1, 4, 2, 3, 1, 6, 0]
    result = channel_breakout(series, 2)
    assert_equal(result, [0, 0, 1, 0, 0, -1, 1, -1])
@test
def test_channel_normalization():
    """Test the channel_normalization function."""
    series = [0, 1, 4, 2, 3, 1, 6, 0]
    result = channel_normalization(series, 3)
    assert_equal(result, [50.0, 100.0, 100.0, 100.0 / 3.0, 50.0, 0.0, 100.0, 0.0])
@test
def test_on_balance_volume():
    """Test the on_balance_volume function."""
    closes = [1, 2, 2, 1]
    volumes = [1, 2, 3, 4]
    result = on_balance_volume(closes, volumes)
    assert_equal(result, [0, 2, 0, -4])
@test
def test_accumulation_distribution_volume():
    """Test the accumulation_distribution_volume function."""
    highs = [4] * 5
    lows = [0] * 5
    closes = [2, 1, 3, 0, 4]
    volumes = [1] * 5
    result = accumulation_distribution_volume(highs, lows, closes, volumes)
    assert_equal(result, [0.0, -0.5, 0.5, -1.0, 1.0])
@test
def test_money_flow():
    """Test the money_flow function."""
    highs = [4] * 5
    lows = [0] * 5
    closes = [2, 1, 3, 0, 4]
    volumes = [1] * 5
    result = money_flow(highs, lows, closes, volumes)
    expected = [0.0,
                -0.5 * (5.0 / 3.0),
                0.5 * (7.0 / 3.0),
                -1.0 * (4.0 / 3.0),
                1.0 * (8.0 / 3.0)]
    assert_equal(result, expected)
@test
def test_negative_volume_index():
    """Test the negative_volume_index function."""
    result = negative_volume_index([1, 2, 4, 8], [1, 2, 1, 2])
    assert_equal(result, [0.0, 0.0, 100.0, 0.0])
@test
def test_advance_decline_ratio():
    """Test the advance_decline_ratio function."""
    result = advance_decline_ratio([1, 2, 3], [3, 2, 1], [1, 0, 2])
    assert_equal(result, [-0.4, 0.0, 1.0 / 3.0])
@test
def test_net_volume_ratio():
    """Test the net_volume_ratio function."""
    result = net_volume_ratio([1, 2, 3], [3, 2, 1], [1, 0, 2])
    assert_equal(result, [-0.4, 0.0, 1.0 / 3.0])
@test
def test_high_low_ratio():
    """Test the high_low_ratio function."""
    advancing = [1, 2, 3]
    declining = [3, 2, 1]
    unchanged = [1, 0, 2]
    result = high_low_ratio(advancing, declining, unchanged,
                            advancing, declining)
    assert_equal(result, [-0.4, 0.0, 1.0 / 3.0])
@test
def test_capm():
    """Test the capm function."""
    investment = [1.0, 1.21, 4.84]
    market = [1.0, 1.1, 2.2]
    alpha, beta, r = capm(investment, market)
    tolerance = 1.0e-8
    assert_true(abs(alpha) < tolerance)
    assert_true(abs(beta - 2.0) < tolerance)
    assert_true(abs(r - 1.0) < tolerance)
if __name__ == "__main__":
from proboscis import TestProgram
TestProgram().run_and_exit()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, using, this
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.web import XmlLexer
__all__ = ['CSharpLexer', 'BooLexer', 'VbNetLexer', 'CSharpAspxLexer',
'VbNetAspxLexer']
def _escape(st):
return st.replace(u'\\', ur'\\').replace(u'-', ur'\-').\
replace(u'[', ur'\[').replace(u']', ur'\]')
class CSharpLexer(RegexLexer):
    """
    For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
    source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed.  Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    *New in Pygments 0.8.*
    """
    name = 'C#'
    aliases = ['csharp', 'c#']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp'] # inferred
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    # for the range of allowed unicode characters in identifiers,
    # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    # One identifier regex per unicode level; 'full' is much slower.
    levels = {
        'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
        'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
                  '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
                  uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
        'full': ('@?(?:_|[^' +
                 _escape(uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl')) + '])'
                 + '[^' + _escape(uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo',
                                                'Nl', 'Nd', 'Pc', 'Cf', 'Mn',
                                                'Mc')) + ']*'),
    }
    # Build one complete token table per unicode level; token_variants tells
    # RegexLexer's metaclass not to process 'tokens' eagerly.
    tokens = {}
    token_variants = True
    for levelname, cs_ident in levels.items():
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
                 r'(' + cs_ident + ')'                           # method name
                 r'(\s*)(\()',                                   # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text), # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*](.|\n)*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(\\\\|\\"|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                 Keyword)),
                (r'(abstract|as|base|break|case|catch|'
                 r'checked|const|continue|default|delegate|'
                 r'do|else|enum|event|explicit|extern|false|finally|'
                 r'fixed|for|foreach|goto|if|implicit|in|interface|'
                 r'internal|is|lock|new|null|operator|'
                 r'out|override|params|private|protected|public|readonly|'
                 r'ref|return|sealed|sizeof|stackalloc|static|'
                 r'switch|this|throw|true|try|typeof|'
                 r'unchecked|unsafe|virtual|void|while|'
                 r'get|set|new|partial|yield|add|remove|value)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 r'short|string|uint|ulong|ushort)\b\??', Keyword.Type),
                (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'), # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ]
        }
    def __init__(self, **options):
        """Select the token table for the requested `unicodelevel` option."""
        level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]
        RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
    """
    For `Boo <http://boo.codehaus.org/>`_ source code.
    """
    name = 'Boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|//).*$', Comment.Single),
            # Multiline comments may nest: handled by the 'comment' state below.
            (r'/[*]', Comment.Multiline, 'comment'),
            (r'[]{}:(),.;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            (r'/(\\\\|\\/|[^/\s])/', String.Regex),
            (r'@/(\\\\|\\/|[^/])*/', String.Regex),
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'(as|abstract|callable|constructor|destructor|do|import|'
             r'enum|event|final|get|interface|internal|of|override|'
             r'partial|private|protected|public|return|set|static|'
             r'struct|transient|virtual|yield|super|and|break|cast|'
             r'continue|elif|else|ensure|except|for|given|goto|if|in|'
             r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
             r'while|from|as)\b', Keyword),
            (r'def(?=\s+\(.*?\))', Keyword),
            (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
             r'assert|checked|enumerate|filter|getter|len|lock|map|'
             r'matrix|max|min|normalArrayIndexing|print|property|range|'
             r'rawArrayIndexing|required|typeof|unchecked|using|'
             r'yieldAll|zip)\b', Name.Builtin),
            ('"""(\\\\|\\"|.*?)"""', String.Double),
            ('"(\\\\|\\"|[^"]*?)"', String.Double),
            ("'(\\\\|\\'|[^']*?)'", String.Single),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'[0-9][0-9\.]*(m|ms|d|h|s)', Number),
            (r'0\d+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        # Nested /* */ comments: '#push' re-enters this state on each opener.
        'comment': [
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline)
        ],
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        'classname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'namespace': [
            ('[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop')
        ]
    }
class VbNetLexer(RegexLexer):
    """
    For
    `Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
    source code.
    """
    name = 'VB.net'
    aliases = ['vb.net', 'vbnet']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
    # VB is case-insensitive, hence re.IGNORECASE.
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'^\s*<.*?>', Name.Attribute),
            (r'\s+', Text),
            (r'\n', Text),
            (r'rem\b.*?\n', Comment),
            (r"'.*?\n", Comment),
            (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#End\s+If|#Const|'
             r'#ExternalSource.*?\n|#End\s+ExternalSource|'
             r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
             Comment.Preproc),
            (r'[\(\){}!#,.:]', Punctuation),
            (r'Option\s+(Strict|Explicit|Compare)\s+'
             r'(On|Off|Binary|Text)', Keyword.Declaration),
            (r'(?<!\.)(AddHandler|Alias|'
             r'ByRef|ByVal|Call|Case|Catch|CBool|CByte|CChar|CDate|'
             r'CDec|CDbl|CInt|CLng|CObj|Continue|CSByte|CShort|'
             r'CSng|CStr|CType|CUInt|CULng|CUShort|Declare|'
             r'Default|Delegate|DirectCast|Do|Each|Else|ElseIf|'
             r'EndIf|Erase|Error|Event|Exit|False|Finally|For|'
             r'Friend|Get|Global|GoSub|GoTo|Handles|If|'
             r'Implements|Inherits|Interface|'
             r'Let|Lib|Loop|Me|MustInherit|'
             r'MustOverride|MyBase|MyClass|Narrowing|New|Next|'
             r'Not|Nothing|NotInheritable|NotOverridable|Of|On|'
             r'Operator|Option|Optional|Overloads|Overridable|'
             r'Overrides|ParamArray|Partial|Private|Protected|'
             r'Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|'
             r'Return|Select|Set|Shadows|Shared|Single|'
             r'Static|Step|Stop|SyncLock|Then|'
             r'Throw|To|True|Try|TryCast|Wend|'
             r'Using|When|While|Widening|With|WithEvents|'
             r'WriteOnly)\b', Keyword),
            # '(?<!\.)' guards keep member access (x.End) from matching keywords.
            (r'(?<!\.)End\b', Keyword, 'end'),
            (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
            (r'(?<!\.)(Function|Sub|Property)(\s+)',
             bygroups(Keyword, Text), 'funcname'),
            (r'(?<!\.)(Class|Structure|Enum)(\s+)',
             bygroups(Keyword, Text), 'classname'),
            (r'(?<!\.)(Module|Namespace|Imports)(\s+)',
             bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             r'UShort)\b', Keyword.Type),
            (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
             r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
            (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
             r'<=|>=|<>|[-&*/\\^+=<>]',
             Operator),
            ('"', String, 'string'),
            ('[a-zA-Z_][a-zA-Z0-9_]*[%&@!#$]?', Name),
            ('#.*?#', Literal.Date),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'_\n', Text), # Line continuation
        ],
        'string': [
            (r'""', String),
            (r'"C?', String, '#pop'),
            (r'[^"]+', String),
        ],
        'dim': [
            (r'[a-z_][a-z0-9_]*', Name.Variable, '#pop'),
            (r'', Text, '#pop'), # any other syntax
        ],
        'funcname': [
            (r'[a-z_][a-z0-9_]*', Name.Function, '#pop'),
        ],
        'classname': [
            (r'[a-z_][a-z0-9_]*', Name.Class, '#pop'),
        ],
        'namespace': [
            (r'[a-z_][a-z0-9_.]*', Name.Namespace, '#pop'),
        ],
        'end': [
            (r'\s+', Text),
            (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
             Keyword, '#pop'),
            (r'', Text, '#pop'),
        ]
    }
class GenericAspxLexer(RegexLexer):
    """
    Lexer for ASP.NET pages.

    Emits server-code bodies as ``Other`` so a DelegatingLexer subclass can
    re-lex them in the page's programming language.
    """
    name = 'aspx-gen'
    filenames = []
    mimetypes = []
    flags = re.DOTALL
    tokens = {
        'root': [
            # <% ... %> server blocks: delimiters as tags, body left as Other.
            (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
                                                        Other,
                                                        using(XmlLexer))),
            # Everything else is handed to the XML lexer.
            (r'(.+?)(?=<)', using(XmlLexer)),
            (r'.+', using(XmlLexer)),
        ],
    }
#TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting C# within ASP.NET pages.
    """
    name = 'aspx-cs'
    aliases = ['aspx-cs']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []
    def __init__(self, **options):
        # C# lexes the server-code portions that GenericAspxLexer marks Other.
        super(CSharpAspxLexer, self).__init__(CSharpLexer,GenericAspxLexer,
                                              **options)
    def analyse_text(text):
        # NOTE(review): no ``self`` — Pygments appears to treat analyse_text
        # as static; confirm against pygments.lexer.LexerMeta.
        if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
            return 0.2
        elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
            return 0.15
        return 0.001 # TODO really only for when filename matched...
class VbNetAspxLexer(DelegatingLexer):
"""
Lexer for highligting Visual Basic.net within ASP.NET pages.
"""
name = 'aspx-vb'
aliases = ['aspx-vb']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(VbNetAspxLexer, self).__init__(VbNetLexer,GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
return 0.15
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.test import TestCase
from django.http import HttpRequest, HttpResponse, HttpResponseBadRequest
from django.template import Template, Context
from django.template.loader import render_to_string
from django.conf import settings
from honeypot.middleware import HoneypotViewMiddleware, HoneypotResponseMiddleware
from honeypot.decorators import verify_honeypot_value, check_honeypot, honeypot_exempt
def _get_GET_request():
    """Return a fresh, untouched HttpRequest (method left at its default)."""
    request = HttpRequest()
    return request
def _get_POST_request():
    """Return a fresh HttpRequest whose method is forced to POST."""
    request = HttpRequest()
    request.method = "POST"
    return request
def view_func(request):
    """Trivial view used as the target of decorators/middleware in tests."""
    response = HttpResponse()
    return response
class HoneypotTestCase(TestCase):
    """Base test case that resets honeypot settings before every test."""

    def setUp(self):
        # Drop any overrides a previous test may have left behind so each
        # test starts from the same configuration.
        for name in ("HONEYPOT_VALUE", "HONEYPOT_VERIFIER"):
            if hasattr(settings, name):
                delattr(settings, name)
        settings.HONEYPOT_FIELD_NAME = "honeypot"
class VerifyHoneypotValue(HoneypotTestCase):
    """Tests for verify_honeypot_value().

    Fix: replaced assertEquals with assertEqual — the former is a
    deprecated alias that was removed in Python 3.12.
    """
    def test_no_call_on_get(self):
        """ test that verify_honeypot_value is not called when request.method == GET """
        request = _get_GET_request()
        resp = verify_honeypot_value(request, None)
        self.assertEqual(resp, None)
    def test_verifier_false(self):
        """ test that verify_honeypot_value fails when HONEYPOT_VERIFIER returns False """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ""
        settings.HONEYPOT_VERIFIER = lambda x: False
        resp = verify_honeypot_value(request, None)
        self.assertEqual(resp.__class__, HttpResponseBadRequest)
    def test_field_missing(self):
        """test that verify_honeypot_value fails when HONEYPOT_FIELD_NAME is missing from
        request.POST"""
        # Docstring fixed: the assertion expects a BadRequest (failure),
        # not success as the old docstring claimed.
        request = _get_POST_request()
        resp = verify_honeypot_value(request, None)
        self.assertEqual(resp.__class__, HttpResponseBadRequest)
    def test_field_blank(self):
        """ test that verify_honeypot_value succeeds when HONEYPOT_VALUE is blank """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ""
        resp = verify_honeypot_value(request, None)
        self.assertEqual(resp, None)
    def test_honeypot_value_string(self):
        """ test that verify_honeypot_value succeeds when HONEYPOT_VALUE is a string """
        request = _get_POST_request()
        settings.HONEYPOT_VALUE = "(test string)"
        request.POST[settings.HONEYPOT_FIELD_NAME] = settings.HONEYPOT_VALUE
        resp = verify_honeypot_value(request, None)
        self.assertEqual(resp, None)
    def test_honeypot_value_callable(self):
        """ test that verify_honeypot_value succeeds when HONEYPOT_VALUE is a callable """
        request = _get_POST_request()
        settings.HONEYPOT_VALUE = lambda: "(test string)"
        request.POST[settings.HONEYPOT_FIELD_NAME] = settings.HONEYPOT_VALUE()
        resp = verify_honeypot_value(request, None)
        self.assertEqual(resp, None)
class CheckHoneypotDecorator(HoneypotTestCase):
    """Tests for the check_honeypot view decorator.

    Fix: replaced assertEquals with assertEqual — the former is a
    deprecated alias that was removed in Python 3.12.
    """
    def test_default_decorator(self):
        """ test that @check_honeypot works and defaults to HONEYPOT_FIELD_NAME """
        new_view_func = check_honeypot(view_func)
        request = _get_POST_request()
        resp = new_view_func(request)
        self.assertEqual(resp.__class__, HttpResponseBadRequest)
    def test_decorator_argument(self):
        """ test that check_honeypot(view, 'fieldname') works """
        new_view_func = check_honeypot(view_func, "fieldname")
        request = _get_POST_request()
        resp = new_view_func(request)
        self.assertEqual(resp.__class__, HttpResponseBadRequest)
    def test_decorator_py24_syntax(self):
        """ test that @check_honeypot syntax works """
        @check_honeypot("field")
        def new_view_func(request):
            return HttpResponse()
        request = _get_POST_request()
        resp = new_view_func(request)
        self.assertEqual(resp.__class__, HttpResponseBadRequest)
class RenderHoneypotField(HoneypotTestCase):
    """Tests for the {% render_honeypot_field %} template tag.

    Fix: replaced assertEquals with assertEqual — the former is a
    deprecated alias that was removed in Python 3.12.
    """
    def _assert_rendered_field(self, template, fieldname, value=""):
        # Compare the tag output against rendering the field template
        # directly with the same context.
        correct = render_to_string(
            "honeypot/honeypot_field.html", {"fieldname": fieldname, "value": value}
        )
        rendered = template.render(Context())
        self.assertEqual(rendered, correct)
    def test_default_templatetag(self):
        """ test that {% render_honeypot_field %} works and defaults to HONEYPOT_FIELD_NAME """
        template = Template("{% load honeypot %}{% render_honeypot_field %}")
        self._assert_rendered_field(template, settings.HONEYPOT_FIELD_NAME, "")
    def test_templatetag_honeypot_value(self):
        """ test that {% render_honeypot_field %} uses settings.HONEYPOT_VALUE """
        template = Template("{% load honeypot %}{% render_honeypot_field %}")
        settings.HONEYPOT_VALUE = "(leave blank)"
        self._assert_rendered_field(
            template, settings.HONEYPOT_FIELD_NAME, settings.HONEYPOT_VALUE
        )
    def test_templatetag_argument(self):
        """ test that {% render_honeypot_field 'fieldname' %} works """
        template = Template(
            '{% load honeypot %}{% render_honeypot_field "fieldname" %}'
        )
        self._assert_rendered_field(template, "fieldname", "")
class HoneypotMiddleware(HoneypotTestCase):
    """Tests for HoneypotViewMiddleware / HoneypotResponseMiddleware.

    Fix: replaced assertEquals with assertEqual — the former is a
    deprecated alias that was removed in Python 3.12.
    """
    _response_body = '<form method="POST"></form>'
    def test_view_middleware_invalid(self):
        """ don't call view when HONEYPOT_VERIFIER returns False """
        request = _get_POST_request()
        retval = HoneypotViewMiddleware(lambda request: None).process_view(
            request, view_func, (), {}
        )
        self.assertEqual(retval.__class__, HttpResponseBadRequest)
    def test_view_middleware_valid(self):
        """ call view when HONEYPOT_VERIFIER returns True """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ""
        retval = HoneypotViewMiddleware(lambda request: None).process_view(
            request, view_func, (), {}
        )
        self.assertEqual(retval, None)
    def test_response_middleware_rewrite(self):
        """ ensure POST forms are rewritten """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ""
        response = HttpResponse(self._response_body)
        HoneypotResponseMiddleware(lambda request: response)(request)
        self.assertNotContains(response, self._response_body)
        self.assertContains(response, 'name="%s"' % settings.HONEYPOT_FIELD_NAME)
    def test_response_middleware_contenttype_exclusion(self):
        """ ensure POST forms are not rewritten for non-html content types """
        request = _get_POST_request()
        request.POST[settings.HONEYPOT_FIELD_NAME] = ""
        response = HttpResponse(self._response_body, content_type="text/javascript")
        HoneypotResponseMiddleware(lambda request: response)(request)
        self.assertContains(response, self._response_body)
    def test_response_middleware_unicode(self):
        """ ensure that POST form rewriting works with unicode templates """
        request = _get_GET_request()
        unicode_body = u"\u2603" + self._response_body  # add unicode snowman
        response = HttpResponse(unicode_body)
        HoneypotResponseMiddleware(lambda request: response)(request)
        self.assertNotContains(response, unicode_body)
        self.assertContains(response, 'name="%s"' % settings.HONEYPOT_FIELD_NAME)
    def test_exempt_view(self):
        """ call view no matter what if view is exempt """
        request = _get_POST_request()
        exempt_view_func = honeypot_exempt(view_func)
        assert exempt_view_func.honeypot_exempt is True
        retval = HoneypotViewMiddleware(lambda request: None).process_view(
            request, exempt_view_func, (), {}
        )
        self.assertEqual(retval, None)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import os
import tarfile
import zipfile
from urllib.parse import urlsplit
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.validators import MinLengthValidator
from django.db.models import Q
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.models import BaseModelFormSet, modelformset_factory
from django.forms.widgets import RadioSelect
from django.utils.encoding import force_text
from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _, ungettext
import waffle
from django_statsd.clients import statsd
from rest_framework.exceptions import Throttled
from olympia import amo
from olympia.access import acl
from olympia.activity.models import ActivityLog
from olympia.addons import tasks as addons_tasks
from olympia.addons.models import (
Addon, AddonApprovalsCounter, AddonCategory, AddonUser,
AddonUserPendingConfirmation, Category, DeniedSlug, Preview)
from olympia.addons.utils import verify_mozilla_trademark
from olympia.amo.fields import HttpHttpsOnlyURLField, ReCaptchaField
from olympia.amo.forms import AMOModelForm
from olympia.amo.messages import DoubleSafe
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import (
remove_icons, slug_validator, slugify, sorted_groupby)
from olympia.amo.validators import OneOrMoreLetterOrNumberCharacterValidator
from olympia.applications.models import AppVersion
from olympia.blocklist.models import Block
from olympia.constants.categories import CATEGORIES, CATEGORIES_NO_APP
from olympia.devhub.utils import (
fetch_existing_translations_from_addon, UploadRestrictionChecker)
from olympia.devhub.widgets import CategoriesSelectMultiple, IconTypeSelect
from olympia.files.models import FileUpload
from olympia.files.utils import SafeZip, archive_member_validator, parse_addon
from olympia.tags.models import Tag
from olympia.translations import LOCALES
from olympia.translations.fields import (
LocaleErrorMessage, TransField, TransTextarea)
from olympia.translations.forms import TranslationFormMixin
from olympia.translations.models import Translation, delete_translation
from olympia.translations.widgets import (
TranslationTextarea, TranslationTextInput)
from olympia.users.models import (
EmailUserRestriction, UserEmailField, UserProfile)
from olympia.versions.models import (
VALID_SOURCE_EXTENSIONS, ApplicationsVersions, License, Version)
from . import tasks
def clean_addon_slug(slug, instance):
    """Validate an add-on slug: well-formed, not taken, and not denied.

    Raises forms.ValidationError on any violation; returns the slug.
    """
    slug_validator(slug, lower=False)
    if slug == instance.slug:
        # Slug unchanged: no uniqueness/deny-list checks needed.
        return slug
    already_taken = Addon.objects.filter(slug=slug).exists()
    if already_taken:
        raise forms.ValidationError(ugettext(
            'This slug is already in use. Please choose another.'))
    if DeniedSlug.blocked(slug):
        msg = ugettext(u'The slug cannot be "%(slug)s". '
                       u'Please choose another.')
        raise forms.ValidationError(msg % {'slug': slug})
    return slug
def clean_tags(request, tags):
    """Validate a comma-separated tag string; return the set of tag slugs.

    Enforces the denied/restricted tag lists, the maximum tag count and
    per-tag length bounds, raising forms.ValidationError on violation.
    Restricted tags are only allowed for (and not counted against) users
    with the ADDONS_EDIT permission.
    """
    target = [slugify(t, spaces=True, lower=True) for t in tags.split(',')]
    target = set(filter(None, target))
    min_len = amo.MIN_TAG_LENGTH
    max_len = Tag._meta.get_field('tag_text').max_length
    max_tags = amo.MAX_TAGS
    total = len(target)
    denied = (Tag.objects.values_list('tag_text', flat=True)
              .filter(tag_text__in=target, denied=True))
    if denied:
        # L10n: {0} is a single tag or a comma-separated list of tags.
        msg = ungettext('Invalid tag: {0}', 'Invalid tags: {0}',
                        len(denied)).format(', '.join(denied))
        raise forms.ValidationError(msg)
    restricted = (Tag.objects.values_list('tag_text', flat=True)
                  .filter(tag_text__in=target, restricted=True))
    if not acl.action_allowed(request, amo.permissions.ADDONS_EDIT):
        if restricted:
            # L10n: {0} is a single tag or a comma-separated list of tags.
            msg = ungettext('"{0}" is a reserved tag and cannot be used.',
                            '"{0}" are reserved tags and cannot be used.',
                            len(restricted)).format('", "'.join(restricted))
            raise forms.ValidationError(msg)
    else:
        # Admin's restricted tags don't count towards the limit.
        total = len(target - set(restricted))
    if total > max_tags:
        num = total - max_tags
        msg = ungettext('You have {0} too many tags.',
                        'You have {0} too many tags.', num).format(num)
        raise forms.ValidationError(msg)
    if any(t for t in target if len(t) > max_len):
        # Fix: translate the template first, then interpolate. Previously
        # the string was %-formatted *before* being passed to ugettext(),
        # so the catalog lookup used a message id that never matches.
        raise forms.ValidationError(
            ugettext(
                'All tags must be %s characters or less after invalid '
                'characters are removed.') % max_len)
    if any(t for t in target if len(t) < min_len):
        msg = ungettext('All tags must be at least {0} character.',
                        'All tags must be at least {0} characters.',
                        min_len).format(min_len)
        raise forms.ValidationError(msg)
    return target
class AddonFormBase(TranslationFormMixin, forms.ModelForm):
    """Base form for add-on edit forms.

    Expects `request` (and optionally `version`) kwargs, adds common
    name/summary validators, and flags the add-on for content review when
    reviewed metadata fields change on save.
    """
    # Changing any of these fields re-triggers content review (when the
    # 'metadata-content-review' waffle switch is active).
    fields_to_trigger_content_review = ('name', 'summary')

    def __init__(self, *args, **kw):
        self.request = kw.pop('request')
        self.version = kw.pop('version', None)
        super(AddonFormBase, self).__init__(*args, **kw)
        for field in ('name', 'summary'):
            if field in self.fields:
                self.fields[field].validators.append(
                    OneOrMoreLetterOrNumberCharacterValidator())

    class Meta:
        # Fix: this was `models = Addon`, a typo — Django's ModelForm
        # machinery only reads `Meta.model` and silently ignores unknown
        # attributes, leaving this base form without a model class.
        model = Addon
        fields = ('name', 'slug', 'summary', 'tags')

    def clean_slug(self):
        return clean_addon_slug(self.cleaned_data['slug'], self.instance)

    def clean_name(self):
        user = getattr(self.request, 'user', None)
        name = verify_mozilla_trademark(
            self.cleaned_data['name'], user,
            form=self)
        return name

    def clean_tags(self):
        return clean_tags(self.request, self.cleaned_data['tags'])

    def get_tags(self, addon):
        """Return the tag strings visible to the current user."""
        if acl.action_allowed(self.request, amo.permissions.ADDONS_EDIT):
            return list(addon.tags.values_list('tag_text', flat=True))
        else:
            return list(addon.tags.filter(restricted=False)
                        .values_list('tag_text', flat=True))

    def save(self, *args, **kwargs):
        """Save the form; reset content review if reviewed fields changed."""
        metadata_content_review = (waffle.switch_is_active(
            'metadata-content-review') and
            self.instance and
            self.instance.has_listed_versions())
        # Snapshot the reviewed translations before saving so we can tell
        # whether any of them changed.
        existing_data = (
            fetch_existing_translations_from_addon(
                self.instance, self.fields_to_trigger_content_review)
            if metadata_content_review else {})
        obj = super().save(*args, **kwargs)
        if not metadata_content_review:
            return obj
        new_data = (
            fetch_existing_translations_from_addon(
                obj, self.fields_to_trigger_content_review)
        )
        if existing_data != new_data:
            # flag for content review
            statsd.incr('devhub.metadata_content_review_triggered')
            AddonApprovalsCounter.reset_content_for_addon(addon=obj)
        return obj
class CategoryForm(forms.Form):
    """Category selection for one application; saves the diff of
    AddonCategory rows and enforces count/miscellaneous rules."""
    application = forms.TypedChoiceField(
        choices=amo.APPS_CHOICES, coerce=int, widget=forms.HiddenInput,
        required=True)
    categories = forms.ModelMultipleChoiceField(
        queryset=Category.objects.all(), widget=CategoriesSelectMultiple)
    def save(self, addon):
        # Diff the selected categories against the stored ones and only
        # touch the rows that actually changed.
        application = self.cleaned_data.get('application')
        categories_new = [c.id for c in self.cleaned_data['categories']]
        categories_old = [
            c.id for c in
            addon.app_categories.get(amo.APP_IDS[application].short, [])]
        # Add new categories.
        for c_id in set(categories_new) - set(categories_old):
            AddonCategory(addon=addon, category_id=c_id).save()
        # Remove old categories.
        for c_id in set(categories_old) - set(categories_new):
            AddonCategory.objects.filter(
                addon=addon, category_id=c_id).delete()
        # Remove old, outdated categories cache on the model.
        del addon.all_categories
        # Make sure the add-on is properly re-indexed
        addons_tasks.index_addons.delay([addon.id])
    def clean_categories(self):
        categories = self.cleaned_data['categories']
        total = categories.count()
        max_cat = amo.MAX_CATEGORIES
        if total > max_cat:
            # L10n: {0} is the number of categories.
            raise forms.ValidationError(ungettext(
                'You can have only {0} category.',
                'You can have only {0} categories.',
                max_cat).format(max_cat))
        # "Miscellaneous" is exclusive: it cannot be combined with others.
        has_misc = list(filter(lambda x: x.misc, categories))
        if has_misc and total > 1:
            raise forms.ValidationError(ugettext(
                'The miscellaneous category cannot be combined with '
                'additional categories.'))
        return categories
class BaseCategoryFormSet(BaseFormSet):
    """Formset pairing one CategoryForm per compatible application,
    seeding each form with the add-on's current categories."""
    def __init__(self, *args, **kw):
        self.addon = kw.pop('addon')
        self.request = kw.pop('request', None)
        super(BaseCategoryFormSet, self).__init__(*args, **kw)
        self.initial = []
        apps = sorted(self.addon.compatible_apps.keys(), key=lambda x: x.id)
        # Drop any apps that don't have appropriate categories.
        qs = Category.objects.filter(type=self.addon.type)
        app_cats = {k: list(v) for k, v in sorted_groupby(qs, 'application')}
        for app in list(apps):
            if app and not app_cats.get(app.id):
                apps.remove(app)
        if not app_cats:
            apps = []
        # One initial dict per app: the add-on's current category ids.
        for app in apps:
            cats = self.addon.app_categories.get(app.short, [])
            self.initial.append({'categories': [c.id for c in cats]})
        # Wire each form to its app and restrict its choices accordingly.
        for app, form in zip(apps, self.forms):
            key = app.id if app else None
            form.request = self.request
            form.initial['application'] = key
            form.app = app
            cats = sorted(app_cats[key], key=lambda x: x.name)
            form.fields['categories'].choices = [(c.id, c.name) for c in cats]
    def save(self):
        # Each sub-form saves its own application's category diff.
        for f in self.forms:
            f.save(self.addon)
# One CategoryForm per compatible application; extra=0 because the forms
# are driven entirely by the initial data built in BaseCategoryFormSet.
CategoryFormSet = formset_factory(form=CategoryForm,
                                  formset=BaseCategoryFormSet, extra=0)
def icons():
    """
    Generates a list of tuples for the default icons for add-ons,
    in the format (pseudo-mime-type, description).
    """
    icons = [('image/jpeg', 'jpeg'), ('image/png', 'png'), ('', 'default')]
    dirs, files = storage.listdir(settings.ADDON_ICONS_DEFAULT_PATH)
    for fname in files:
        # NOTE(review): filenames are treated as bytes (b'32', split(b'-'))
        # — assumes the storage backend returns bytes names; confirm
        # against the configured default storage.
        if b'32' in fname and b'default' not in fname:
            icon_name = force_text(fname.split(b'-')[0])
            icons.append(('icon/%s' % icon_name, icon_name))
    return sorted(icons)
class AddonFormMedia(AddonFormBase):
    """Form for the add-on icon: either a built-in icon type or a
    previously-uploaded file identified by its upload hash."""
    icon_type = forms.CharField(widget=IconTypeSelect(
        choices=[]), required=False)
    icon_upload_hash = forms.CharField(required=False)
    class Meta:
        model = Addon
        fields = ('icon_upload_hash', 'icon_type')
    def __init__(self, *args, **kwargs):
        super(AddonFormMedia, self).__init__(*args, **kwargs)
        # Add icons here so we only read the directory when
        # AddonFormMedia is actually being used.
        self.fields['icon_type'].widget.choices = icons()
    def save(self, addon, commit=True):
        # If a custom icon was uploaded, replace the old icons and resize
        # the new one asynchronously.
        if self.cleaned_data['icon_upload_hash']:
            upload_hash = self.cleaned_data['icon_upload_hash']
            upload_path = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
            dirname = addon.get_icon_dir()
            destination = os.path.join(dirname, '%s' % addon.id)
            remove_icons(destination)
            tasks.resize_icon.delay(
                upload_path, destination, amo.ADDON_ICON_SIZES,
                set_modified_on=addon.serializable_reference())
        return super(AddonFormMedia, self).save(commit)
class AdditionalDetailsForm(AddonFormBase):
    """Form for an add-on's additional details: default locale, homepage,
    tags and contributions URL."""
    default_locale = forms.TypedChoiceField(choices=LOCALES)
    homepage = TransField.adapt(HttpHttpsOnlyURLField)(required=False)
    tags = forms.CharField(required=False)
    contributions = HttpHttpsOnlyURLField(required=False, max_length=255)
    class Meta:
        model = Addon
        fields = ('default_locale', 'homepage', 'tags', 'contributions')
    def __init__(self, *args, **kw):
        super(AdditionalDetailsForm, self).__init__(*args, **kw)
        # Seed the free-text tags field with the user-visible tags.
        if self.fields.get('tags'):
            self.fields['tags'].initial = ', '.join(
                self.get_tags(self.instance))
    def clean_contributions(self):
        # Only URLs on an allowed contribution domain (or subdomain) pass.
        # NOTE(review): assumes urlsplit() yields a hostname here — the
        # URLField validation presumably guarantees it; confirm.
        if self.cleaned_data['contributions']:
            hostname = urlsplit(self.cleaned_data['contributions']).hostname
            if not hostname.endswith(amo.VALID_CONTRIBUTION_DOMAINS):
                raise forms.ValidationError(ugettext(
                    'URL domain must be one of [%s], or a subdomain.'
                ) % ', '.join(amo.VALID_CONTRIBUTION_DOMAINS))
        return self.cleaned_data['contributions']
    def clean(self):
        # Make sure we have the required translations in the new locale.
        required = 'name', 'summary', 'description'
        if not self.errors and 'default_locale' in self.changed_data:
            fields = dict((k, getattr(self.instance, k + '_id'))
                          for k in required)
            locale = self.cleaned_data['default_locale']
            ids = filter(None, fields.values())
            qs = (Translation.objects.filter(locale=locale, id__in=ids,
                                             localized_string__isnull=False)
                  .values_list('id', flat=True))
            missing = [k for k, v in fields.items() if v not in qs]
            if missing:
                raise forms.ValidationError(ugettext(
                    'Before changing your default locale you must have a '
                    'name, summary, and description in that locale. '
                    'You are missing %s.') % ', '.join(map(repr, missing)))
        return super(AdditionalDetailsForm, self).clean()
    def save(self, addon, commit=False):
        # Apply the tag diff before saving the rest of the form.
        if self.fields.get('tags'):
            tags_new = self.cleaned_data['tags']
            tags_old = [slugify(t, spaces=True) for t in self.get_tags(addon)]
            # Add new tags.
            for t in set(tags_new) - set(tags_old):
                Tag(tag_text=t).save_tag(addon)
            # Remove old tags.
            for t in set(tags_old) - set(tags_new):
                Tag(tag_text=t).remove_tag(addon)
        # We ignore `commit`, since we need it to be `False` so we can save
        # the ManyToMany fields on our own.
        addonform = super(AdditionalDetailsForm, self).save(commit=False)
        addonform.save()
        return addonform
class AdditionalDetailsFormUnlisted(AdditionalDetailsForm):
    """Unlisted variant of AdditionalDetailsForm (identical fields)."""
    # We want the same fields as the listed version. In particular,
    # default_locale is referenced in the template and needs to exist.
    pass
class AddonFormTechnical(AddonFormBase):
    """Technical-details form: only the developer comments field."""
    developer_comments = TransField(widget=TransTextarea, required=False)
    class Meta:
        model = Addon
        fields = ('developer_comments',)
class AddonFormTechnicalUnlisted(AddonFormBase):
    """Unlisted technical-details form: no editable fields."""
    class Meta:
        model = Addon
        fields = ()
class AuthorForm(forms.ModelForm):
    """Row form for an add-on author (user + role).

    Existing rows are read-only for the user field; brand-new rows must
    not be created directly on AddonUser (see clean())."""
    user = UserEmailField(required=True, queryset=UserProfile.objects.all())
    role = forms.TypedChoiceField(
        required=True,
        choices=amo.AUTHOR_CHOICES,
        initial=amo.AUTHOR_ROLE_OWNER,
        coerce=int)
    class Meta:
        model = AddonUser
        exclude = ('addon',)
    def __init__(self, *args, **kwargs):
        # addon should be passed through form_kwargs={'addon': addon} when
        # initializing the formset.
        self.addon = kwargs.pop('addon')
        super().__init__(*args, **kwargs)
        instance = getattr(self, 'instance', None)
        if instance and instance.pk:
            # Clients are not allowed to change existing authors. If they want
            # to do that, they need to remove the existing author and add a new
            # one. This makes the confirmation system easier to manage.
            self.fields['user'].disabled = True
            # Set the email to be displayed in the form instead of the pk.
            self.initial['user'] = instance.user.email
    def clean(self):
        rval = super().clean()
        if self._meta.model == AddonUser and (
                self.instance is None or not self.instance.pk):
            # This should never happen, the client is trying to add a user
            # directly to AddonUser through the formset, they should have
            # been added to AuthorWaitingConfirmation instead.
            raise forms.ValidationError(
                ugettext('Users can not be added directly'))
        return rval
class AuthorWaitingConfirmationForm(AuthorForm):
    """AuthorForm variant backed by AddonUserPendingConfirmation: new
    authors land here until confirmed."""
    class Meta(AuthorForm.Meta):
        model = AddonUserPendingConfirmation
    def clean_user(self):
        """Reject restricted emails, duplicate authors and users without a
        valid display name."""
        user = self.cleaned_data.get('user')
        if user:
            if not EmailUserRestriction.allow_email(user.email):
                raise forms.ValidationError(EmailUserRestriction.error_message)
            if self.addon.authors.filter(pk=user.pk).exists():
                raise forms.ValidationError(
                    ugettext('An author can only be present once.'))
            # Run the display-name validators by hand so a missing/invalid
            # name surfaces as a single friendly error.
            name_validators = user._meta.get_field('display_name').validators
            try:
                if user.display_name is None:
                    raise forms.ValidationError('') # Caught below.
                for validator in name_validators:
                    validator(user.display_name)
            except forms.ValidationError:
                raise forms.ValidationError(ugettext(
                    'The account needs a display name before it can be added '
                    'as an author.'))
        return user
class BaseModelFormSet(BaseModelFormSet):
    """
    Override the parent's is_valid to prevent deleting all forms.
    """
    # NOTE: deliberately shadows django's BaseModelFormSet imported above;
    # the author formsets below subclass this wrapped version.
    def is_valid(self):
        # clean() won't get called in is_valid() if all the rows are getting
        # deleted. We can't allow deleting everything.
        rv = super(BaseModelFormSet, self).is_valid()
        return rv and not any(self.errors) and not bool(self.non_form_errors())
class BaseAuthorFormSet(BaseModelFormSet):
    """Formset over AddonUser rows: require at least one owner and at
    least one listed author among the non-deleted forms."""
    def clean(self):
        if any(self.errors):
            # Per-form errors take precedence over formset-level checks.
            return
        # cleaned_data could be None if it's the empty extra form.
        data = list(filter(None, [f.cleaned_data for f in self.forms
                                  if not f.cleaned_data.get('DELETE', False)]))
        if not any(d['role'] == amo.AUTHOR_ROLE_OWNER for d in data):
            raise forms.ValidationError(
                ugettext('Must have at least one owner.'))
        if not any(d['listed'] for d in data):
            raise forms.ValidationError(
                ugettext('At least one author must be listed.'))
class BaseAuthorWaitingConfirmationFormSet(BaseModelFormSet):
    """Formset over pending-author rows: reject duplicate users."""
    def clean(self):
        if any(self.errors):
            # Per-form errors take precedence over formset-level checks.
            return
        candidates = [form.cleaned_data for form in self.forms
                      if not form.cleaned_data.get('DELETE', False)]
        # cleaned_data could be None if it's the empty extra form.
        kept = [entry for entry in candidates if entry]
        user_ids = [entry['user'].id for entry in kept]
        if len(set(user_ids)) != len(user_ids):
            raise forms.ValidationError(
                ugettext('An author can only be present once.'))
# Author-management formsets: rows can be edited/deleted but not added
# beyond those rendered (extra=0).
AuthorFormSet = modelformset_factory(AddonUser, formset=BaseAuthorFormSet,
                                     form=AuthorForm, can_delete=True, extra=0)
AuthorWaitingConfirmationFormSet = modelformset_factory(
    AddonUserPendingConfirmation, formset=BaseAuthorWaitingConfirmationFormSet,
    form=AuthorWaitingConfirmationForm, can_delete=True, extra=0)
class DeleteForm(forms.Form):
    """Confirmation form for deleting an add-on: the user must retype the
    add-on's slug, and may optionally give a reason."""
    slug = forms.CharField()
    reason = forms.CharField(required=False)

    def __init__(self, *args, **kwargs):
        # The add-on being deleted, supplied by the view.
        self.addon = kwargs.pop('addon')
        super(DeleteForm, self).__init__(*args, **kwargs)

    def clean_slug(self):
        """Require the typed slug to match the add-on's slug exactly.

        Fix: Django field-clean methods must return the cleaned value;
        previously this implicitly returned None, which wiped
        cleaned_data['slug'] even on success.
        """
        data = self.cleaned_data
        if not data['slug'] == self.addon.slug:
            raise forms.ValidationError(ugettext('Slug incorrect.'))
        return data['slug']
class LicenseRadioSelect(forms.RadioSelect):
    """RadioSelect for licenses: adds a 'Details' link and Creative
    Commons data attributes to each option."""
    def get_context(self, name, value, attrs):
        context = super(LicenseRadioSelect, self).get_context(
            name, value, attrs)
        # Make sure the `class` is only set on the radio fields and
        # not on the `ul`. This avoids style issues among other things.
        # See https://github.com/mozilla/addons-server/issues/8902
        # and https://github.com/mozilla/addons-server/issues/8920
        del context['widget']['attrs']['class']
        return context
    def create_option(self, name, value, label, selected, index,
                      subindex=None, attrs=None):
        context = super(LicenseRadioSelect, self).create_option(
            name=name, value=value, label=label, selected=selected,
            index=index, subindex=subindex, attrs=attrs)
        link = (u'<a class="xx extra" href="%s" target="_blank" '
                u'rel="noopener noreferrer">%s</a>')
        license = self.choices[index][1]
        # Licenses with a URL get an external 'Details' link in the label.
        if hasattr(license, 'url') and license.url:
            details = link % (license.url, ugettext('Details'))
            context['label'] = mark_safe(str(context['label']) + ' ' + details)
        # CC licenses expose icon names via data attributes for the UI.
        if hasattr(license, 'icons'):
            context['attrs']['data-cc'] = license.icons
            context['attrs']['data-name'] = str(license)
        return context
class LicenseForm(AMOModelForm):
    """Form to pick a builtin license or provide a custom ('Other') one
    for a version."""
    # Hack to restore behavior from pre Django 1.10 times.
    # Django 1.10 enabled `required` rendering for required widgets. That
    # wasn't the case before, this should be fixed properly but simplifies
    # the actual Django 1.11 deployment for now.
    # See https://github.com/mozilla/addons-server/issues/8912 for proper fix.
    use_required_attribute = False
    builtin = forms.TypedChoiceField(
        choices=[], coerce=int,
        widget=LicenseRadioSelect(attrs={'class': 'license'}))
    name = forms.CharField(widget=TranslationTextInput(),
                           label=_(u'What is your license\'s name?'),
                           required=False, initial=_('Custom License'))
    text = forms.CharField(widget=TranslationTextarea(), required=False,
                           label=_(u'Provide the text of your license.'))
    def __init__(self, *args, **kwargs):
        self.version = kwargs.pop('version', None)
        if self.version:
            kwargs['instance'], kwargs['initial'] = self.version.license, None
            # Clear out initial data if it's a builtin license.
            if getattr(kwargs['instance'], 'builtin', None):
                kwargs['initial'] = {'builtin': kwargs['instance'].builtin}
                kwargs['instance'] = None
            self.cc_licenses = kwargs.pop(
                'cc', self.version.addon.type == amo.ADDON_STATICTHEME)
        else:
            self.cc_licenses = kwargs.pop(
                'cc', False)
        super(LicenseForm, self).__init__(*args, **kwargs)
        # Build the choice list from the builtin licenses on offer.
        licenses = License.objects.builtins(
            cc=self.cc_licenses).filter(on_form=True)
        cs = [(x.builtin, x) for x in licenses]
        if not self.cc_licenses:
            # creative commons licenses don't have an 'other' option.
            cs.append((License.OTHER, ugettext('Other')))
        self.fields['builtin'].choices = cs
        # Unlisted versions don't have to pick a license.
        if (self.version and
                self.version.channel == amo.RELEASE_CHANNEL_UNLISTED):
            self.fields['builtin'].required = False
    class Meta:
        model = License
        fields = ('builtin', 'name', 'text')
    def clean_name(self):
        name = self.cleaned_data['name']
        return name.strip() or ugettext('Custom License')
    def clean(self):
        data = self.cleaned_data
        if self.errors:
            return data
        elif data['builtin'] == License.OTHER and not data['text']:
            raise forms.ValidationError(
                ugettext('License text is required when choosing Other.'))
        return data
    def get_context(self):
        """Returns a view context dict having keys license_form,
        and license_other_val.
        """
        return {
            'version': self.version,
            'license_form': self.version and self,
            'license_other_val': License.OTHER
        }
    def save(self, *args, **kw):
        """Save all form data.
        This will only create a new license if it's not one of the builtin
        ones.
        Keyword arguments
        **log=True**
            Set to False if you do not want to log this action for display
            on the developer dashboard.
        """
        log = kw.pop('log', True)
        changed = self.changed_data
        builtin = self.cleaned_data['builtin']
        if builtin == '': # No license chosen, it must be an unlisted add-on.
            return
        is_other = builtin == License.OTHER
        if not is_other:
            # We're dealing with a builtin license, there is no modifications
            # allowed to it, just return it.
            license = License.objects.get(builtin=builtin)
        else:
            # We're not dealing with a builtin license, so save it to the
            # database.
            license = super(LicenseForm, self).save(*args, **kw)
        if self.version:
            if (changed and is_other) or license != self.version.license:
                self.version.update(license=license)
                if log:
                    ActivityLog.create(amo.LOG.CHANGE_LICENSE, license,
                                       self.version.addon)
        return license
class PolicyForm(TranslationFormMixin, AMOModelForm):
    """Form for editing the add-ons EULA and privacy policy."""
    has_eula = forms.BooleanField(
        required=False,
        label=_(u'This add-on has an End-User License Agreement'))
    eula = TransField(
        widget=TransTextarea(), required=False,
        label=_(u'Please specify your add-on\'s '
                u'End-User License Agreement:'))
    has_priv = forms.BooleanField(
        required=False, label=_(u'This add-on has a Privacy Policy'),
        label_suffix='')
    privacy_policy = TransField(
        widget=TransTextarea(), required=False,
        label=_(u'Please specify your add-on\'s Privacy Policy:'))
    def __init__(self, *args, **kw):
        self.addon = kw.pop('addon', None)
        if not self.addon:
            raise ValueError('addon keyword arg cannot be None')
        kw['instance'] = self.addon
        # Pre-tick the checkboxes based on whether translations exist.
        kw['initial'] = dict(has_priv=self._has_field('privacy_policy'),
                             has_eula=self._has_field('eula'))
        super(PolicyForm, self).__init__(*args, **kw)
    def _has_field(self, name):
        # If there's a eula in any language, this addon has a eula.
        n = getattr(self.addon, u'%s_id' % name)
        return any(map(bool, Translation.objects.filter(id=n)))
    class Meta:
        model = Addon
        fields = ('eula', 'privacy_policy')
    def save(self, commit=True):
        ob = super(PolicyForm, self).save(commit)
        # Unticking a checkbox deletes that field's translations.
        for k, field in (('has_eula', 'eula'),
                         ('has_priv', 'privacy_policy')):
            if not self.cleaned_data[k]:
                delete_translation(self.instance, field)
        if 'privacy_policy' in self.changed_data:
            ActivityLog.create(amo.LOG.CHANGE_POLICY, self.addon,
                               self.instance)
        return ob
class WithSourceMixin(object):
    """Mixin providing validation of an uploaded `source` archive.

    Checks the extension against VALID_SOURCE_EXTENSIONS and verifies the
    file really is a readable zip/tar archive (members included).
    """
    def get_invalid_source_file_type_message(self):
        """Return the translated "unsupported file type" error message."""
        valid_extensions_string = '(%s)' % ', '.join(VALID_SOURCE_EXTENSIONS)
        # Fix: translate the template first, then interpolate. The old code
        # formatted first and passed the result to ugettext(), so the lookup
        # used a message id that does not exist in the translation catalogs.
        return ugettext(
            'Unsupported file type, please upload an archive '
            'file {extensions}.').format(extensions=valid_extensions_string)

    def clean_source(self):
        """Validate the uploaded source archive and return it.

        Raises forms.ValidationError for unsupported extensions and for
        corrupt or unreadable archives.
        """
        source = self.cleaned_data.get('source')
        if source:
            # Ensure the file type is one we support.
            if not source.name.endswith(VALID_SOURCE_EXTENSIONS):
                raise forms.ValidationError(
                    self.get_invalid_source_file_type_message())
            # Check inside to see if the file extension matches the content.
            try:
                if source.name.endswith('.zip'):
                    zip_file = SafeZip(source)
                    # testzip() returns None if there are no broken CRCs.
                    if zip_file.zip_file.testzip() is not None:
                        raise zipfile.BadZipFile()
                elif source.name.endswith(('.tar.gz', '.tar.bz2', '.tgz')):
                    # For tar files we need to do a little more work.
                    mode = 'r:bz2' if source.name.endswith('bz2') else 'r:gz'
                    with tarfile.open(mode=mode, fileobj=source) as archive:
                        archive_members = archive.getmembers()
                        for member in archive_members:
                            archive_member_validator(archive, member)
                else:
                    raise forms.ValidationError(
                        self.get_invalid_source_file_type_message())
            except (zipfile.BadZipFile, tarfile.ReadError, IOError, EOFError):
                raise forms.ValidationError(
                    ugettext('Invalid or broken archive.'))
        return source
class SourceFileInput(forms.widgets.ClearableFileInput):
    """
    Like ClearableFileInput but with custom link URL and text for the initial
    data. Uses a custom template because django's is not flexible enough for
    our needs.
    """
    initial_text = _('View current')
    template_name = 'devhub/addons/includes/source_file_input.html'
    def get_context(self, name, value, attrs):
        context = super(SourceFileInput, self).get_context(name, value, attrs)
        # Only already-saved files (with an .instance) get a download link.
        if value and hasattr(value, 'instance'):
            context['download_url'] = reverse(
                'downloads.source', args=(value.instance.pk, ))
        return context
class VersionForm(WithSourceMixin, forms.ModelForm):
    # Editable metadata for a single Version, plus the optional source
    # archive (validated by WithSourceMixin.clean_source).
    release_notes = TransField(
        widget=TransTextarea(), required=False)
    approval_notes = forms.CharField(
        widget=TranslationTextarea(attrs={'rows': 4}), required=False)
    source = forms.FileField(required=False, widget=SourceFileInput)

    class Meta:
        model = Version
        fields = ('release_notes', 'approval_notes', 'source',)
class AppVersionChoiceField(forms.ModelChoiceField):
    """ModelChoiceField whose options display the AppVersion's version
    string instead of the model's default representation."""

    def label_from_instance(self, obj):
        # Show e.g. "57.0" rather than str(obj).
        version_label = obj.version
        return version_label
class CompatForm(forms.ModelForm):
    """One application / min-version / max-version compatibility row.

    Always instantiated through the formset with
    ``form_kwargs={'version': version}``.
    """
    application = forms.TypedChoiceField(choices=amo.APPS_CHOICES,
                                         coerce=int,
                                         widget=forms.HiddenInput)
    min = AppVersionChoiceField(AppVersion.objects.none())
    max = AppVersionChoiceField(AppVersion.objects.none())

    class Meta:
        model = ApplicationsVersions
        fields = ('application', 'min', 'max')

    def __init__(self, *args, **kwargs):
        # 'version' should always be passed as a kwarg to this form. If it's
        # absent, it probably means form_kwargs={'version': version} is missing
        # from the instantiation of the formset.
        version = kwargs.pop('version')
        super(CompatForm, self).__init__(*args, **kwargs)
        # The application may come from initial data (bound to existing
        # rows) or from submitted POST data (extra rows).
        if self.initial:
            app = self.initial['application']
        else:
            app = self.data[self.add_prefix('application')]
        self.app = amo.APPS_ALL[int(app)]
        qs = AppVersion.objects.filter(application=app).order_by('version_int')
        # Legacy extensions can't set compatibility higher than 56.* for
        # Firefox and Firefox for Android.
        # This does not concern Mozilla Signed Legacy extensions which
        # are shown the same version choice as WebExtensions.
        if (self.app in (amo.FIREFOX, amo.ANDROID) and
                not version.is_webextension and
                not version.is_mozilla_signed and
                version.addon.type not in amo.NO_COMPAT + (amo.ADDON_LPAPP,)):
            qs = qs.filter(version_int__lt=57000000000000)
        # 'min' may not be a wildcard ("*") version; 'max' may.
        self.fields['min'].queryset = qs.filter(~Q(version__contains='*'))
        self.fields['max'].queryset = qs.all()

    def clean(self):
        # Ensure the selected range is non-empty: both ends set, min <= max.
        min_ = self.cleaned_data.get('min')
        max_ = self.cleaned_data.get('max')
        if not (min_ and max_ and min_.version_int <= max_.version_int):
            raise forms.ValidationError(ugettext('Invalid version range.'))
        return self.cleaned_data
class BaseCompatFormSet(BaseModelFormSet):
    """Formset of CompatForm rows, forcing one row per supported app."""

    def __init__(self, *args, **kwargs):
        super(BaseCompatFormSet, self).__init__(*args, **kwargs)
        # We always want a form for each app, so force extras for apps
        # the add-on does not already have.
        version = self.form_kwargs.get('version')
        static_theme = version and version.addon.type == amo.ADDON_STATICTHEME
        available_apps = amo.APP_USAGE
        self.can_delete = not static_theme  # No tinkering with apps please.

        # Only display the apps we care about, if somehow obsolete apps were
        # recorded before.
        self.queryset = self.queryset.filter(
            application__in=[a.id for a in available_apps])
        initial_apps = self.queryset.values_list('application', flat=True)

        self.initial = ([{'application': appver.application,
                          'min': appver.min.pk,
                          'max': appver.max.pk} for appver in self.queryset] +
                        [{'application': app.id} for app in available_apps
                         if app.id not in initial_apps])
        self.extra = (
            max(len(available_apps) - len(self.forms), 0) if not static_theme
            else 0)

        # After these changes, the forms need to be rebuilt. `forms` is a
        # cached property: clear any cached value (AttributeError means it
        # was never computed, so there is nothing to clear) and access it
        # again so a fresh set is built with the new initial/extra values.
        try:
            del self.forms
        except AttributeError:
            pass
        self.forms

    def clean(self):
        if any(self.errors):
            return

        apps = list(filter(None, [f.cleaned_data for f in self.forms
                                  if not f.cleaned_data.get('DELETE', False)]))

        if not apps:
            # At this point, we're raising a global error and re-displaying the
            # applications that were present before. We don't want to keep the
            # hidden delete fields in the data attribute, cause that's used to
            # populate initial data for all forms, and would therefore make
            # those delete fields active again.
            self.data = {k: v for k, v in self.data.items()
                         if not k.endswith('-DELETE')}
            for form in self.forms:
                form.data = self.data
            raise forms.ValidationError(
                ugettext('Need at least one compatible application.'))
# Formset wiring CompatForm + BaseCompatFormSet together. extra=0 because
# BaseCompatFormSet.__init__ computes the number of extra forms itself.
CompatFormSet = modelformset_factory(
    ApplicationsVersions, formset=BaseCompatFormSet,
    form=CompatForm, can_delete=True, extra=0)
class CompatAppSelectWidget(forms.CheckboxSelectMultiple):
    """Checkbox multi-select of compatible applications, rendered with a
    custom per-option template."""

    option_template_name = 'devhub/forms/widgets/compat_app_input_option.html'

    def create_option(self, name, value, label, selected, index, subindex=None,
                      attrs=None):
        option = super(CompatAppSelectWidget, self).create_option(
            name=name, value=value, label=label, selected=selected,
            index=index, subindex=subindex, attrs=attrs)
        # Inject the short application name for easier styling.
        app_id = int(option['value'])
        option['compat_app_short'] = amo.APPS_ALL[app_id].short
        return option
class NewUploadForm(forms.Form):
    """Validates a new file upload for an add-on version: throttling,
    validation results, the blocklist, and duplicate version numbers."""
    upload = forms.ModelChoiceField(
        widget=forms.HiddenInput,
        queryset=FileUpload.objects,
        to_field_name='uuid',
        error_messages={
            'invalid_choice': _(u'There was an error with your '
                                u'upload. Please try again.')
        }
    )
    admin_override_validation = forms.BooleanField(
        required=False, label=_(u'Override failed validation'))
    compatible_apps = forms.TypedMultipleChoiceField(
        choices=amo.APPS_CHOICES,
        # Pre-select only Desktop Firefox, most of the times developers
        # don't develop their WebExtensions for Android.
        # See this GitHub comment: https://bit.ly/2QaMicU
        initial=[amo.FIREFOX.id],
        coerce=int,
        widget=CompatAppSelectWidget(),
        error_messages={
            'required': _('Need to select at least one application.')
        })

    def __init__(self, *args, **kw):
        self.request = kw.pop('request')
        self.addon = kw.pop('addon', None)
        super(NewUploadForm, self).__init__(*args, **kw)

        # Preselect compatible apps based on the current version
        if self.addon and self.addon.current_version:
            # Fetch list of applications freshly from the database to not
            # rely on potentially outdated data since `addon.compatible_apps`
            # is a cached property
            compat_apps = list(self.addon.current_version.apps.values_list(
                'application', flat=True))
            self.fields['compatible_apps'].initial = compat_apps

    def _clean_upload(self):
        # Accept the upload when it validated cleanly, timed out during
        # validation, or a reviews admin explicitly overrides validation.
        # NOTE(review): `and` binds tighter than `or`, so the last two
        # conditions group together — presumably intentional, but explicit
        # parentheses would make that clearer.
        if not (self.cleaned_data['upload'].valid or
                self.cleaned_data['upload'].validation_timeout or
                self.cleaned_data['admin_override_validation'] and
                acl.action_allowed(self.request,
                                   amo.permissions.REVIEWS_ADMIN)):
            raise forms.ValidationError(
                ugettext(u'There was an error with your upload. '
                         u'Please try again.'))

    def check_throttles(self, request):
        """
        Check if request should be throttled by calling the signing API
        throttling method.

        Raises ValidationError if the request is throttled.
        """
        from olympia.signing.views import VersionView  # circular import
        view = VersionView()
        try:
            view.check_throttles(request)
        except Throttled:
            raise forms.ValidationError(
                _('You have submitted too many uploads recently. '
                  'Please try again after some time.'))

    def check_blocklist(self, guid, version_string):
        # check the guid/version isn't in the addon blocklist
        block = Block.objects.filter(guid=guid).first()
        if block and block.is_version_blocked(version_string):
            # Escape the translated message first, then inject pre-built
            # safe HTML fragments into it.
            msg = escape(ugettext(
                'Version {version} matches {block_link} for this add-on. '
                'You can contact {amo_admins} for additional information.'))
            formatted_msg = DoubleSafe(
                msg.format(
                    version=version_string,
                    block_link=format_html(
                        '<a href="{}">{}</a>',
                        reverse('blocklist.block', args=[guid]),
                        ugettext('a blocklist entry')),
                    amo_admins=(
                        '<a href="mailto:amo-admins@mozilla.com">AMO Admins'
                        '</a>')
                )
            )
            raise forms.ValidationError(formatted_msg)

    def check_for_existing_versions(self, version_string):
        # Reject uploads whose version number already exists for this
        # add-on, including soft-deleted versions.
        if self.addon:
            # Make sure we don't already have this version.
            existing_versions = Version.unfiltered.filter(
                addon=self.addon, version=version_string)
            if existing_versions.exists():
                version = existing_versions[0]
                if version.deleted:
                    msg = ugettext(
                        'Version {version} was uploaded before and deleted.')
                elif version.unreviewed_files:
                    # Offer a link to continue the pending submission.
                    next_url = reverse(
                        'devhub.submit.version.details',
                        args=[self.addon.slug, version.pk])
                    msg = DoubleSafe('%s <a href="%s">%s</a>' % (
                        ugettext(u'Version {version} already exists.'),
                        next_url,
                        ugettext(u'Continue with existing upload instead?')
                    ))
                else:
                    msg = ugettext(u'Version {version} already exists.')
                raise forms.ValidationError(
                    msg.format(version=version_string))

    def clean(self):
        self.check_throttles(self.request)
        if not self.errors:
            self._clean_upload()
            # Parse the upload to extract guid/version for the checks below.
            parsed_data = parse_addon(
                self.cleaned_data['upload'], self.addon,
                user=self.request.user)
            self.check_blocklist(
                self.addon.guid if self.addon else parsed_data.get('guid'),
                parsed_data.get('version'))
            self.check_for_existing_versions(parsed_data.get('version'))
            self.cleaned_data['parsed_data'] = parsed_data
        return self.cleaned_data
class SourceForm(WithSourceMixin, forms.ModelForm):
    """Attach (or explicitly decline to attach) source code to a version."""

    source = forms.FileField(required=False, widget=SourceFileInput)
    has_source = forms.ChoiceField(
        choices=(('yes', _('Yes')), ('no', _('No'))), required=True,
        widget=RadioSelect)

    class Meta:
        model = Version
        fields = ('source',)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request')
        super(SourceForm, self).__init__(*args, **kwargs)

    def clean_source(self):
        """Cross-check the yes/no answer against the actual upload before
        delegating to the mixin's archive validation."""
        uploaded = self.cleaned_data.get('source')
        # 'has_source' has not been cleaned at this point; read raw data.
        answer = self.data.get('has_source')
        if answer == 'yes' and not uploaded:
            raise forms.ValidationError(
                ugettext(u'You have not uploaded a source file.'))
        if answer == 'no' and uploaded:
            raise forms.ValidationError(
                ugettext(u'Source file uploaded but you indicated no source '
                         u'was needed.'))
        # At this point we know we can proceed with the actual archive
        # validation.
        return super(SourceForm, self).clean_source()
class DescribeForm(AddonFormBase):
    """Listed add-on "describe" step: name, slug, summary, description and
    support/contact fields."""
    name = TransField(max_length=50)
    slug = forms.CharField(max_length=30)
    summary = TransField(widget=TransTextarea(attrs={'rows': 4}),
                         max_length=250)
    description = TransField(widget=TransTextarea(attrs={'rows': 6}),
                             min_length=10)
    is_experimental = forms.BooleanField(required=False)
    requires_payment = forms.BooleanField(required=False)
    support_url = TransField.adapt(HttpHttpsOnlyURLField)(required=False)
    support_email = TransField.adapt(forms.EmailField)(required=False)

    class Meta:
        model = Addon
        fields = ('name', 'slug', 'summary', 'description', 'is_experimental',
                  'support_url', 'support_email', 'requires_payment')

    def __init__(self, *args, **kw):
        super(DescribeForm, self).__init__(*args, **kw)
        # The stricter description rules only apply to extensions while the
        # 'content-optimization' waffle switch is active; otherwise relax
        # the field: no minimum length, not required.
        content_waffle = waffle.switch_is_active('content-optimization')
        if not content_waffle or self.instance.type != amo.ADDON_EXTENSION:
            description = self.fields['description']
            description.min_length = None
            description.widget.attrs.pop('minlength', None)
            description.validators = [
                validator for validator in description.validators
                if not isinstance(validator, MinLengthValidator)]
            description.required = False
class CombinedNameSummaryCleanMixin(object):
    """Mixin enforcing that name + summary together stay within MAX_LENGTH
    characters per locale, optionally auto-cropping non-default locales."""

    # Combined character budget shared between name and summary.
    MAX_LENGTH = 70

    def __init__(self, *args, **kw):
        self.should_auto_crop = kw.pop('should_auto_crop', False)
        super(CombinedNameSummaryCleanMixin, self).__init__(*args, **kw)
        # We need the values for the template but not the MaxLengthValidators
        self.fields['name'].max_length = (
            self.MAX_LENGTH - self.fields['summary'].min_length)
        self.fields['summary'].max_length = (
            self.MAX_LENGTH - self.fields['name'].min_length)

    def clean(self):
        message = _(u'Ensure name and summary combined are at most '
                    u'{limit_value} characters (they have {show_value}).')
        super(CombinedNameSummaryCleanMixin, self).clean()
        # Every locale that has either a name or a summary translation.
        name_summary_locales = set(
            list(self.cleaned_data.get('name', {}).keys()) +
            list(self.cleaned_data.get('summary', {}).keys()))
        default_locale = self.instance.default_locale.lower()
        name_values = self.cleaned_data.get('name') or {}
        name_default = name_values.get(default_locale) or ''
        summary_values = self.cleaned_data.get('summary') or {}
        summary_default = summary_values.get(default_locale) or ''
        for locale in name_summary_locales:
            # A missing translation falls back to the default locale value.
            val_len = len(name_values.get(locale, name_default) +
                          summary_values.get(locale, summary_default))
            if val_len > self.MAX_LENGTH:
                if locale == default_locale:
                    # only error in default locale.
                    formatted_message = message.format(
                        limit_value=self.MAX_LENGTH, show_value=val_len)
                    self.add_error(
                        'name', LocaleErrorMessage(
                            message=formatted_message, locale=locale))
                elif self.should_auto_crop:
                    # otherwise we need to shorten the summary (and or name?)
                    if locale in name_values:
                        # if only default summary need to shorten name instead.
                        max_name_length = (
                            self.fields['name'].max_length
                            if locale in summary_values
                            else self.MAX_LENGTH - len(summary_default))
                        name = name_values[locale][:max_name_length]
                        name_length = len(name)
                        self.cleaned_data.setdefault('name', {})[locale] = name
                    else:
                        name_length = len(name_default)
                    # Crop the summary to whatever budget the name left over.
                    if locale in summary_values:
                        max_summary_length = self.MAX_LENGTH - name_length
                        self.cleaned_data.setdefault('summary', {})[locale] = (
                            summary_values[locale][:max_summary_length])
        return self.cleaned_data
class DescribeFormContentOptimization(CombinedNameSummaryCleanMixin,
                                      DescribeForm):
    # Variant of DescribeForm used when the 'content-optimization' waffle is
    # on: per-field max lengths are replaced by the combined-length check
    # from CombinedNameSummaryCleanMixin.
    name = TransField(min_length=2)
    summary = TransField(min_length=2)
class DescribeFormUnlisted(AddonFormBase):
    """Unlisted add-on "describe" step: like DescribeForm but without the
    support/experimental/payment fields, and description is optional."""
    name = TransField(max_length=50)
    slug = forms.CharField(max_length=30)
    summary = TransField(widget=TransTextarea(attrs={'rows': 4}),
                         max_length=250)
    description = TransField(widget=TransTextarea(attrs={'rows': 4}),
                             required=False)

    class Meta:
        model = Addon
        fields = ('name', 'slug', 'summary', 'description')
class DescribeFormUnlistedContentOptimization(CombinedNameSummaryCleanMixin,
                                              DescribeFormUnlisted):
    # Unlisted counterpart of DescribeFormContentOptimization: combined
    # name+summary length is enforced by the mixin's clean().
    name = TransField(max_length=68, min_length=2)
    summary = TransField(max_length=68, min_length=2)
class PreviewForm(forms.ModelForm):
    """Single screenshot/preview form: handles deletion and queues the
    resize task for freshly uploaded images."""
    caption = TransField(widget=TransTextarea, required=False)
    file_upload = forms.FileField(required=False)
    upload_hash = forms.CharField(required=False)

    def save(self, addon, commit=True):
        if self.cleaned_data:
            self.instance.addon = addon
            if self.cleaned_data.get('DELETE'):
                # Existing preview.
                if self.instance.id:
                    self.instance.delete()
                # User has no desire to save this preview.
                return
            super(PreviewForm, self).save(commit=commit)
            if self.cleaned_data['upload_hash']:
                # A new image was uploaded: resize it asynchronously from
                # its temporary location.
                upload_hash = self.cleaned_data['upload_hash']
                upload_path = os.path.join(
                    settings.TMP_PATH, 'preview', upload_hash)
                tasks.resize_preview.delay(
                    upload_path, self.instance.pk,
                    set_modified_on=self.instance.serializable_reference())

    class Meta:
        model = Preview
        fields = ('caption', 'file_upload', 'upload_hash', 'id', 'position')
class BasePreviewFormSet(BaseModelFormSet):
    """Formset for previews; skips cross-form validation whenever any
    individual form already carries errors."""

    def clean(self):
        has_form_errors = any(self.errors)
        if has_form_errors:
            return
# Formset of PreviewForm; one blank extra form for adding a new preview.
PreviewFormSet = modelformset_factory(Preview, formset=BasePreviewFormSet,
                                      form=PreviewForm, can_delete=True,
                                      extra=1)
class DistributionChoiceForm(forms.Form):
    """Choose between listed (on-site) and unlisted (self) distribution."""

    LISTED_LABEL = _(
        'On this site. <span class="helptext">'
        'Your submission will be listed on this site and the Firefox '
        'Add-ons Manager for millions of users, after it passes code '
        'review. Automatic updates are handled by this site. This '
        'add-on will also be considered for Mozilla promotions and '
        'contests. Self-distribution of the reviewed files is also '
        'possible.</span>')
    UNLISTED_LABEL = _(
        'On your own. <span class="helptext">'
        'Your submission will be immediately signed for '
        'self-distribution. Updates should be handled by you via an '
        'updateURL or external application updates.</span>')

    channel = forms.ChoiceField(
        choices=[],
        initial='listed',
        widget=forms.RadioSelect(attrs={'class': 'channel'}))

    def __init__(self, *args, **kwargs):
        self.addon = kwargs.pop('addon', None)
        super().__init__(*args, **kwargs)
        listed_choice = ('listed', mark_safe(self.LISTED_LABEL))
        unlisted_choice = ('unlisted', mark_safe(self.UNLISTED_LABEL))
        # If the add-on is disabled, 'listed' is not a valid choice,
        # "invisible" add-ons can not upload new listed versions.
        if self.addon and self.addon.disabled_by_user:
            channel_choices = [unlisted_choice]
        else:
            channel_choices = [listed_choice, unlisted_choice]
        self.fields['channel'].choices = channel_choices
class AgreementForm(forms.Form):
    """Developer agreement form; optionally asks for a display name and a
    captcha depending on waffle switches and the current user."""
    distribution_agreement = forms.BooleanField()
    review_policy = forms.BooleanField()
    display_name = forms.CharField(label=_('Display Name'))
    recaptcha = ReCaptchaField(label='')

    def __init__(self, *args, **kwargs):
        # NOTE(review): 'request' defaults to None but is dereferenced
        # unconditionally below and in clean() — confirm callers always
        # pass it.
        self.request = kwargs.pop('request', None)
        super().__init__(*args, **kwargs)

        if not waffle.switch_is_active('developer-agreement-captcha'):
            del self.fields['recaptcha']

        if (self.request.user.is_authenticated and
                self.request.user.display_name):
            # Don't bother asking for a display name if there is one already.
            del self.fields['display_name']
        else:
            # If there isn't one... we want to make sure to use the same
            # validators as the model.
            self.fields['display_name'].validators += (
                UserProfile._meta.get_field('display_name').validators)

    def clean(self):
        # Check if user ip or email is not supposed to be allowed to submit.
        checker = UploadRestrictionChecker(self.request)
        if not checker.is_submission_allowed(check_dev_agreement=False):
            raise forms.ValidationError(checker.get_error_message())
        return self.cleaned_data
class SingleCategoryForm(forms.Form):
    """Pick exactly one category for an add-on (radio buttons)."""

    category = forms.ChoiceField(widget=forms.RadioSelect)

    def __init__(self, *args, **kw):
        self.addon = kw.pop('addon')
        self.request = kw.pop('request', None)
        existing = self.addon.all_categories
        if existing:
            # Pre-select the add-on's current (first) category.
            kw['initial'] = {'category': existing[0].slug}
        super(SingleCategoryForm, self).__init__(*args, **kw)
        # Offer every category valid for this add-on type, sorted by slug.
        categories = CATEGORIES_NO_APP[self.addon.type]
        choices = [(slug, cat.name)
                   for slug, cat in sorted(categories.items(),
                                           key=lambda pair: pair[0])]
        self.fields['category'].choices = choices

    def save(self):
        """Replace the add-on's categories with the selected one, per app."""
        chosen_slug = self.cleaned_data['category']
        # Clear any old categor[y|ies]
        AddonCategory.objects.filter(addon=self.addon).delete()
        # Add new categor[y|ies]
        for app in CATEGORIES:
            app_categories = CATEGORIES[app].get(self.addon.type, {})
            category = app_categories.get(chosen_slug, None)
            if category:
                AddonCategory(addon=self.addon,
                              category_id=category.id).save()
        # Remove old, outdated categories cache on the model.
        del self.addon.all_categories
|
unknown
|
codeparrot/codeparrot-clean
| ||
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import re
import datetime
import tempfile
import time
from viper.common.constants import VIPER_ROOT
try:
import pefile
import peutils
HAVE_PEFILE = True
except ImportError:
HAVE_PEFILE = False
try:
from modules.pehash.pehasher import calculate_pehash
HAVE_PEHASH = True
except ImportError:
HAVE_PEHASH = False
try:
from modules.verifysigs.verifysigs import get_auth_data
from verifysigs.asn1 import dn
HAVE_VERIFYSIGS= True
except ImportError:
HAVE_VERIFYSIGS = False
from viper.common.out import bold, table
from viper.common.abstracts import Module
from viper.common.utils import get_type, get_md5
from viper.core.database import Database
from viper.core.storage import get_sample_path
from viper.core.session import __sessions__
class PE(Module):
cmd = 'pe'
description = 'Extract information from PE32 headers'
authors = ['nex', 'Statixs']
def __init__(self):
    """Build the `pe` command's argument parser, one subcommand per
    analysis (imports, exports, entrypoint, resources, ...)."""
    super(PE, self).__init__()
    subparsers = self.parser.add_subparsers(dest='subname')
    subparsers.add_parser('imports', help='List PE imports')
    subparsers.add_parser('exports', help='List PE exports')

    parser_ep = subparsers.add_parser('entrypoint', help='Show and scan for AddressOfEntryPoint')
    parser_ep.add_argument('-a', '--all', action='store_true', help='Prints the AddressOfEntryPoint of all files in the project')
    parser_ep.add_argument('-c', '--cluster', action='store_true', help='Cluster all files in the project')
    parser_ep.add_argument('-s', '--scan', action='store_true', help='Scan repository for matching samples')

    parser_res = subparsers.add_parser('resources', help='List PE resources')
    parser_res.add_argument('-d', '--dump', metavar='folder', help='Destination directory to store resource files in')
    parser_res.add_argument('-o', '--open', metavar='resource number', type=int, help='Open a session on the specified resource')
    parser_res.add_argument('-s', '--scan', action='store_true', help='Scan the repository for common resources')

    parser_imp = subparsers.add_parser('imphash', help='Get and scan for imphash')
    parser_imp.add_argument('-s', '--scan', action='store_true', help='Scan for all samples with same imphash')
    parser_imp.add_argument('-c', '--cluster', action='store_true', help='Cluster repository by imphash (careful, could be massive)')

    parser_comp = subparsers.add_parser('compiletime', help='Show the compiletime')
    parser_comp.add_argument('-s', '--scan', action='store_true', help='Scan the repository for common compile time')
    parser_comp.add_argument('-w', '--window', type=int, help='Specify an optional time window in minutes')

    parser_peid = subparsers.add_parser('peid', help='Show the PEiD signatures')
    parser_peid.add_argument('-s', '--scan', action='store_true', help='Scan the repository for PEiD signatures')

    parser_sec = subparsers.add_parser('security', help='Show digital signature')
    parser_sec.add_argument('-d', '--dump', metavar='folder', help='Destination directory to store digital signature in')
    parser_sec.add_argument('-a', '--all', action='store_true', help='Find all samples with a digital signature')
    parser_sec.add_argument('-s', '--scan', action='store_true', help='Scan the repository for common certificates')
    parser_sec.add_argument('-c', '--check', action='store_true', help='Check authenticode information')

    parser_lang = subparsers.add_parser('language', help='Guess PE language')
    parser_lang.add_argument('-s', '--scan', action='store_true', help='Scan the repository')

    subparsers.add_parser('sections', help='List PE Sections')

    parser_peh = subparsers.add_parser('pehash', help='Calculate the PEhash and compare them')
    parser_peh.add_argument('-a', '--all', action='store_true', help='Prints the PEhash of all files in the project')
    parser_peh.add_argument('-c', '--cluster', action='store_true', help='Calculate and cluster all files in the project')
    parser_peh.add_argument('-s', '--scan', action='store_true', help='Scan repository for matching samples')

    # Parsed pefile.PE instance; populated lazily by __check_session().
    self.pe = None
def __check_session(self):
    """Ensure there is an open session and a parsed PE instance.

    Returns True when ``self.pe`` is ready to use, False otherwise.
    """
    if not __sessions__.is_set():
        self.log('error', "No session opened")
        return False
    # Parse the current session's file lazily, only once.
    if self.pe:
        return True
    try:
        self.pe = pefile.PE(__sessions__.current.file.path)
    except pefile.PEFormatError as e:
        self.log('error', "Unable to parse PE file: {0}".format(e))
        return False
    return True
def imports(self):
    """List every DLL import (address: symbol name) of the opened file."""
    if not self.__check_session():
        return

    if hasattr(self.pe, 'DIRECTORY_ENTRY_IMPORT'):
        for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
            try:
                self.log('info', "DLL: {0}".format(entry.dll))
                for symbol in entry.imports:
                    self.log('item', "{0}: {1}".format(hex(symbol.address), symbol.name))
            except Exception:
                # Malformed import entries happen in the wild; skip them
                # rather than abort the whole listing. (Narrowed from a bare
                # `except:` so KeyboardInterrupt/SystemExit propagate.)
                continue
def exports(self):
    """List exported symbols as image-base-adjusted address, name, ordinal."""
    if not self.__check_session():
        return

    self.log('info', "Exports:")
    if not hasattr(self.pe, 'DIRECTORY_ENTRY_EXPORT'):
        return
    image_base = self.pe.OPTIONAL_HEADER.ImageBase
    for symbol in self.pe.DIRECTORY_ENTRY_EXPORT.symbols:
        self.log('item', "{0}: {1} ({2})".format(
            hex(image_base + symbol.address), symbol.name, symbol.ordinal))
def entrypoint(self):
    """Show the AddressOfEntryPoint of the opened file, or list / cluster /
    scan entry points across the whole repository depending on flags."""
    if self.args.scan and self.args.cluster:
        self.log('error', "You selected two exclusive options, pick one")
        return

    if self.args.all:
        # --all: print the entry point of every sample in the project.
        db = Database()
        samples = db.find(key='all')

        rows = []
        for sample in samples:
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_ep = pefile.PE(sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
            except:
                # Not a parseable PE file; skip it.
                continue

            rows.append([sample.md5, sample.name, cur_ep])

        self.log('table', dict(header=['MD5', 'Name', 'AddressOfEntryPoint'], rows=rows))

        return

    if self.args.cluster:
        # --cluster: group samples sharing an identical entry point.
        db = Database()
        samples = db.find(key='all')

        cluster = {}
        for sample in samples:
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_ep = pefile.PE(sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
            except:
                continue

            if cur_ep not in cluster:
                cluster[cur_ep] = []

            cluster[cur_ep].append([sample.md5, sample.name])

        for cluster_name, cluster_members in cluster.items():
            # Skipping clusters with only one entry.
            if len(cluster_members) == 1:
                continue

            self.log('info', "AddressOfEntryPoint cluster {0}".format(bold(cluster_name)))

            self.log('table', dict(header=['MD5', 'Name'],
                                   rows=cluster_members))

        return

    # The remaining modes require an open session with a valid PE file.
    if not self.__check_session():
        return

    ep = self.pe.OPTIONAL_HEADER.AddressOfEntryPoint

    self.log('info', "AddressOfEntryPoint: {0}".format(ep))

    if self.args.scan:
        # --scan: find other samples with this exact entry point.
        db = Database()
        samples = db.find(key='all')

        rows = []
        for sample in samples:
            if sample.sha256 == __sessions__.current.file.sha256:
                continue

            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_ep = pefile.PE(sample_path).OPTIONAL_HEADER.AddressOfEntryPoint
            except:
                continue

            if ep == cur_ep:
                rows.append([sample.md5, sample.name])

        self.log('info', "Following are samples with AddressOfEntryPoint {0}".format(bold(ep)))

        self.log('table', dict(header=['MD5', 'Name'],
                               rows=rows))
def compiletime(self):
    """Show the PE compile timestamp; with --scan, find samples compiled at
    the same time (or within --window minutes of it)."""
    def get_compiletime(pe):
        # TimeDateStamp is seconds-since-epoch from the COFF file header.
        return datetime.datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp)

    if not self.__check_session():
        return

    compile_time = get_compiletime(self.pe)
    self.log('info', "Compile Time: {0}".format(bold(compile_time)))

    if self.args.scan:
        self.log('info', "Scanning the repository for matching samples...")

        db = Database()
        samples = db.find(key='all')

        matches = []
        for sample in samples:
            if sample.sha256 == __sessions__.current.file.sha256:
                continue

            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_pe = pefile.PE(sample_path)
                cur_compile_time = get_compiletime(cur_pe)
            except Exception:
                # Not a parseable PE file; skip it. (Narrowed from a bare
                # `except:` so KeyboardInterrupt is not swallowed.)
                continue

            if compile_time == cur_compile_time:
                matches.append([sample.name, sample.md5, cur_compile_time])
            elif self.args.window:
                # Absolute difference in whole minutes. Floor division
                # (`//`) keeps the original integer-minute semantics, which
                # `int(...) / 60` lost under Python 3's true division.
                delta = abs(cur_compile_time - compile_time)
                delta_minutes = int(delta.total_seconds()) // 60
                if delta_minutes <= self.args.window:
                    matches.append([sample.name, sample.md5, cur_compile_time])

        self.log('info', "{0} relevant matches found".format(bold(len(matches))))

        if len(matches) > 0:
            self.log('table', dict(header=['Name', 'MD5', 'Compile Time'], rows=matches))
def peid(self):
    """Match the opened file against PEiD packer signatures; with --scan,
    list repository samples matching the same signatures."""
    def get_signatures():
        # Use `open` rather than the Python-2-only `file` builtin, which
        # raises NameError on Python 3.
        with open(os.path.join(VIPER_ROOT, 'data/peid/UserDB.TXT'), 'rt') as f:
            sig_data = f.read()

        signatures = peutils.SignatureDatabase(data=sig_data)
        return signatures

    def get_matches(pe, signatures):
        matches = signatures.match_all(pe, ep_only=True)
        return matches

    if not self.__check_session():
        return

    signatures = get_signatures()
    peid_matches = get_matches(self.pe, signatures)

    if peid_matches:
        self.log('info', "PEiD Signatures:")
        for sig in peid_matches:
            if type(sig) is list:
                self.log('item', sig[0])
            else:
                self.log('item', sig)
    else:
        self.log('info', "No PEiD signatures matched.")

    if self.args.scan and peid_matches:
        self.log('info', "Scanning the repository for matching samples...")

        db = Database()
        samples = db.find(key='all')

        matches = []
        for sample in samples:
            if sample.sha256 == __sessions__.current.file.sha256:
                continue

            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_pe = pefile.PE(sample_path)
                cur_peid_matches = get_matches(cur_pe, signatures)
            except Exception:
                # Unparseable sample; skip it (was a bare `except:`).
                continue

            if peid_matches == cur_peid_matches:
                matches.append([sample.name, sample.sha256])

        self.log('info', "{0} relevant matches found".format(bold(len(matches))))

        if len(matches) > 0:
            self.log('table', dict(header=['Name', 'SHA256'], rows=matches))
def resources(self):
    """List the resources of the opened PE file; optionally dump them, open
    a session on one of them, or scan the repository for shared resources."""
    # Use this function to retrieve resources for the given PE instance.
    # Returns all the identified resources with indicators and attributes.
    def get_resources(pe):
        resources = []
        if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
            count = 1
            for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
                try:
                    resource = {}
                    if resource_type.name is not None:
                        name = str(resource_type.name)
                    else:
                        name = str(pefile.RESOURCE_TYPE.get(resource_type.struct.Id))
                    if name is None:
                        name = str(resource_type.struct.Id)
                    if hasattr(resource_type, 'directory'):
                        for resource_id in resource_type.directory.entries:
                            if hasattr(resource_id, 'directory'):
                                for resource_lang in resource_id.directory.entries:
                                    data = pe.get_data(resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)
                                    filetype = get_type(data)
                                    md5 = get_md5(data)
                                    language = pefile.LANG.get(resource_lang.data.lang, None)
                                    sublanguage = pefile.get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)
                                    offset = ('%-8s' % hex(resource_lang.data.struct.OffsetToData)).strip()
                                    size = ('%-8s' % hex(resource_lang.data.struct.Size)).strip()
                                    # Row layout: [count, name, offset, md5, size,
                                    # filetype, language, sublanguage]; a dump
                                    # path may be appended at index 8 below.
                                    resource = [count, name, offset, md5, size, filetype, language, sublanguage]
                                    # Dump resources if requested to and if the file currently being
                                    # processed is the opened session file.
                                    # This is to avoid that during a --scan all the resources being
                                    # scanned are dumped as well.
                                    if (self.args.open or self.args.dump) and pe == self.pe:
                                        if self.args.dump:
                                            folder = self.args.dump
                                        else:
                                            folder = tempfile.mkdtemp()
                                        resource_path = os.path.join(folder, '{0}_{1}_{2}'.format(__sessions__.current.file.md5, offset, name))
                                        resource.append(resource_path)
                                        with open(resource_path, 'wb') as resource_handle:
                                            resource_handle.write(data)
                                    resources.append(resource)
                                    count += 1
                except Exception as e:
                    self.log('error', e)
                    continue
        return resources

    if not self.__check_session():
        return

    # Obtain resources for the currently opened file.
    resources = get_resources(self.pe)

    if not resources:
        self.log('warning', "No resources found")
        return

    headers = ['#', 'Name', 'Offset', 'MD5', 'Size', 'File Type', 'Language', 'Sublanguage']
    if self.args.dump or self.args.open:
        headers.append('Dumped To')

    self.log('table', dict(header=headers, rows=resources))

    # If instructed, open a session on the given resource.
    if self.args.open:
        for resource in resources:
            if resource[0] == self.args.open:
                # Index 8 is the dump path appended in get_resources() when
                # --open/--dump was given.
                __sessions__.new(resource[8])
                return
    # If instructed to perform a scan across the repository, start looping
    # through all available files.
    elif self.args.scan:
        self.log('info', "Scanning the repository for matching samples...")

        # Retrieve list of samples stored locally and available in the
        # database.
        db = Database()
        samples = db.find(key='all')

        matches = []
        for sample in samples:
            # Skip if it's the same file.
            if sample.sha256 == __sessions__.current.file.sha256:
                continue

            # Obtain path to the binary.
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            # Open PE instance.
            try:
                cur_pe = pefile.PE(sample_path)
            except:
                continue

            # Obtain the list of resources for the current iteration.
            cur_resources = get_resources(cur_pe)
            matched_resources = []
            # Loop through entry's resources.
            for cur_resource in cur_resources:
                # Loop through opened file's resources.
                for resource in resources:
                    # If there is a common resource, add it to the list.
                    if cur_resource[3] == resource[3]:
                        matched_resources.append(resource[3])

            # If there are any common resources, add the entry to the list
            # of matched samples.
            if len(matched_resources) > 0:
                matches.append([sample.name, sample.md5, '\n'.join(r for r in matched_resources)])

        self.log('info', "{0} relevant matches found".format(bold(len(matches))))

        if len(matches) > 0:
            self.log('table', dict(header=['Name', 'MD5', 'Resource MD5'], rows=matches))
def imphash(self):
    """Show the imphash of the open file, or work repository-wide.

    ``--cluster`` groups every stored sample by imphash; ``--scan`` lists
    samples whose imphash matches the currently opened file's. The two
    options are mutually exclusive.
    """
    if self.args.scan and self.args.cluster:
        self.log('error', "You selected two exclusive options, pick one")
        return

    if self.args.cluster:
        self.log('info', "Clustering all samples by imphash...")

        db = Database()
        samples = db.find(key='all')

        cluster = {}
        for sample in samples:
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_imphash = pefile.PE(sample_path).get_imphash()
            # Narrowed from a bare except: skip samples that are not valid
            # PEs or whose pefile lacks imphash support, without swallowing
            # KeyboardInterrupt/SystemExit.
            except (pefile.PEFormatError, AttributeError):
                continue

            cluster.setdefault(cur_imphash, []).append([sample.sha256, sample.name])

        for cluster_name, cluster_members in cluster.items():
            # Skipping clusters with only one entry.
            if len(cluster_members) == 1:
                continue

            self.log('info', "Imphash cluster {0}".format(bold(cluster_name)))
            # BUGFIX: rows hold SHA256 digests (see append above), but the
            # header column used to be labeled 'MD5'.
            self.log('table', dict(header=['SHA256', 'Name'],
                                   rows=cluster_members))

        return

    if self.__check_session():
        try:
            imphash = self.pe.get_imphash()
        except AttributeError:
            self.log('error', "No imphash support, upgrade pefile to a version >= 1.2.10-139 (`pip install --upgrade pefile`)")
            return

        self.log('info', "Imphash: {0}".format(bold(imphash)))

        if self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")

            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                # Skip the currently opened file.
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_imphash = pefile.PE(sample_path).get_imphash()
                except (pefile.PEFormatError, AttributeError):
                    continue

                if imphash == cur_imphash:
                    matches.append([sample.name, sample.sha256])

            self.log('info', "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'SHA256'], rows=matches))
def security(self):
    """Inspect the Authenticode data of the open PE.

    Default: show the MD5 of the embedded certificate blob. ``--all`` lists
    every signed sample in the repository, ``--dump`` writes the blob to
    disk, ``--scan`` finds samples signed with the same certificate, and
    ``--check`` validates the signature chain (requires verify-sigs).
    """
    def get_certificate(pe):
        # TODO: this only extracts the raw list of certificate data.
        # I need to parse them, extract single certificates and perhaps
        # return the PEM data of the first certificate only.
        pe_security_dir = pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']
        address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].VirtualAddress
        # size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].Size

        if address:
            # Skip the 8-byte WIN_CERTIFICATE header to get the raw blob.
            return pe.write()[address + 8:]
        else:
            return None

    def get_signed_samples(current=None, cert_filter=None):
        # Walk every sample in the database; return [name, md5] rows for
        # samples matching cert_filter, or [name, md5, cert_md5] rows for
        # all signed samples when no filter is given.
        db = Database()
        samples = db.find(key='all')

        results = []
        for sample in samples:
            # Skip if it's the same file.
            if current:
                if sample.sha256 == current:
                    continue

            # Obtain path to the binary.
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            # Open PE instance. Narrowed from a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            try:
                cur_pe = pefile.PE(sample_path)
            except Exception:
                continue

            cur_cert_data = get_certificate(cur_pe)
            if not cur_cert_data:
                continue

            cur_cert_md5 = get_md5(cur_cert_data)
            if cert_filter:
                if cur_cert_md5 == cert_filter:
                    results.append([sample.name, sample.md5])
            else:
                results.append([sample.name, sample.md5, cur_cert_md5])

        return results

    if self.args.all:
        self.log('info', "Scanning the repository for all signed samples...")

        all_of_them = get_signed_samples()

        self.log('info', "{0} signed samples found".format(bold(len(all_of_them))))

        if len(all_of_them) > 0:
            self.log('table', dict(header=['Name', 'MD5', 'Cert MD5'], rows=all_of_them))

        return

    if not self.__check_session():
        return

    cert_data = get_certificate(self.pe)

    if not cert_data:
        self.log('warning', "No certificate found")
        return

    cert_md5 = get_md5(cert_data)
    self.log('info', "Found certificate with MD5 {0}".format(bold(cert_md5)))

    if self.args.dump:
        cert_path = os.path.join(self.args.dump, '{0}.crt'.format(__sessions__.current.file.sha256))
        with open(cert_path, 'wb+') as cert_handle:
            cert_handle.write(cert_data)

        self.log('info', "Dumped certificate to {0}".format(cert_path))
        self.log('info', "You can parse it using the following command:\n\t" +
                 bold("openssl pkcs7 -inform DER -print_certs -text -in {0}".format(cert_path)))

    # TODO: do scan for certificate's serial number.
    if self.args.scan:
        self.log('info', "Scanning the repository for matching signed samples...")

        matches = get_signed_samples(current=__sessions__.current.file.sha256, cert_filter=cert_md5)

        self.log('info', "{0} relevant matches found".format(bold(len(matches))))

        if len(matches) > 0:
            # BUGFIX: get_signed_samples returns [name, md5] rows here, but
            # the header column used to be labeled 'SHA256'.
            self.log('table', dict(header=['Name', 'MD5'], rows=matches))

    # TODO: this function needs to be better integrated with the rest of the command.
    # TODO: need to add more error handling and figure out why so many samples are failing.
    if self.args.check:
        if not HAVE_VERIFYSIGS:
            self.log('error', "Dependencies missing for authenticode validation. Please install M2Crypto and pyasn1 (`pip install pyasn1 M2Crypto`)")
            return

        try:
            auth, computed_content_hash = get_auth_data(__sessions__.current.file.path)
        except Exception as e:
            self.log('error', "Unable to parse PE certificate: {0}".format(str(e)))
            return

        try:
            auth.ValidateAsn1()
            auth.ValidateHashes(computed_content_hash)
            auth.ValidateSignatures()
            auth.ValidateCertChains(time.gmtime())
        # BUGFIX: was Python-2-only "except Exception, e" syntax; use the
        # "as" form, which is valid in both Python 2 and 3 and consistent
        # with the handler above.
        except Exception as e:
            self.log('error', "Unable to validate PE certificate: {0}".format(str(e)))
            return

        self.log('info', bold('Signature metadata:'))
        self.log('info', 'Program name: {0}'.format(auth.program_name))
        self.log('info', 'URL: {0}'.format(auth.program_url))

        if auth.has_countersignature:
            self.log('info', bold('Countersignature is present. Timestamp: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_timestamp)))))
        else:
            self.log('info', bold('Countersignature is not present.'))

        self.log('info', bold('Binary is signed with cert issued by:'))
        self.log('info', '{0}'.format(auth.signing_cert_id[0]))
        self.log('info', '{0}'.format(auth.cert_chain_head[2][0]))
        self.log('info', 'Chain not before: {0} UTC'.format(
            time.asctime(time.gmtime(auth.cert_chain_head[0]))))
        self.log('info', 'Chain not after: {0} UTC'.format(
            time.asctime(time.gmtime(auth.cert_chain_head[1]))))

        if auth.has_countersignature:
            self.log('info', bold('Countersig chain head issued by:'))
            self.log('info', '{0}'.format(auth.counter_chain_head[2]))
            self.log('info', 'Countersig not before: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_chain_head[0]))))
            self.log('info', 'Countersig not after: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_chain_head[1]))))

        self.log('info', bold('Certificates:'))
        for (issuer, serial), cert in auth.certificates.items():
            self.log('info', 'Issuer: {0}'.format(issuer))
            self.log('info', 'Serial: {0}'.format(serial))

            subject = cert[0][0]['subject']
            subject_dn = str(dn.DistinguishedName.TraverseRdn(subject[0]))
            self.log('info', 'Subject: {0}'.format(subject_dn))

            not_before = cert[0][0]['validity']['notBefore']
            not_after = cert[0][0]['validity']['notAfter']
            not_before_time = not_before.ToPythonEpochTime()
            not_after_time = not_after.ToPythonEpochTime()
            self.log('info', 'Not Before: {0} UTC ({1})'.format(
                time.asctime(time.gmtime(not_before_time)), not_before[0]))
            self.log('info', 'Not After: {0} UTC ({1})'.format(
                time.asctime(time.gmtime(not_after_time)), not_after[0]))

        if auth.trailing_data:
            # NOTE(review): str.encode('hex') is Python-2-only; this module
            # appears to target Python 2 — revisit if porting to Python 3.
            self.log('info', 'Signature Blob had trailing (unvalidated) data ({0} bytes): {1}'.format(
                len(auth.trailing_data), auth.trailing_data.encode('hex')))
def language(self):
    """Guess the programming language the open PE was written in.

    With ``--scan``, also list repository samples whose guessed language
    matches the open file's.
    """
    def get_iat(pe):
        # Collect imported DLL names from the import directory.
        iat = []

        if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
            for peimport in pe.DIRECTORY_ENTRY_IMPORT:
                iat.append(peimport.dll)

        return iat

    def check_module(iat, match):
        # True if any imported DLL name contains the given substring.
        for imp in iat:
            if imp.find(match) != -1:
                return True

        return False

    def is_cpp(data, cpp_count):
        # RTTI markers in the strings, combined with a MSVC runtime import
        # counted by the caller, are taken as evidence of C++.
        for line in data:
            if 'type_info' in line or 'RTTI' in line:
                cpp_count += 1
                break

        if cpp_count == 2:
            return True

        return False

    def is_delphi(data):
        # A Borland path component mentioning Delphi marks a Delphi binary.
        for line in data:
            if 'Borland' in line:
                path = line.split('\\')
                for p in path:
                    if 'Delphi' in p:
                        return True

        return False

    def is_vbdotnet(data):
        for line in data:
            if 'Compiler' in line:
                stuff = line.split('.')
                if 'VisualBasic' in stuff:
                    return True

        return False

    def is_autoit(data):
        for line in data:
            if 'AU3!' in line:
                return True

        return False

    def is_packed(pe):
        # Any section with entropy above 7 suggests packing/encryption.
        for section in pe.sections:
            if section.get_entropy() > 7:
                return True

        return False

    def get_strings(content):
        regexp = '[\x30-\x39\x41-\x5f\x61-\x7a\-\.:]{4,}'
        return re.findall(regexp, content)

    def find_language(iat, sample, content):
        dotnet = False
        cpp_count = 0
        found = None

        # VB check
        if check_module(iat, 'VB'):
            self.log('info', "{0} - Possible language: Visual Basic".format(sample.name))
            # BUGFIX: previously returned True, which made the caller print
            # "Probable language: True"; return the language name instead
            # (string equality keeps the --scan comparison working).
            return 'Visual Basic'

        # .NET check
        if check_module(iat, 'mscoree.dll') and not found:
            dotnet = True
            found = '.NET'

        # C DLL check
        if not found and (check_module(iat, 'msvcr') or check_module(iat, 'MSVCR') or check_module(iat, 'c++')):
            cpp_count += 1

        if not found:
            data = get_strings(content)

            if is_cpp(data, cpp_count) and not found:
                found = 'CPP'
            if not found and cpp_count == 1:
                found = 'C'
            if not dotnet and is_delphi(data) and not found:
                found = 'Delphi'
            # NOTE(review): this branch is unreachable — when dotnet is
            # True, found is already '.NET' so this block is skipped.
            # Preserved as-is; confirm intent before restructuring.
            if dotnet and is_vbdotnet(data):
                found = 'Visual Basic .NET'
            if is_autoit(data) and not found:
                found = 'AutoIt'

        return found

    if not self.__check_session():
        return

    if is_packed(self.pe):
        self.log('warning', "Probably packed, the language guess might be unreliable")

    language = find_language(
        get_iat(self.pe),
        __sessions__.current.file,
        __sessions__.current.file.data
    )

    if language:
        self.log('info', "Probable language: {0}".format(bold(language)))
    else:
        self.log('error', "Programming language not identified")
        return

    if self.args.scan:
        self.log('info', "Scanning the repository for matching samples...")

        db = Database()
        samples = db.find(key='all')

        matches = []
        for sample in samples:
            # Skip the currently opened file.
            if sample.sha256 == __sessions__.current.file.sha256:
                continue

            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_pe = pefile.PE(sample_path)
            except pefile.PEFormatError:
                continue

            cur_packed = ''
            if is_packed(cur_pe):
                cur_packed = 'Yes'

            # BUGFIX: the file handle was previously left open; read the
            # sample content through a context manager instead.
            with open(sample_path, 'rb') as sample_handle:
                sample_content = sample_handle.read()

            cur_language = find_language(
                get_iat(cur_pe),
                sample,
                sample_content
            )

            if not cur_language:
                continue

            if cur_language == language:
                matches.append([sample.name, sample.md5, cur_packed])

        if matches:
            self.log('table', dict(header=['Name', 'MD5', 'Is Packed'], rows=matches))
        else:
            self.log('info', "No matches found")
def sections(self):
    """Print a table of the PE sections of the open file.

    Each row shows the section name, RVA, virtual size, raw data size,
    and Shannon entropy.
    """
    if not self.__check_session():
        return

    def describe(sec):
        # One table row per section.
        return [sec.Name,
                hex(sec.VirtualAddress),
                hex(sec.Misc_VirtualSize),
                sec.SizeOfRawData,
                sec.get_entropy()]

    table_rows = [describe(sec) for sec in self.pe.sections]

    self.log('info', "PE Sections:")
    self.log('table', dict(header=['Name', 'RVA', 'VirtualSize', 'RawDataSize', 'Entropy'],
                           rows=table_rows))
def pehash(self):
    # Compute the PEhash of the open file and, depending on the flags,
    # list (`--all`), cluster (`--cluster`), or match (`--scan`) PEhashes
    # across the whole repository.
    if not HAVE_PEHASH:
        self.log('error', "PEhash is missing. Please copy PEhash to the modules directory of Viper")
        return

    current_pehash = None
    if __sessions__.is_set():
        current_pehash = calculate_pehash(__sessions__.current.file.path)
        self.log('info', "PEhash: {0}".format(bold(current_pehash)))

    if self.args.all or self.args.cluster or self.args.scan:
        # All three modes need the PEhash of every stored sample, so the
        # rows are computed once up front.
        db = Database()
        samples = db.find(key='all')

        rows = []
        for sample in samples:
            sample_path = get_sample_path(sample.sha256)
            pe_hash = calculate_pehash(sample_path)
            # Samples whose PEhash could not be computed are skipped.
            if pe_hash:
                rows.append((sample.name, sample.md5, pe_hash))

        if self.args.all:
            self.log('info', "PEhash for all files:")
            header = ['Name', 'MD5', 'PEhash']
            self.log('table', dict(header=header, rows=rows))
        elif self.args.cluster:
            self.log('info', "Clustering files by PEhash...")

            # Group (name, md5) pairs by their shared PEhash.
            cluster = {}
            for sample_name, sample_md5, pe_hash in rows:
                cluster.setdefault(pe_hash, []).append([sample_name, sample_md5])

            for item in cluster.items():
                # Only report clusters with more than one member.
                if len(item[1]) > 1:
                    self.log('info', "PEhash cluster {0}:".format(bold(item[0])))
                    self.log('table', dict(header=['Name', 'MD5'], rows=item[1]))
        elif self.args.scan:
            # Matching requires an open session with a computed PEhash.
            if __sessions__.is_set() and current_pehash:
                self.log('info', "Finding matching samples...")

                matches = []
                for row in rows:
                    # Skip the currently opened file itself (matched by MD5).
                    if row[1] == __sessions__.current.file.md5:
                        continue

                    if row[2] == current_pehash:
                        matches.append([row[0], row[1]])

                if matches:
                    self.log('table', dict(header=['Name', 'MD5'], rows=matches))
                else:
                    self.log('info', "No matches found")
def run(self):
    """Entry point: dispatch to the sub-command chosen on the command line."""
    super(PE, self).run()
    if self.args is None:
        return

    if not HAVE_PEFILE:
        self.log('error', "Missing dependency, install pefile (`pip install pefile`)")
        return

    # Map each sub-command name to its handler; unknown names are
    # silently ignored, matching the original if/elif chain.
    dispatch = {
        'imports': self.imports,
        'exports': self.exports,
        'resources': self.resources,
        'imphash': self.imphash,
        'compiletime': self.compiletime,
        'peid': self.peid,
        'security': self.security,
        'sections': self.sections,
        'language': self.language,
        'pehash': self.pehash,
        'entrypoint': self.entrypoint,
    }

    handler = dispatch.get(self.args.subname)
    if handler is not None:
        handler()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/sh

test_description='basic tests of rev-list --disk-usage'
. ./test-lib.sh

# we want a mix of reachable and unreachable, as well as
# objects in the bitmapped pack and some outside of it
test_expect_success 'set up repository' '
	test_commit --no-tag one &&
	test_commit --no-tag two &&
	git repack -adb &&
	git reset --hard HEAD^ &&
	test_commit --no-tag three &&
	test_commit --no-tag four &&
	git reset --hard HEAD^
'

# We don't want to hardcode sizes, because they depend on the exact details of
# packing, zlib, etc. We'll assume that the regular rev-list and cat-file
# machinery works and compare the --disk-usage output to that.
disk_usage_slow () {
	git rev-list --no-object-names "$@" |
	git cat-file --batch-check="%(objectsize:disk)" |
	awk '{ i += $1 } END { print i }'
}

# check behavior with given rev-list options; note that
# whitespace is not preserved in args
check_du () {
	args=$*

	# Compute the reference value first, then check both the plain and
	# the bitmap-assisted --disk-usage code paths against it.
	test_expect_success "generate expected size ($args)" "
		disk_usage_slow $args >expect
	"

	test_expect_success "rev-list --disk-usage without bitmaps ($args)" "
		git rev-list --disk-usage $args >actual &&
		test_cmp expect actual
	"

	test_expect_success "rev-list --disk-usage with bitmaps ($args)" "
		git rev-list --disk-usage --use-bitmap-index $args >actual &&
		test_cmp expect actual
	"
}

check_du HEAD
check_du --objects HEAD
check_du --objects HEAD^..HEAD

# Create a mix of packed and unpacked objects for the --unpacked case.
test_expect_success 'setup for --unpacked tests' '
	git repack -adb &&
	test_commit unpacked
'

check_du --all --objects --unpacked

# As mentioned above, don't use hardcode sizes as actual size, but use the
# output from git cat-file.
test_expect_success 'rev-list --disk-usage=human' '
	git rev-list --objects HEAD --disk-usage=human >actual &&
	disk_usage_slow --objects HEAD >actual_size &&
	grep "$(cat actual_size) bytes" actual
'

test_expect_success 'rev-list --disk-usage=human with bitmaps' '
	git rev-list --objects HEAD --use-bitmap-index --disk-usage=human >actual &&
	disk_usage_slow --objects HEAD >actual_size &&
	grep "$(cat actual_size) bytes" actual
'

# An unrecognized --disk-usage format must fail with a clear message.
test_expect_success 'rev-list use --disk-usage unproperly' '
	test_must_fail git rev-list --objects HEAD --disk-usage=typo 2>err &&
	cat >expect <<-\EOF &&
	fatal: invalid value for '\''--disk-usage=<format>'\'': '\''typo'\'', the only allowed format is '\''human'\''
	EOF
	test_cmp err expect
'

test_done
|
unknown
|
github
|
https://github.com/git/git
|
t/t6115-rev-list-du.sh
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# open a wave file
#filename = 'francois_filon_pure_3.wav'
#filename_1 ='francois_filon_pure_3.xml'
#word ='je'
#word_2='le'
#word_3='qui'
#word_4='dans'
#word_5='de'
def calcul_matrix_je_le_qui_dans_de_192_matrix_compare_df_good(filename, filename_1,word,word_2,word_3,word_4,word_5):
#==============================================================================
# this is the parti for the 'je' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
je_compare_1 = matrix_all_step_new_1
je_compare_2 = matrix_all_step_new_2
je_compare_3 = matrix_all_step_new_3
je_compare_4 = matrix_all_step_new_4
je_compare_5 = matrix_all_step_new_5
#==============================================================================
# # this is the parti for the 'je' end
#==============================================================================
#np.savez('je_le_qui_dans_de_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
# # # # # # # # demain, je continue ici
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
#==============================================================================
# this is the parti for the 'le' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_2)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
le_compare_1 = matrix_all_step_new_1
le_compare_2 = matrix_all_step_new_2
le_compare_3 = matrix_all_step_new_3
le_compare_4 = matrix_all_step_new_4
le_compare_5 = matrix_all_step_new_5
#==============================================================================
# # this is the parti for the 'le' end
#==============================================================================
#==============================================================================
# this is the parti for the 'qui' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_3)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
qui_compare_1 = matrix_all_step_new_1
qui_compare_2 = matrix_all_step_new_2
qui_compare_3 = matrix_all_step_new_3
qui_compare_4 = matrix_all_step_new_4
qui_compare_5 = matrix_all_step_new_5
#==============================================================================
# this is the parti for the 'dans' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_4)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
dans_compare_1 = matrix_all_step_new_1
dans_compare_2 = matrix_all_step_new_2
dans_compare_3 = matrix_all_step_new_3
dans_compare_4 = matrix_all_step_new_4
dans_compare_5 = matrix_all_step_new_5
#==============================================================================
# this is the parti for the 'de' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_5)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
de_compare_1 = matrix_all_step_new_1
de_compare_2 = matrix_all_step_new_2
de_compare_3 = matrix_all_step_new_3
de_compare_4 = matrix_all_step_new_4
de_compare_5 = matrix_all_step_new_5
#==============================================================================
# # this is the parti for the 'le' end
#==============================================================================
np.savez('je_le_qui_dans_de_192_matrix_compare.npz',je_compare_1,je_compare_2,je_compare_3,je_compare_4,je_compare_5,le_compare_1,le_compare_2,le_compare_3,le_compare_4,le_compare_5,qui_compare_1,qui_compare_2,qui_compare_3,qui_compare_4,qui_compare_5,dans_compare_1,dans_compare_2,dans_compare_3,dans_compare_4,dans_compare_5,de_compare_1,de_compare_2,de_compare_3,de_compare_4,de_compare_5)
finish_2=1
return finish_2
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.server.cli;
import org.elasticsearch.test.ESTestCase;
import java.util.List;
import static org.elasticsearch.server.cli.OverheadSystemMemoryInfo.SERVER_CLI_OVERHEAD;
import static org.hamcrest.Matchers.is;
public class OverheadSystemMemoryInfoTests extends ESTestCase {

    private static final long TRUE_SYSTEM_MEMORY = 1024 * 1024 * 1024L;

    public void testNoOptions() {
        // With no user JVM options the fixed server CLI overhead applies.
        assertAvailableMemory(List.of(), TRUE_SYSTEM_MEMORY - SERVER_CLI_OVERHEAD);
    }

    public void testNoOverrides() {
        // Unrelated -D options leave the default overhead in place.
        assertAvailableMemory(List.of("-Da=b", "-Dx=y"), TRUE_SYSTEM_MEMORY - SERVER_CLI_OVERHEAD);
    }

    public void testValidSingleOverride() {
        assertAvailableMemory(List.of("-Des.total_memory_overhead_bytes=50000"), TRUE_SYSTEM_MEMORY - 50000);
    }

    public void testValidOverrideInList() {
        assertAvailableMemory(List.of("-Da=b", "-Des.total_memory_overhead_bytes=50000", "-Dx=y"), TRUE_SYSTEM_MEMORY - 50000);
    }

    public void testMultipleValidOverridesInList() {
        // The last occurrence of the override option wins.
        assertAvailableMemory(
            List.of("-Des.total_memory_overhead_bytes=50000", "-Da=b", "-Des.total_memory_overhead_bytes=100000", "-Dx=y"),
            TRUE_SYSTEM_MEMORY - 100000
        );
    }

    public void testNegativeOverride() {
        assertInvalidOverride(
            List.of("-Da=b", "-Des.total_memory_overhead_bytes=-123", "-Dx=y"),
            "Negative bytes size specified in [-Des.total_memory_overhead_bytes=-123]"
        );
    }

    public void testUnparsableOverride() {
        assertInvalidOverride(
            List.of("-Da=b", "-Des.total_memory_overhead_bytes=invalid", "-Dx=y"),
            "Unable to parse number of bytes from [-Des.total_memory_overhead_bytes=invalid]"
        );
    }

    /** Asserts the memory reported for the given user JVM options against a fixed 1 GiB delegate. */
    private static void assertAvailableMemory(List<String> userOptions, long expected) {
        final SystemMemoryInfo memoryInfo = new OverheadSystemMemoryInfo(userOptions, () -> TRUE_SYSTEM_MEMORY);
        assertThat(memoryInfo.availableSystemMemory(), is(expected));
    }

    /** Asserts that querying available memory fails with exactly the given message. */
    private static void assertInvalidOverride(List<String> userOptions, String expectedMessage) {
        final SystemMemoryInfo memoryInfo = new OverheadSystemMemoryInfo(userOptions, () -> TRUE_SYSTEM_MEMORY);
        try {
            memoryInfo.availableSystemMemory();
            fail("expected to fail");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), is(expectedMessage));
        }
    }
}
|
java
|
github
|
https://github.com/elastic/elasticsearch
|
distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/OverheadSystemMemoryInfoTests.java
|
<!-- Horizontal listbox of amenity options; `multi` + explicit selectionMode
     allow several options to be toggled independently. -->
<div ngListbox aria-label="Amenities" orientation="horizontal" selectionMode="explicit" multi>
  @for (amenity of amenities; track amenity) {
    <div ngOption [value]="amenity" [label]="amenity">
      <span class="option-label">{{ amenity }}</span>
    </div>
  }
</div>
|
html
|
github
|
https://github.com/angular/angular
|
adev/src/content/examples/aria/listbox/src/horizontal/app/app.html
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper around tf-slim's training code contrib/slim/python/slim/learning.py
to support training of pruned models
*******************************************************************
* A simple working training script with support for model pruning *
*******************************************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Set up sparsity
sparsity = pruning.setup_gradual_sparsity(self.global_step)
# Create mask update op
mask_update_op = pruning.add_mask_update_op(sparsity)
# Run training.
learning.train(train_op,
my_log_dir,
mask_update_op)
see contrib/slim/python/slim/learning.py for additional examples
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim as _slim
# Sentinel mirroring tf-slim's own "use the default" marker so keyword
# arguments can be passed straight through to _slim.learning.train.
_USE_DEFAULT = 0
# Re-export tf-slim's default train step; it is the default train_step_fn
# wrapped by train() below.
train_step = _slim.learning.train_step
def train(train_op,
          logdir,
          mask_update_op,
          train_step_fn=train_step,
          train_step_kwargs=_USE_DEFAULT,
          log_every_n_steps=1,
          graph=None,
          master='',
          is_chief=True,
          global_step=None,
          number_of_steps=None,
          init_op=_USE_DEFAULT,
          init_feed_dict=None,
          local_init_op=_USE_DEFAULT,
          init_fn=None,
          ready_op=_USE_DEFAULT,
          summary_op=_USE_DEFAULT,
          save_summaries_secs=600,
          summary_writer=_USE_DEFAULT,
          startup_delay_steps=0,
          saver=None,
          save_interval_secs=600,
          sync_optimizer=None,
          session_config=None,
          trace_every_n_steps=None):
  """Wrapper around tf-slim's train function that also updates pruning masks.

  Runs a training loop using a TensorFlow supervisor.
  When the sync_optimizer is supplied, gradient updates are applied
  synchronously. Otherwise, gradient updates are applied asynchronously.

  Args:
    train_op: A `Tensor` that, when executed, will apply the gradients and
      return the loss value.
    logdir: The directory where training logs are written to. If None, model
      checkpoints and summaries will not be written.
    mask_update_op: Operation that upon execution updates the weight masks and
      thresholds.
    train_step_fn: The function to call in order to execute a single gradient
      step. The function must take exactly four arguments: the current
      session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.
    train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
      default, two `Boolean`, scalar ops called "should_stop" and "should_log"
      are provided.
    log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step are logged.
    graph: The graph to pass to the supervisor. If no graph is supplied the
      default graph is used.
    master: The address of the tensorflow master.
    is_chief: Specifies whether or not the training is being run by the primary
      replica during replica training.
    global_step: The `Tensor` representing the global step. If left as `None`,
      then slim.variables.get_or_create_global_step() is used.
    number_of_steps: The max number of gradient steps to take during training,
      as measured by 'global_step': training will stop if global_step is
      greater than 'number_of_steps'. If the value is left as None, training
      proceeds indefinitely.
    init_op: The initialization operation. If left to its default value, then
      the session is initialized by calling `tf.global_variables_initializer()`.
    init_feed_dict: A feed dictionary to use when executing the `init_op`.
    local_init_op: The local initialization operation. If left to its default
      value, then the session is initialized by calling
      `tf.local_variables_initializer()` and `tf.tables_initializer()`.
    init_fn: An optional callable to be executed after `init_op` is called. The
      callable must accept one argument, the session being initialized.
    ready_op: Operation to check if the model is ready to use. If left to its
      default value, then the session checks for readiness by calling
      `tf.report_uninitialized_variables()`.
    summary_op: The summary operation.
    save_summaries_secs: How often, in seconds, to save summaries.
    summary_writer: `SummaryWriter` to use.  Can be `None`
      to indicate that no summaries should be written. If unset, we
      create a SummaryWriter.
    startup_delay_steps: The number of steps to wait for before beginning. Note
      that this must be 0 if a sync_optimizer is supplied.
    saver: Saver to save checkpoints. If None, a default one will be created
      and used.
    save_interval_secs: How often, in seconds, to save the model to `logdir`.
    sync_optimizer: an instance of tf.train.SyncReplicasOptimizer, or a list of
      them. If the argument is supplied, gradient updates will be synchronous.
      If left as `None`, gradient updates will be asynchronous.
    session_config: An instance of `tf.ConfigProto` that will be used to
      configure the `Session`. If left as `None`, the default will be used.
    trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
      and add it to the summaries every `trace_every_n_steps`. If None, no trace
      information will be produced or saved.

  Returns:
    the value of the loss function after training.

  Raises:
    ValueError: if `train_op` is empty or if `startup_delay_steps` is
      non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
      negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
      provided.
  """
  def train_step_with_pruning_fn(sess, train_op, global_step,
                                 train_step_kwargs):
    # Run the regular slim train step first, then refresh the pruning masks
    # and thresholds so they track the freshly updated weights.
    total_loss, should_stop = train_step_fn(sess, train_op, global_step,
                                            train_step_kwargs)
    sess.run(mask_update_op)
    return total_loss, should_stop
  # NOTE(review): tf-slim's learning.train is documented to return only the
  # final loss value; confirm the tuple unpacking below matches the slim
  # version in use, otherwise this raises at the end of training.
  total_loss, _ = _slim.learning.train(
      train_op,
      logdir,
      train_step_fn=train_step_with_pruning_fn,
      train_step_kwargs=train_step_kwargs,
      log_every_n_steps=log_every_n_steps,
      graph=graph,
      master=master,
      is_chief=is_chief,
      global_step=global_step,
      number_of_steps=number_of_steps,
      init_op=init_op,
      init_feed_dict=init_feed_dict,
      local_init_op=local_init_op,
      init_fn=init_fn,
      ready_op=ready_op,
      summary_op=summary_op,
      save_summaries_secs=save_summaries_secs,
      summary_writer=summary_writer,
      startup_delay_steps=startup_delay_steps,
      saver=saver,
      save_interval_secs=save_interval_secs,
      sync_optimizer=sync_optimizer,
      session_config=session_config,
      trace_every_n_steps=trace_every_n_steps)
  return total_loss
|
unknown
|
codeparrot/codeparrot-clean
| ||
//go:build windows
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package winstats
import (
"testing"
"time"
"k8s.io/apimachinery/pkg/util/wait"
)
// TestPerfCounter exercises newPerfCounter against a set of known counter
// queries plus one invalid query expected to fail with a specific message.
func TestPerfCounter(t *testing.T) {
	cases := map[string]struct {
		query      string
		skip       bool
		wantErr    bool
		wantErrMsg string
	}{
		"CPU Query": {
			query: cpuQuery,
			// TODO: remove skip once the test flake for CPU Query has been fixed.
			skip: true,
		},
		"Memory Prvate Working Set Query": {
			query: memoryPrivWorkingSetQuery,
		},
		"Memory Committed Bytes Query": {
			query: memoryCommittedBytesQuery,
		},
		"Net Adapter Packets Received/sec Query": {
			query: packetsReceivedPerSecondQuery,
			skip:  true,
		},
		"Net Adapter Packets Sent/sec Query": {
			query: packetsSentPerSecondQuery,
			skip:  true,
		},
		"Net Adapter Bytes Received/sec Query": {
			query: bytesReceivedPerSecondQuery,
			skip:  true,
		},
		"Net Adapter Bytes Sent/sec Query": {
			query: bytesSentPerSecondQuery,
			skip:  true,
		},
		"Net Adapter Packets Received Discarded Query": {
			query: packetsReceivedDiscardedQuery,
			skip:  true,
		},
		"Net Adapter Packets Received Errors Query": {
			query: packetsReceivedErrorsQuery,
			skip:  true,
		},
		"Net Adapter Packets Outbound Discarded Query": {
			query: packetsOutboundDiscardedQuery,
			skip:  true,
		},
		"Net Adapter Packets Outbound Errors Query": {
			query: packetsOutboundErrorsQuery,
			skip:  true,
		},
		"Invalid Query": {
			query:      "foo",
			wantErr:    true,
			wantErrMsg: "unable to add process counter: foo. Error code is c0000bc0",
		},
	}

	for name, tt := range cases {
		t.Run(name, func(t *testing.T) {
			pc, err := newPerfCounter(tt.query)
			if tt.wantErr {
				if err == nil || err.Error() != tt.wantErrMsg {
					t.Fatalf("expected error message `%s` but got `%v`", tt.wantErrMsg, err)
				}
				return
			}
			// Some counters (e.g. the networking ones) may legitimately stay
			// at zero, so there is nothing further to check for them.
			if tt.skip {
				return
			}
			// Poll until the counter reports a non-zero scalar value.
			if pollErr := wait.Poll(100*time.Millisecond, 5*perfCounterUpdatePeriod, func() (bool, error) {
				value, getErr := pc.getData()
				if getErr != nil {
					return false, getErr
				}
				return value != 0, nil
			}); pollErr != nil {
				t.Fatalf("Encountered error: `%v'", pollErr)
				return
			}
			// Poll until the per-instance data list has at least one non-zero entry.
			if pollErr := wait.Poll(100*time.Millisecond, 5*perfCounterUpdatePeriod, func() (bool, error) {
				values, getErr := pc.getDataList()
				if getErr != nil {
					return false, getErr
				}
				for _, v := range values {
					if v != 0 {
						return true, nil
					}
				}
				return false, nil
			}); pollErr != nil {
				t.Fatalf("Encountered error: `%v'", pollErr)
			}
		})
	}
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/kubelet/winstats/perfcounters_test.go
|
##############################################################################################
# Copyright 2014-2015 Cloud Media Sdn. Bhd.
#
# This file is part of Xuan Application Development SDK.
#
# Xuan Application Development SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xuan Application Development SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xuan Application Development SDK. If not, see <http://www.gnu.org/licenses/>.
##############################################################################################
from threading import Thread
'''
Time out a call.
NOTE: The call is NOT interrupted; a TimeoutError is raised instead.
timeout:Number - Timeout in seconds
'''
def timelimit(timeout):
    """Decorator factory intended to bound a call to `timeout` seconds.

    NOTE(review): the implementation visible here is incomplete/truncated:
    `timelimit` never returns `internal`, `internal` never returns
    `internal2`, and `internal2` never instantiates or starts `Calculator`,
    so applying `@timelimit(n)` evaluates the decorator to None and the
    subsequent call fails. `Calculator.__init__` also overrides
    `Thread.__init__` without calling it. Restore the full implementation
    before use.
    """
    def internal(function):
        def internal2(*args, **kw):
            # Worker thread meant to run `function` — currently a stub.
            class Calculator(Thread):
                def __init__(self):
                    pass
                def run(self):
                    pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.test.base import BaseTestCase
from volume_manager.models.node_volumes import NodeVolumes
from volume_manager.objects.volumes import VolumeObject
class TestExtension(BaseTestCase):
    def test_delete_by_node_ids(self):
        """Deleting volumes for a subset of node ids leaves the rest intact."""
        seeded = [
            NodeVolumes(node_id=1, volumes='volume_1'),
            NodeVolumes(node_id=2, volumes='volume_2'),
            NodeVolumes(node_id=3, volumes='volume_3'),
        ]
        for record in seeded:
            self.db.add(record)
        self.db.commit()
        self.assertEqual(self.db.query(NodeVolumes).count(), 3)
        VolumeObject.delete_by_node_ids([1, 2])
        self.assertEqual(self.db.query(NodeVolumes).count(), 1)
        remaining = self.db.query(NodeVolumes).first()
        self.assertEqual(remaining.node_id, 3)
        self.assertEqual(remaining.volumes, 'volume_3')
|
unknown
|
codeparrot/codeparrot-clean
| ||
import json
import orca
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from .. import server
@pytest.fixture
def tapp():
    """Flask test client for the orca server application."""
    flask_app = server.app
    flask_app.config['TESTING'] = True
    return flask_app.test_client()
@pytest.fixture(scope='module')
def dfa():
    """Frame 'dfa': one int column 'a' over index v..z."""
    labels = ['v', 'w', 'x', 'y', 'z']
    values = [100, 200, 300, 200, 100]
    return pd.DataFrame({'a': values}, index=labels)
@pytest.fixture(scope='module')
def dfb():
    """Frame 'dfb': column 'b' plus 'a_id' keys into dfa's index."""
    data = {'b': [70, 80, 90],
            'a_id': ['w', 'v', 'z']}
    return pd.DataFrame(data, index=['a', 'b', 'b'])
@pytest.fixture(scope='module')
def dfa_col(dfa):
    """Extra series aligned to dfa's index, registered as column 'acol'."""
    values = [2, 4, 6, 8, 10]
    return pd.Series(values, index=dfa.index)
@pytest.fixture(scope='module')
def dfb_col(dfb):
    """Extra series aligned to dfb's index, registered as column 'bcol'."""
    values = [10, 20, 30]
    return pd.Series(values, index=dfb.index)
@pytest.fixture(scope='module')
def dfa_factor():
    """Scalar injectable registered as 'a_factor'."""
    factor = 0.5
    return factor
@pytest.fixture(scope='module')
def dfb_factor():
    """Scalar injectable exposed through the b_factor() function."""
    factor = 2
    return factor
@pytest.fixture(scope='module', autouse=True)
def setup_orca(dfa, dfb, dfa_col, dfb_col, dfa_factor, dfb_factor):
    # Registers tables/columns/injectables/broadcasts/steps with orca once
    # per module.  NOTE: several tests below assert the *exact source text*
    # of the nested definitions (see test_table_definition_func and friends),
    # so their bodies and indentation must not be edited.
    orca.add_injectable('a_factor', dfa_factor)
    @orca.injectable()
    def b_factor():
        return dfb_factor
    orca.add_table('dfa', dfa)
    @orca.table('dfb')
    def dfb_table():
        return dfb
    # 'acol'/'bcol' are plain Series columns; extra_* are function columns.
    orca.add_column('dfa', 'acol', dfa_col)
    orca.add_column('dfb', 'bcol', dfb_col)
    @orca.column('dfa')
    def extra_acol(a_factor):
        return dfa_col * a_factor
    @orca.column('dfb')
    def extra_bcol(b_factor):
        return dfb_col * b_factor
    # dfb rows join onto dfa via their 'a_id' values matching dfa's index.
    orca.broadcast('dfb', 'dfa', cast_on='a_id', onto_index=True)
    @orca.step()
    def test_step(dfa, dfb):
        pass
def test_schema(tapp):
    """/schema reports all tables, columns, steps, injectables, broadcasts."""
    resp = tapp.get('/schema')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert set(payload['tables']) == {'dfa', 'dfb'}
    assert set(payload['columns']['dfa']) == {'extra_acol', 'acol', 'a'}
    assert set(payload['columns']['dfb']) == {'bcol', 'extra_bcol', 'a_id', 'b'}
    assert payload['steps'] == ['test_step']
    assert set(payload['injectables']) == {'a_factor', 'b_factor'}
    assert payload['broadcasts'] == [['dfb', 'dfa']]
def test_list_tables(tapp):
    """GET /tables lists the registered table names."""
    resp = tapp.get('/tables')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert set(payload['tables']) == {'dfa', 'dfb'}
def test_table_info(tapp):
    """/tables/dfa/info mentions computed columns in its description."""
    resp = tapp.get('/tables/dfa/info')
    assert resp.status_code == 200
    body = resp.data.decode('utf-8')
    assert 'extra_acol' in body
def test_table_preview(tapp):
    """Preview returns the full frame as split-oriented JSON."""
    resp = tapp.get('/tables/dfa/preview')
    assert resp.status_code == 200
    body = resp.data.decode('utf-8')
    expected = orca.get_table('dfa').to_frame().to_json(orient='split')
    assert body == expected
def test_table_preview_404(tapp):
    """Previewing an unregistered table is a 404."""
    resp = tapp.get('/tables/not_a_table/preview')
    assert resp.status_code == 404
def test_table_describe(tapp):
    """Describe endpoint mirrors DataFrame.describe() as split JSON."""
    resp = tapp.get('/tables/dfa/describe')
    assert resp.status_code == 200
    body = resp.data.decode('utf-8')
    expected = (orca.get_table('dfa')
                .to_frame()
                .describe()
                .to_json(orient='split'))
    assert body == expected
def test_table_definition_frame(tapp):
    """Tables registered from a DataFrame report type 'dataframe'."""
    resp = tapp.get('/tables/dfa/definition')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload == {'type': 'dataframe'}
def test_table_definition_func(tapp):
    """Function-backed tables expose their exact source text and location."""
    resp = tapp.get('/tables/dfb/definition')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['type'] == 'function'
    assert payload['filename'].endswith('test_server.py')
    assert isinstance(payload['lineno'], int)
    assert payload['text'] == (
        "    @orca.table('dfb')\n"
        "    def dfb_table():\n"
        "        return dfb\n")
    assert 'dfb_table' in payload['html']
def test_table_csv(tapp):
    """CSV endpoint streams the frame with a text/csv mimetype."""
    resp = tapp.get('/tables/dfb/csv')
    assert resp.status_code == 200
    assert resp.mimetype == 'text/csv'
    body = resp.data.decode('utf-8')
    assert body == orca.get_table('dfb').to_frame().to_csv()
def test_list_table_columns(tapp):
    """Column listing covers local, series and computed columns."""
    resp = tapp.get('/tables/dfb/columns')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert set(payload['columns']) == {'a_id', 'b', 'bcol', 'extra_bcol'}
def test_column_definition_local(tapp):
    """Columns stored on the frame itself report type 'local'."""
    resp = tapp.get('/tables/dfa/columns/a/definition')
    assert resp.status_code == 200
    assert json.loads(resp.data.decode('utf-8')) == {'type': 'local'}
def test_column_definition_series(tapp):
    """Columns added as bare Series report type 'series'."""
    resp = tapp.get('/tables/dfa/columns/acol/definition')
    assert resp.status_code == 200
    assert json.loads(resp.data.decode('utf-8')) == {'type': 'series'}
def test_column_definition_func(tapp):
    """Function-backed columns expose their exact source text and location."""
    resp = tapp.get('/tables/dfa/columns/extra_acol/definition')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['type'] == 'function'
    assert payload['filename'].endswith('test_server.py')
    assert isinstance(payload['lineno'], int)
    assert payload['text'] == (
        "    @orca.column('dfa')\n"
        "    def extra_acol(a_factor):\n"
        "        return dfa_col * a_factor\n")
    assert 'extra_acol' in payload['html']
def test_column_describe(tapp):
    """Column describe matches Series.describe() as split JSON."""
    resp = tapp.get('/tables/dfa/columns/extra_acol/describe')
    assert resp.status_code == 200
    body = resp.data.decode('utf-8')
    expected = (orca.get_table('dfa')
                .extra_acol.describe()
                .to_json(orient='split'))
    assert body == expected
def test_column_csv(tapp, dfa):
    """Column CSV equals Series.to_csv for the raw fixture column."""
    resp = tapp.get('/tables/dfa/columns/a/csv')
    assert resp.status_code == 200
    assert resp.data.decode('utf-8') == dfa.a.to_csv(path_or_buf=None)
def test_no_column_404(tapp):
    """Requesting a missing column is a 404."""
    resp = tapp.get('/tables/dfa/columns/not-a-column/csv')
    assert resp.status_code == 404
def test_list_injectables(tapp):
    """Both the scalar and the function injectable are listed."""
    resp = tapp.get('/injectables')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert set(payload['injectables']) == {'a_factor', 'b_factor'}
def test_injectable_repr(tapp, dfb_factor):
    """Repr endpoint reports the injectable value's type and repr."""
    resp = tapp.get('/injectables/b_factor/repr')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload == {'type': str(type(42)), 'repr': '2'}
def test_no_injectable_404(tapp):
    """Requesting a missing injectable is a 404."""
    resp = tapp.get('/injectables/nope/repr')
    assert resp.status_code == 404
def test_injectable_definition_var(tapp):
    """Plain-value injectables report type 'variable'."""
    resp = tapp.get('/injectables/a_factor/definition')
    assert resp.status_code == 200
    assert json.loads(resp.data.decode('utf-8')) == {'type': 'variable'}
def test_injectable_definition_func(tapp):
    """Function injectables expose their exact source text and location."""
    resp = tapp.get('/injectables/b_factor/definition')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['type'] == 'function'
    assert payload['filename'].endswith('test_server.py')
    assert isinstance(payload['lineno'], int)
    assert payload['text'] == (
        "    @orca.injectable()\n"
        "    def b_factor():\n"
        "        return dfb_factor\n")
    assert 'b_factor' in payload['html']
def test_list_broadcasts(tapp):
    """Broadcast listing names the cast and onto tables."""
    resp = tapp.get('/broadcasts')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload == {'broadcasts': [{'cast': 'dfb', 'onto': 'dfa'}]}
def test_broadcast_definition(tapp):
    """Broadcast definition echoes the registration arguments."""
    resp = tapp.get('/broadcasts/dfb/dfa/definition')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    expected = {
        'cast': 'dfb',
        'onto': 'dfa',
        'cast_on': 'a_id',
        'onto_on': None,
        'cast_index': False,
        'onto_index': True}
    assert payload == expected
def test_no_broadcast_404(tapp):
    """Requesting an unregistered broadcast pair is a 404."""
    resp = tapp.get('/broadcasts/table1/table2/definition')
    assert resp.status_code == 404
def test_list_steps(tapp):
    """Step listing contains the single registered step."""
    resp = tapp.get('/steps')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload == {'steps': ['test_step']}
def test_no_step_404(tapp):
    """Requesting a missing step definition is a 404."""
    resp = tapp.get('/steps/not_a_step/definition')
    assert resp.status_code == 404
def test_step_definition(tapp):
    """Step definition exposes exact source text and location."""
    resp = tapp.get('/steps/test_step/definition')
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['filename'].endswith('test_server.py')
    assert isinstance(payload['lineno'], int)
    assert payload['text'] == (
        "    @orca.step()\n"
        "    def test_step(dfa, dfb):\n"
        "        pass\n")
    assert 'test_step' in payload['html']
def test_table_groupbyagg_errors(tapp):
    """Invalid groupby-aggregate requests come back as 400s."""
    # nonexistent column
    resp = tapp.get('/tables/dfa/groupbyagg?column=notacolumn')
    assert resp.status_code == 400
    # neither 'by' nor 'level' given
    resp = tapp.get('/tables/dfa/groupbyagg?column=a')
    assert resp.status_code == 400
    # unknown aggregation name
    resp = tapp.get('/tables/dfa/groupbyagg?column=a&level=0&agg=notanagg')
    assert resp.status_code == 400
def test_table_groupbyagg_by_size(tapp):
    """size aggregation counts rows per group of column 'a'."""
    resp = tapp.get('/tables/dfa/groupbyagg?by=a&column=a&agg=size')
    assert resp.status_code == 200
    got = pd.read_json(resp.data.decode('utf-8'), orient='split', typ='series')
    expected = pd.Series([2, 2, 1], index=[100, 200, 300])
    pdt.assert_series_equal(got, expected, check_names=False)
def test_table_groupbyagg_level_mean(tapp):
    """mean aggregation grouped on the index level."""
    resp = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=mean')
    assert resp.status_code == 200
    got = pd.read_json(resp.data.decode('utf-8'), orient='split', typ='series')
    pdt.assert_series_equal(got, pd.Series([70, 85], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_median(tapp):
    """median aggregation grouped on the index level."""
    resp = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=median')
    assert resp.status_code == 200
    got = pd.read_json(resp.data.decode('utf-8'), orient='split', typ='series')
    pdt.assert_series_equal(got, pd.Series([70, 85], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_sum(tapp):
    """sum aggregation grouped on the index level."""
    resp = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=sum')
    assert resp.status_code == 200
    got = pd.read_json(resp.data.decode('utf-8'), orient='split', typ='series')
    pdt.assert_series_equal(got, pd.Series([70, 170], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_std(tapp):
    """std aggregation: a singleton group yields NaN."""
    resp = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=std')
    assert resp.status_code == 200
    got = pd.read_json(resp.data.decode('utf-8'), orient='split', typ='series')
    expected = pd.Series(
        [np.nan, pd.Series([80, 90]).std()],
        index=['a', 'b'], name='b')
    pdt.assert_series_equal(got, expected)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import feedparser
import re
# News feeds sampled by getarticlewords() (Reuters, AP, NYT, Google News,
# Salon, Fox, CNN).  NOTE(review): these pre-2010 URLs are likely dead;
# refresh the list before relying on the fetch code below.
feedlist=['http://today.reuters.com/rss/topNews',
          'http://today.reuters.com/rss/domesticNews',
          'http://today.reuters.com/rss/worldNews',
          'http://hosted.ap.org/lineups/TOPHEADS-rss_2.0.xml',
          'http://hosted.ap.org/lineups/USHEADS-rss_2.0.xml',
          'http://hosted.ap.org/lineups/WORLDHEADS-rss_2.0.xml',
          'http://hosted.ap.org/lineups/POLITICSHEADS-rss_2.0.xml',
          'http://www.nytimes.com/services/xml/rss/nyt/HomePage.xml',
          'http://www.nytimes.com/services/xml/rss/nyt/International.xml',
          'http://news.google.com/?output=rss',
          'http://feeds.salon.com/salon/news',
          'http://www.foxnews.com/xmlfeed/rss/0,4313,0,00.rss',
          'http://www.foxnews.com/xmlfeed/rss/0,4313,80,00.rss',
          'http://www.foxnews.com/xmlfeed/rss/0,4313,81,00.rss',
          'http://rss.cnn.com/rss/edition.rss',
          'http://rss.cnn.com/rss/edition_world.rss',
          'http://rss.cnn.com/rss/edition_us.rss']
def stripHTML(h):
    """Drop HTML tags from *h*, leaving one space where each tag ended.

    Characters between '<' and '>' are skipped; text after an unclosed '<'
    is dropped entirely (same as the original state machine).
    """
    out = []
    in_tag = 0
    for ch in h:
        if ch == '<':
            in_tag = 1
        elif ch == '>':
            in_tag = 0
            out.append(' ')
        elif in_tag == 0:
            out.append(ch)
    return ''.join(out)
def separatewords(text):
    """Split *text* on runs of non-word characters; return lowercase words
    longer than 3 characters.

    BUG FIX: the original pattern '\\W*' also matches the empty string; on
    Python 3.7+ re.split then splits at every position, shattering the text
    into single characters and making this function always return [].
    '\\W+' behaves identically on Python 2 (which skipped empty matches) and
    correctly on Python 3.
    """
    splitter = re.compile(r'\W+')
    return [s.lower() for s in splitter.split(text) if len(s) > 3]
def getarticlewords():
    """Fetch every feed in ``feedlist`` and count words per article.

    Returns (allwords, articlewords, articletitles):
      allwords      -- {word: total count across all articles}
      articlewords  -- per-article {word: count} dicts, parallel to titles
      articletitles -- article titles in the order first seen

    NOTE(review): Python-2 semantics -- .encode('utf8') on a py2 unicode
    title yields a byte str that concatenates with stripHTML()'s result;
    on Python 3 this bytes+str concatenation raises TypeError.  Confirm
    the target interpreter before reuse.
    """
    allwords={}
    articlewords=[]
    articletitles=[]
    ec=0  # index of the current article within articlewords
    # Loop over every feed
    for feed in feedlist:
        f=feedparser.parse(feed)
        # Loop over every article
        for e in f.entries:
            # Ignore identical articles
            if e.title in articletitles: continue
            # Extract the words
            txt=e.title.encode('utf8')+stripHTML(e.description.encode('utf8'))
            words=separatewords(txt)
            articlewords.append({})
            articletitles.append(e.title)
            # Increase the counts for this word in allwords and in articlewords
            for word in words:
                allwords.setdefault(word,0)
                allwords[word]+=1
                articlewords[ec].setdefault(word,0)
                articlewords[ec][word]+=1
            ec+=1
    return allwords,articlewords,articletitles
def makematrix(allw, articlew):
    """Build the article x word count matrix over mid-frequency words.

    Keeps words that appear more than 3 times overall but in fewer than
    60% of the articles; returns (matrix, wordvec).
    """
    upper = len(articlew) * 0.6
    wordvec = [w for w, c in allw.items() if c > 3 and c < upper]
    matrix = [[(word in f and f[word] or 0) for word in wordvec]
              for f in articlew]
    return matrix, wordvec
from numpy import *
def showfeatures(w, h, titles, wordvec, out='features.txt'):
    """Write the top words and top articles for each feature to *out*.

    w : articles x features weight matrix
    h : features x words weight matrix
    Returns (toppatterns, patternnames) for later reuse by showarticles().

    BUG FIX: uses open() instead of the Python-2-only file() builtin
    (removed in Python 3; open is equivalent on both); the context manager
    also guarantees the handle is closed if a write fails.
    """
    pc, wc = shape(h)
    toppatterns = [[] for i in range(len(titles))]
    patternnames = []
    with open(out, 'w') as outfile:
        # Loop over all the features
        for i in range(pc):
            slist = []
            # Pair every word with its weight in this feature
            for j in range(wc):
                slist.append((h[i, j], wordvec[j]))
            # Reverse sort: heaviest words first
            slist.sort()
            slist.reverse()
            # The six heaviest words become this feature's "name"
            n = [s[1] for s in slist[0:6]]
            outfile.write(str(n) + '\n')
            patternnames.append(n)
            # Collect every article's weight for this feature
            flist = []
            for j in range(len(titles)):
                flist.append((w[j, i], titles[j]))
                toppatterns[j].append((w[j, i], i, titles[j]))
            flist.sort()
            flist.reverse()
            # Show the top 3 articles for this feature
            for f in flist[0:3]:
                outfile.write(str(f) + '\n')
            outfile.write('\n')
    # Return the pattern names for later use
    return toppatterns, patternnames
def showarticles(titles, toppatterns, patternnames, out='articles.txt'):
    """Write each article's title and its strongest patterns to *out*.

    BUG FIXES: open() replaces the Python-2-only file() builtin; titles are
    written as text (the old .encode('utf8') produced bytes, which cannot be
    written to a text-mode file on Python 3); and the pattern loop is capped
    at the available count (the original assumed at least three patterns per
    article and raised IndexError otherwise).
    """
    with open(out, 'w') as outfile:
        # Loop over all the articles
        for j in range(len(titles)):
            outfile.write(titles[j] + '\n')
            # Strongest patterns first (sort ascending, then reverse)
            toppatterns[j].sort()
            toppatterns[j].reverse()
            # Print up to three top patterns
            for i in range(min(3, len(toppatterns[j]))):
                outfile.write(str(toppatterns[j][i][0]) + ' ' +
                              str(patternnames[toppatterns[j][i][1]]) + '\n')
            outfile.write('\n')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf-8
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    # Test model: friends is an optional, self-referential many-to-many.
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    friends = models.ManyToManyField('self', blank=True)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Publisher(models.Model):
    # Simple aggregation target: publishers counted/summed by num_awards.
    name = models.CharField(max_length=255)
    num_awards = models.IntegerField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Book(models.Model):
    # Central model: M2M to authors plus two FKs (contact author, publisher).
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax.
    isbn = models.CharField(max_length=9)
    name = models.CharField(max_length=255)
    pages = models.IntegerField()
    rating = models.FloatField()
    price = models.DecimalField(decimal_places=2, max_digits=6)
    authors = models.ManyToManyField(Author)
    contact = models.ForeignKey(Author, related_name='book_contact_set')
    publisher = models.ForeignKey(Publisher)
    pubdate = models.DateField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Store(models.Model):
    # Carries DateTime/Time fields for date-related aggregation tests.
    name = models.CharField(max_length=255)
    books = models.ManyToManyField(Book)
    original_opening = models.DateTimeField()
    friday_night_closing = models.TimeField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class DepartmentStore(Store):
    # Multi-table inheritance child of Store (adds one field).
    chain = models.CharField(max_length=255)
    def __str__(self):
        return '%s - %s ' % (self.chain, self.name)
@python_2_unicode_compatible
class Employee(models.Model):
    # The order of these fields matter, do not change. Certain backends
    # rely on field ordering to perform database conversions, and this
    # model helps to test that.
    first_name = models.CharField(max_length=20)
    manager = models.BooleanField(default=False)
    last_name = models.CharField(max_length=20)
    store = models.ForeignKey(Store)
    age = models.IntegerField()
    salary = models.DecimalField(max_digits=8, decimal_places=2)
    def __str__(self):
        return '%s %s' % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Company(models.Model):
    # All fields beyond name are nullable/blank to exercise NULL handling.
    name = models.CharField(max_length=200)
    motto = models.CharField(max_length=200, null=True, blank=True)
    ticker_name = models.CharField(max_length=10, null=True, blank=True)
    description = models.CharField(max_length=200, null=True, blank=True)
    def __str__(self):
        return ('Company(name=%s, motto=%s, ticker_name=%s, description=%s)'
                % (self.name, self.motto, self.ticker_name, self.description)
        )
@python_2_unicode_compatible
class Ticket(models.Model):
    # Exercises DurationField alongside a DateTimeField.
    active_at = models.DateTimeField()
    duration = models.DurationField()
    def __str__(self):
        return '{} - {}'.format(self.active_at, self.duration)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_tunnel import ApiParameters
from library.modules.bigip_tunnel import ModuleParameters
from library.modules.bigip_tunnel import ModuleManager
from library.modules.bigip_tunnel import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_tunnel import ApiParameters
from ansible.modules.network.f5.bigip_tunnel import ModuleParameters
from ansible.modules.network.f5.bigip_tunnel import ModuleManager
from ansible.modules.network.f5.bigip_tunnel import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# JSON fixtures live next to this test module; parsed fixtures are memoized
# in fixture_data, keyed by absolute path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load fixture *name*, parsing JSON when possible; results are memoized.

    Non-JSON fixtures are returned as their raw text.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as handle:
        parsed = handle.read()
    try:
        parsed = json.loads(parsed)
    except Exception:
        pass
    fixture_data[path] = parsed
    return parsed
class TestParameters(unittest.TestCase):
    def test_module_parameters(self):
        """A bare profile name is expanded to a /Common/ path."""
        params = ModuleParameters(params=dict(name='foo', profile='ipip'))
        assert params.name == 'foo'
        assert params.profile == '/Common/ipip'
    def test_api_parameters(self):
        """API-side parameters read the tunnel name from the fixture."""
        fixture = load_fixture('load_net_tunnel_1.json')
        params = ApiParameters(params=fixture)
        assert params.name == 'tunnel1'
class TestManager(unittest.TestCase):
    def setUp(self):
        # Fresh ArgumentSpec per test so argument definitions cannot leak.
        self.spec = ArgumentSpec()
    def test_create(self, *args):
        """Creating a tunnel that does not yet exist reports changed=True."""
        # NOTE(review): local_address '2.2.2.2.' has a trailing dot --
        # probably a typo for '2.2.2.2'; confirm it is not intentional.
        set_module_args(dict(
            name='foo',
            profile='ipip',
            local_address='2.2.2.2.',
            server='localhost',
            password='password',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)
        results = mm.exec_module()
        assert results['changed'] is True
|
unknown
|
codeparrot/codeparrot-clean
| ||
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER (contact@n1nj4.eu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
import sys
import readline
import cmd
import shlex
import string
import re
import os
import os.path
import traceback
try:
import ConfigParser as configparser
except ImportError:
import configparser
import random
import code
try:
import __builtin__ as builtins
except ImportError:
import builtins
from multiprocessing.pool import ThreadPool
import time
import logging
import traceback
import rpyc
import rpyc.utils.classic
from .PythonCompleter import PupyCompleter
from .PupyErrors import PupyModuleExit, PupyModuleError
from .PupyModule import PupyArgumentParser
from .PupyJob import PupyJob
import argparse
from pupysh import __version__
import copy
from functools import partial
BANNER="""
_____ _ _ _
___ ___ | _ |_ _ ___ _ _ ___| |_ ___| | | ___ ___
|___|___| | __| | | . | | | |_ -| | -_| | | |___|___|
|__| |___| _|_ | |___|_|_|___|_|_|
|_| |___|
%s
"""%__version__
def color_real(s, color, prompt=False, colors_enabled=True):
    """Wrap *s* in ANSI color escapes; unknown colors return *s* unchanged.

    With prompt=True, \001/\002 markers bracket the escape sequences so
    readline can compute the visible prompt width correctly (see
    http://stackoverflow.com/questions/9468435/look-how-to-fix-column-calculation-in-python-readline-if-use-color-prompt).
    """
    if s is None:
        return ""
    text = str(s)
    if not colors_enabled:
        return text
    prompt_start = "\001" if prompt else ""
    prompt_stop = "\002" if prompt else ""
    stop = "\033[0m"
    if prompt:
        stop = prompt_start + stop + prompt_stop
    wanted = color.lower()
    if wanted == "random":
        wanted = random.choice(["blue", "red", "green", "yellow"])
    codes = {
        "blue": "\033[34m",
        "red": "\033[31m",
        "green": "\033[32m",
        "yellow": "\033[33m",
        "grey": "\033[37m",
        "darkgrey": "\033[1;30m",
    }
    if wanted not in codes:
        return text
    return prompt_start + codes[wanted] + prompt_stop + text + stop
def get_columns_size(l):
    """Return {column_name: widest str() length} over a list of row dicts.

    Used by PupyCmd.table_format to align table columns; values are
    str()-ified before measuring.  FIX: iterates dict.items() instead of the
    Python-2-only iteritems(), so the helper runs on both Python 2 and 3.
    """
    size_dic = {}
    for d in l:
        for i, k in d.items():
            if type(k) is not str:
                k = str(k)
            # Keep the maximum width seen for this column so far.
            if i not in size_dic or size_dic[i] < len(k):
                size_dic[i] = len(k)
    return size_dic
def obj2utf8(obj):
    """Recursively utf-8 encode every string inside *obj* (Python 2 only).

    dicts and lists are mutated in place, tuples are rebuilt; any other
    non-string value is replaced by str(obj).  NOTE(review): relies on the
    Python-2-only ``unicode`` builtin and on encode() producing a byte str;
    it cannot run unmodified on Python 3.
    """
    if type(obj)==dict:
        for k in obj:
            obj[k]=obj2utf8(obj[k])
    elif type(obj)==list:
        for i in range(0,len(obj)):
            obj[i]=obj2utf8(obj[i])
    elif type(obj)==tuple:
        # Tuples are immutable: convert, recurse, convert back.
        obj=list(obj)
        for i in range(0,len(obj)):
            obj[i]=obj2utf8(obj[i])
        obj=tuple(obj)
    elif type(obj)==unicode or type(obj)==str:
        return obj.encode('utf8', errors='replace')
    else:
        obj=str(obj)
    return obj
class PupyCmd(cmd.Cmd):
    def __init__(self, pupsrv, configFile="pupy.conf"):
        """Interactive pupy shell bound to *pupsrv*, configured from *configFile*."""
        cmd.Cmd.__init__(self)
        self.pupsrv=pupsrv
        self.pupsrv.register_handler(self)
        self.config = configparser.ConfigParser()
        self.config.read(configFile)
        self.init_readline()
        # Bind the module-level color() helper so it honors the
        # [cmdline] colors setting; fall back to always-on colors.
        global color
        try:
            color = partial(color_real, colors_enabled=self.config.getboolean("cmdline","colors"))
        except Exception:
            color = color_real
        self.intro = color(BANNER, 'green')
        self.prompt = color('>> ','blue', prompt=True)
        self.doc_header = 'Available commands :\n'
        # Commands listed here get a trailing space appended on completion.
        self.complete_space=['run']
        self.default_filter=None
        try:
            if not self.config.getboolean("cmdline","display_banner"):
                self.intro=""
        except Exception:
            pass
        # [aliases] config section maps a short command to a module invocation.
        self.aliases={}
        try:
            for command, alias in self.config.items("aliases"):
                logging.debug("adding alias: %s => %s"%(command, alias))
                self.aliases[command]=alias
        except Exception as e:
            logging.warning("error while parsing aliases from pupy.conf ! %s"%str(traceback.format_exc()))
    @staticmethod
    def table_format(diclist, wl=[], bl=[]):
        """Render a list of row dicts as an aligned text table.

        All dicts must share the same keys (the column names).
        wl -- whitelist: only these columns, in this order, are shown
        bl -- blacklist: these columns are hidden
        NOTE(review): Python-2-only (iterkeys/iteritems, str.decode).  The
        mutable [] defaults are shared across calls but never mutated here.
        """
        res=""
        if diclist:
            diclist=obj2utf8(diclist)
            keys=[x for x in diclist[0].iterkeys()]
            if wl:
                keys=[x for x in wl if x in keys]
            if bl:
                keys=[x for x in keys if x not in bl]
            titlesdic={}
            for k in keys:
                titlesdic[k]=k
            # First row is the header, built from the column names themselves.
            diclist.insert(0,titlesdic)
            colsize=get_columns_size(diclist)
            i=0
            for c in diclist:
                if i==1:
                    # Separator line between the header and the data rows.
                    res+="-"*sum([k+2 for k in [y for x,y in colsize.iteritems() if x in titlesdic]])+"\n"
                i+=1
                for name in keys:
                    # NOTE(review): 'is not unicode' compares the value to the
                    # type object and is always True -- likely meant a
                    # type(c[name]) check; confirm before changing.
                    if c[name] is not unicode:
                        value=str(c[name]).strip()
                    else:
                        value=c[name].strip()
                    # Pad by on-screen width: byte length minus decoded length
                    # compensates for multi-byte utf8 characters.
                    utf8align=len(value)-len(value.decode('utf8',errors='replace'))
                    res+=value.ljust(colsize[name]+2+utf8align)
                res+="\n"
        return res
    def default(self, line):
        """Fallback for unknown commands: resolve [aliases] entries.

        An alias expands to a 'run <module> ...' invocation; -f/--bg options
        supplied by the user are hoisted back in front of the module name so
        do_run parses them as its own arguments.
        """
        tab=line.split(" ",1)
        if tab[0] in self.aliases:
            arg_parser = PupyArgumentParser(prog=tab[0], add_help=False)
            arg_parser.add_argument('-f', '--filter', metavar='<client filter>', help="filter to a subset of all clients. All fields available in the \"info\" module can be used. example: run get_info -f 'platform:win release:7 os_arch:64'")
            arg_parser.add_argument('--bg', action='store_true', help="run in background")
            arg_parser.add_argument('arguments', nargs=argparse.REMAINDER, metavar='<arguments>', help="module arguments")
            if len(tab)==1:
                self.do_run(self.aliases[tab[0]])
            else:
                left=[]
                try:
                    modargs,left=arg_parser.parse_known_args(shlex.split(tab[1]))
                except PupyModuleExit:
                    return
                #putting run arguments (-f and --bg) back at their place in case of aliases
                newargs_str=""
                if modargs.bg:
                    newargs_str+=" --bg"
                if modargs.filter:
                    # Re-quote the filter, escaping embedded single quotes.
                    newargs_str+=" -f '"+modargs.filter.replace("'","'\\''")+"'"
                newargs_str+=" "+self.aliases[tab[0]]
                if left:
                    newargs_str+=" "+' '.join(left)
                if modargs.arguments:
                    newargs_str+=" '"+(' '.join(modargs.arguments)).replace("'","'\\''")+"'"
                self.do_run(newargs_str.strip())
        else:
            self.display_error("Unknown syntax: %s"%line)
    def init_readline(self):
        """Load persistent history (best effort) and set up tab completion."""
        try:
            readline.read_history_file(".pupy_history")
        except Exception:
            # Missing/unreadable history file is fine on first run.
            pass
        self.init_completer()
def cmdloop(self, intro=None):
try:
cmd.Cmd.cmdloop(self, intro)
except KeyboardInterrupt as e:
self.stdout.write('\n')
self.cmdloop(intro="")
    def init_completer(self):
        """Hook readline: install the pre-input hook and split only on whitespace."""
        readline.set_pre_input_hook(self.pre_input_hook)
        readline.set_completer_delims(" \t")
def completenames(self, text, *ignored):
dotext = 'do_'+text
if text in self.complete_space:
return [a[3:]+" " for a in self.get_names() if a.startswith(dotext)]+[x+" " for x in self.aliases.iterkeys() if x.startswith(text)]
return [a[3:] for a in self.get_names() if a.startswith(dotext)]+[x for x in self.aliases.iterkeys() if x.startswith(text)]
    def pre_input_hook(self):
        """Readline pre-input hook; intentionally a no-op (redisplay disabled)."""
        #readline.redisplay()
        pass
    def emptyline(self):
        """ do nothing when an emptyline is entered """
        # Overrides cmd.Cmd, which would otherwise repeat the last command.
        pass
def do_help(self, arg):
""" show this help """
if arg:
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append((cmd, getattr(self, name).__doc__))
else:
cmds_doc.append((cmd, ""))
for name in [x for x in self.aliases.iterkeys()]:
cmds_doc.append((name, self.pupsrv.get_module(self.aliases[name]).__doc__))
self.stdout.write("%s\n"%str(self.doc_header))
for command,doc in cmds_doc:
self.stdout.write("- {:<10} {}\n".format(command, color(doc,'grey')))
@staticmethod
def format_log(msg):
""" return a formated log line """
return msg.rstrip()+"\n"
@staticmethod
def format_error(msg):
""" return a formated error log line """
return color('[-] ','red')+msg.rstrip()+"\n"
@staticmethod
def format_warning(msg):
""" return a formated warning log line """
return color('[!] ','yellow')+msg.rstrip()+"\n"
@staticmethod
def format_success(msg):
""" return a formated info log line """
return color('[+] ','green')+msg.rstrip()+"\n"
@staticmethod
def format_info(msg):
""" return a formated info log line """
return color('[%] ','darkgrey')+msg.rstrip()+"\n"
@staticmethod
def format_srvinfo(msg):
""" return a formated info log line """
return color('[*] ','blue')+msg.rstrip()+"\n"
@staticmethod
def format_section(msg):
""" return a formated info log line """
return color('#>#> ','green')+color(msg.rstrip(),'darkgrey')+color(' <#<#','green')+"\n"
    def display(self, msg, modifier=None):
        """Write *msg* to stdout formatted according to *modifier*.

        modifier is one of error/success/info/srvinfo/warning; anything else
        (including None) falls through to the plain log format.
        NOTE(review): the ``unicode`` type check is Python-2-only.
        """
        if not type(msg) is unicode:
            msg=str(msg)
        if msg:
            if modifier=="error":
                sys.stdout.write(PupyCmd.format_error(msg))
            elif modifier=="success":
                sys.stdout.write(PupyCmd.format_success(msg))
            elif modifier=="info":
                sys.stdout.write(PupyCmd.format_info(msg))
            elif modifier=="srvinfo":
                sys.stdout.write(PupyCmd.format_srvinfo(msg))
                #readline.redisplay()
            elif modifier=="warning":
                sys.stdout.write(PupyCmd.format_warning(msg))
            else:
                sys.stdout.write(PupyCmd.format_log(msg))
    def display_srvinfo(self, msg):
        """Print *msg* as a server-info line ('[*] ' blue prefix)."""
        return self.display(msg, modifier="srvinfo")
    def display_success(self, msg):
        """Print *msg* as a success line ('[+] ' green prefix)."""
        return self.display(msg, modifier="success")
    def display_error(self, msg):
        """Print *msg* as an error line ('[-] ' red prefix)."""
        return self.display(msg, modifier="error")
    def display_warning(self, msg):
        """Print *msg* as a warning line ('[!] ' yellow prefix)."""
        return self.display(msg, modifier="warning")
    def display_info(self, msg):
        """Print *msg* as an info line ('[%] ' darkgrey prefix)."""
        return self.display(msg, modifier="info")
def postcmd(self, stop, line):
readline.write_history_file('.pupy_history')
def do_list_modules(self, arg):
""" List available modules with a brief description """
for m,d in self.pupsrv.list_modules():
self.stdout.write("{:<20} {}\n".format(m, color(d,'grey')))
def do_clients(self, arg):
""" alias for sessions """
self.do_sessions(arg)
    def do_sessions(self, arg):
        """ list/interact with established sessions """
        arg_parser = PupyArgumentParser(prog='sessions', description=self.do_sessions.__doc__)
        arg_parser.add_argument('-i', '--interact', metavar='<filter>', help="change the default --filter value for other commands")
        arg_parser.add_argument('-g', '--global-reset', action='store_true', help="reset --interact to the default global behavior")
        arg_parser.add_argument('-l', dest='list', action='store_true', help='List all active sessions')
        arg_parser.add_argument('-k', dest='kill', metavar='<id>', type=int, help='Kill the selected session')
        try:
            modargs=arg_parser.parse_args(shlex.split(arg))
        except PupyModuleExit:
            return
        if modargs.global_reset:
            self.default_filter=None
            self.display_success("default filter reset to global !")
        elif modargs.interact:
            self.default_filter=modargs.interact
            self.display_success("default filter set to %s"%self.default_filter)
        elif modargs.kill:
            # Best-effort: ask the client to exit; ignore transport errors.
            selected_client = self.pupsrv.get_clients(modargs.kill)
            if selected_client:
                try:
                    selected_client[0].conn.exit()
                except Exception:
                    pass
        elif modargs.list or not arg:
            # Default action (no arguments): show the session table.
            client_list=self.pupsrv.get_clients_list()
            self.display(PupyCmd.table_format([x.desc for x in client_list], wl=["id", "user", "hostname", "platform", "release", "os_arch", "address"]))
    def do_jobs(self, arg):
        """ manage jobs """
        arg_parser = PupyArgumentParser(prog='jobs', description='list or kill jobs')
        arg_parser.add_argument('-k', '--kill', metavar='<job_id>', help="print the job current output before killing it")
        arg_parser.add_argument('-l', '--list', action='store_true', help="list jobs")
        arg_parser.add_argument('-p', '--print-output', metavar='<job_id>', help="print a job output")
        try:
            modargs=arg_parser.parse_args(shlex.split(arg))
        except PupyModuleExit:
            return
        try:
            # -k: show whatever the job produced so far, then stop it.
            if modargs.kill:
                j=self.pupsrv.get_job(modargs.kill)
                self.display(j.result_summary())
                j.stop()
                del j
                self.display_success("job killed")
            # -p: only dump the job's accumulated output.
            elif modargs.print_output:
                j=self.pupsrv.get_job(modargs.print_output)
                self.display(j.result_summary())
            # -l: tabulate every known job with status and client count.
            elif modargs.list:
                if len(self.pupsrv.jobs)>0:
                    dictable=[]
                    for k,v in self.pupsrv.jobs.iteritems():
                        dic={"id":k, "job":str(v)}
                        status="running"
                        if v.is_finished():
                            status="finished"
                        dic["status"]=status
                        dic["clients_nb"]=str(v.get_clients_nb())
                        dictable.append(dic)
                    self.display(PupyCmd.table_format(dictable, wl=["id", "job", "clients_nb","status"]))
                else:
                    self.display_error("No jobs are currently running !")
            else: #display help
                try:
                    arg_parser.parse_args(["-h"])
                except PupyModuleExit:
                    return
        except PupyModuleError as e:
            self.display_error(e)
        except Exception as e:
            self.display_error(traceback.format_exc())
    def do_python(self,arg):
        """ start the local python interpreter (for debugging purposes) """
        # Temporarily neuter exit()/quit() so a stray call inside the REPL
        # cannot terminate the whole shell; originals restored in 'finally'.
        orig_exit=builtins.exit
        orig_quit=builtins.quit
        def disabled_exit(*args, **kwargs):
            self.display_warning("exit() disabled ! use ctrl+D to exit the python shell")
        builtins.exit=disabled_exit
        builtins.quit=disabled_exit
        oldcompleter=readline.get_completer()
        try:
            # Expose the running server object to the interactive session.
            local_ns={"pupsrv":self.pupsrv}
            readline.set_completer(PupyCompleter(local_ns=local_ns).complete)
            readline.parse_and_bind('tab: complete')
            code.interact(local=local_ns)
        except Exception as e:
            self.display_error(str(e))
        finally:
            # Put back the shell's own completer and the real exit()/quit().
            readline.set_completer(oldcompleter)
            readline.parse_and_bind('tab: complete')
            builtins.exit=orig_exit
            builtins.quit=orig_quit
    def do_run(self, arg):
        """ run a module on one or multiple clients"""
        arg_parser = PupyArgumentParser(prog='run', description='run a module on one or multiple clients')
        arg_parser.add_argument('module', metavar='<module>', help="module")
        arg_parser.add_argument('-f', '--filter', metavar='<client filter>', default=self.default_filter ,help="filter to a subset of all clients. All fields available in the \"info\" module can be used. example: run get_info -f 'platform:win release:7 os_arch:64'")
        arg_parser.add_argument('--bg', action='store_true', help="run in background")
        arg_parser.add_argument('arguments', nargs=argparse.REMAINDER, metavar='<arguments>', help="module arguments")
        pj=None
        try:
            modargs=arg_parser.parse_args(shlex.split(arg))
        except PupyModuleExit:
            return
        if not modargs.arguments:
            args=""
        else:
            args=modargs.arguments
        # '*' selects every connected client unless a filter was given
        # (either on the command line or via 'sessions -i').
        selected_clients="*"
        if modargs.filter:
            selected_clients=modargs.filter
        try:
            mod=self.pupsrv.get_module(modargs.module)
        except Exception as e:
            self.display_error("%s : %s"%(modargs.module,str(e)))
            return
        if not mod:
            self.display_error("unknown module %s !"%modargs.module)
            return
        #logging.debug("args passed to %s: %s"%(modargs.module,args))
        l=self.pupsrv.get_clients(selected_clients)
        if not l:
            if not self.pupsrv.clients:
                self.display_error("no clients currently connected")
            else:
                self.display_error("no clients match this search!")
            return
        # Validate module arguments before touching any client.
        try:
            self.pupsrv.module_parse_args(modargs.module, args)
        except PupyModuleExit:
            return
        if mod.max_clients!=0 and len(l)>mod.max_clients:
            self.display_error("This module is limited to %s client(s) at a time and you selected %s clients"%(mod.max_clients, len(l)))
            return
        # Existing jobs already running this module on one of the selected
        # clients (relevant for daemon/unique-instance modules below).
        modjobs=[x for x in self.pupsrv.jobs.itervalues() if str(type(x.pupymodules[0]))== str(mod) and x.pupymodules[0].client in l]
        #print [x for x in self.pupsrv.jobs.itervalues()]
        #print modjobs
        #if mod.unique_instance and len(modjobs)>=1:
        #    self.display_error("This module is limited to %s instances per client. Job(s) containing this modules are still running."%(len(modjobs)))
        #    return
        pj=None
        try:
            interactive=False
            # Reuse the running job for unique-instance daemons; otherwise
            # build a new job holding one module instance per client.
            if mod.daemon and mod.unique_instance and modjobs:
                pj=modjobs[0]
            else:
                pj=PupyJob(self.pupsrv,"%s %s"%(modargs.module, args))
                if len(l)==1 and not modargs.bg and not mod.daemon:
                    # Single foreground client: stream output directly.
                    ps=mod(l[0], pj, stdout=self.stdout)
                    pj.add_module(ps)
                    interactive=True
                else:
                    for c in l:
                        ps=mod(c, pj)
                        pj.add_module(ps)
                pj.start(args)
            # --bg and daemon jobs are registered with the server instead
            # of being waited on here.
            if not modjobs:
                if modargs.bg:
                    self.pupsrv.add_job(pj)
                    return
                elif mod.daemon:
                    self.pupsrv.add_job(pj)
            error=pj.interactive_wait()
            if error and not modjobs:
                pj.stop()
        except KeyboardInterrupt:
            self.display_warning("interrupting job ... (please wait)")
            pj.interrupt()
            self.display_warning("job interrupted")
        if not interactive:
            self.display(pj.result_summary())
        if pj:
            del pj
    #text : word match
    #line : complete line
    def complete_run(self, text, line, begidx, endidx):
        """Tab-completion for 'run': offer module file names from ./modules.

        'joker' tracks how many positional slots are still open before the
        module name: option values (-f <filter>) consume one extra slot,
        flags (--bg) consume none, anything else fills a slot.
        """
        mline = line.partition(' ')[2]
        joker=1
        found_module=False
        #handle autocompletion of modules with --filter argument
        for x in shlex.split(mline):
            if x in ("-f", "--filter"):#arguments with a param
                joker+=1
            elif x in ("--bg",):#arguments without parameter
                pass
            else:
                joker-=1
            if not x.startswith("-") and joker==0:
                found_module=True
        if joker<0:
            return
        # Offer module names (stripping .py/.pyc) only while the module
        # position has not yet been filled.
        if ((len(text)>0 and joker==0) or (len(text)==0 and not found_module and joker<=1)):
            return [re.sub(r"(.*)\.pyc?$",r"\1",x) for x in os.listdir("./modules") if x.startswith(text) and not x=="__init__.py" and not x=="__init__.pyc"]
def do_exit(self, arg):
""" Quit Pupy Shell """
sys.exit()
def do_read(self, arg):
""" execute a list of commands from a file """
try:
if not arg:
self.display_error("usage: read <filename>")
return
with open(arg,'r') as f:
self.cmdqueue.extend(f.read().splitlines())
except Exception as e:
self.display_error(str(e))
def _complete_path(self, path=None):
"Perform completion of filesystem path."
if not path:
return os.listdir('.')
dirname, rest = os.path.split(path)
tmp = dirname if dirname else '.'
res = [os.path.join(dirname, p)
for p in os.listdir(tmp) if p.startswith(rest)]
# more than one match, or single match which does not exist (typo)
if len(res) > 1 or not os.path.exists(path):
return res
# resolved to a single directory, so return list of files below it
if os.path.isdir(path):
return [os.path.join(path, p) for p in os.listdir(path)]
# exact file match terminates this completion
return [path + ' ']
def complete_read(self, text, line, begidx, endidx):
tab = line.split(' ',1)
if len(tab)>=2:
return self._complete_path(tab[1])
|
unknown
|
codeparrot/codeparrot-clean
| ||
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libspatialindex(CMakePackage):
    """Spack package recipe for the libspatialindex C++ library."""
    homepage = "http://libspatialindex.github.io"
    url = "https://github.com/libspatialindex/libspatialindex/tarball/1.8.5"
    version('1.8.5', 'a95d8159714dbda9a274792cd273d298')
|
unknown
|
codeparrot/codeparrot-clean
| ||
""" Python character mapping codec test
This uses the test codec in testcodec.py and thus also tests the
encodings package lookup scheme.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright 2000 Guido van Rossum.
"""#"
import test.support, unittest
import codecs
# Register a search function which knows about our codec
def codec_search_function(encoding):
    """Codec search hook: return the test codec's entry, else None."""
    if encoding != 'testcodec':
        return None
    from test import testcodec
    return tuple(testcodec.getregentry())
# Install the search function so 'testcodec' resolves everywhere below.
codecs.register(codec_search_function)
# test codec's name (see test/testcodec.py)
codecname = 'testcodec'
class CharmapCodecTest(unittest.TestCase):
    """Exercise the charmap-based test codec.

    As the assertions below show, the codec maps 'x' to 'abc' and 'y'
    to the empty string, in both the decode and the encode direction.
    """
    def test_constructorx(self):
        # str(bytes, codecname) decodes; each 'x' expands to 'abc'.
        self.assertEqual(str(b'abc', codecname), 'abc')
        self.assertEqual(str(b'xdef', codecname), 'abcdef')
        self.assertEqual(str(b'defx', codecname), 'defabc')
        self.assertEqual(str(b'dxf', codecname), 'dabcf')
        self.assertEqual(str(b'dxfx', codecname), 'dabcfabc')
    def test_encodex(self):
        # Encoding applies the same 'x' -> 'abc' expansion.
        self.assertEqual('abc'.encode(codecname), b'abc')
        self.assertEqual('xdef'.encode(codecname), b'abcdef')
        self.assertEqual('defx'.encode(codecname), b'defabc')
        self.assertEqual('dxf'.encode(codecname), b'dabcf')
        self.assertEqual('dxfx'.encode(codecname), b'dabcfabc')
    def test_constructory(self):
        # 'y' decodes to the empty string and simply disappears.
        self.assertEqual(str(b'ydef', codecname), 'def')
        self.assertEqual(str(b'defy', codecname), 'def')
        self.assertEqual(str(b'dyf', codecname), 'df')
        self.assertEqual(str(b'dyfy', codecname), 'df')
    def test_maptoundefined(self):
        # Bytes with no mapping must raise UnicodeError.
        self.assertRaises(UnicodeError, str, b'abc\001', codecname)
def test_main():
    # Legacy CPython regrtest entry point.
    test.support.run_unittest(CharmapCodecTest)
if __name__ == "__main__":
    test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#ifndef ISO_FORMAT_HPP___
#define ISO_FORMAT_HPP___
/* Copyright (c) 2002,2003 CrystalClear Software, Inc.
* Use, modification and distribution is subject to the
* Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
* Author: Jeff Garland, Bart Garst
* $Date$
*/
#include "boost/date_time/parse_format_base.hpp"
namespace boost {
namespace date_time {
  //! Class to provide common ISO 8601 formatting spec
  template<class charT>
  class iso_format_base {
  public:
    //! Describe month format -- it's an integer in ISO 8601 format
    static month_format_spec month_format()
    {
      return month_as_integer;
    }
    //! String printed when the date is invalid
    static const charT* not_a_date()
    {
      return "not-a-date-time";
    }
    //! String used for the positive infinity value
    static const charT* pos_infinity()
    {
      return "+infinity";
    }
    //! String used for the negative infinity value
    static const charT* neg_infinity()
    {
      return "-infinity";
    }
    //! ISO 8601 char for a year -- used in durations
    static charT year_sep_char()
    {
      return 'Y';
    }
    //! ISO 8601 char for a month
    static charT month_sep_char()
    {
      return '-';
    }
    //! ISO 8601 char for a day
    static charT day_sep_char()
    {
      return '-';
    }
    //! char for hour
    static charT hour_sep_char()
    {
      return ':';
    }
    //! char for minute
    static charT minute_sep_char()
    {
      return ':';
    }
    //! char for second
    static charT second_sep_char()
    {
      return ':';
    }
    //! ISO 8601 char for a period
    static charT period_start_char()
    {
      return 'P';
    }
    //! Used in time in mixed strings to set start of time
    static charT time_start_char()
    {
      return 'T';
    }
    //! Used in mixed strings to identify start of a week number
    static charT week_start_char()
    {
      return 'W';
    }
    //! Separators for periods
    static charT period_sep_char()
    {
      return '/';
    }
    //! Separator for hh:mm:ss
    static charT time_sep_char()
    {
      return ':';
    }
    //! Preferred Separator for hh:mm:ss,decimal_fraction
    static charT fractional_time_sep_char()
    {
      return ',';
    }
    //! True for the ISO 8601 duration designator characters
    static bool is_component_sep(charT sep)
    {
      switch(sep) {
      case 'H':
      case 'M':
      case 'S':
      case 'W':
      case 'T':
      case 'Y':
      case 'D':return true;
      default:
        return false;
      }
    }
    //! True for ',' or '.' -- both accepted as fraction separators
    static bool is_fractional_time_sep(charT sep)
    {
      switch(sep) {
      case ',':
      case '.': return true;
      default: return false;
      }
    }
    //! True for a leading UTC-offset sign character
    static bool is_timezone_sep(charT sep)
    {
      switch(sep) {
      case '+':
      case '-': return true;
      default: return false;
      }
    }
    //! Generic element separator
    static charT element_sep_char()
    {
      return '-';
    }
  };
#ifndef BOOST_NO_STD_WSTRING
  //! Class to provide common ISO 8601 formatting spec (wchar_t specialization)
  template<>
  class iso_format_base<wchar_t> {
  public:
    //! Describe month format -- it's an integer in ISO 8601 format
    static month_format_spec month_format()
    {
      return month_as_integer;
    }
    //! String printed when the date is invalid
    static const wchar_t* not_a_date()
    {
      return L"not-a-date-time";
    }
    //! String used for the positive infinity value
    static const wchar_t* pos_infinity()
    {
      return L"+infinity";
    }
    //! String used for the negative infinity value
    static const wchar_t* neg_infinity()
    {
      return L"-infinity";
    }
    //! ISO 8601 char for a year -- used in durations
    static wchar_t year_sep_char()
    {
      return 'Y';
    }
    //! ISO 8601 char for a month
    static wchar_t month_sep_char()
    {
      return '-';
    }
    //! ISO 8601 char for a day
    static wchar_t day_sep_char()
    {
      return '-';
    }
    //! char for hour
    static wchar_t hour_sep_char()
    {
      return ':';
    }
    //! char for minute
    static wchar_t minute_sep_char()
    {
      return ':';
    }
    //! char for second
    static wchar_t second_sep_char()
    {
      return ':';
    }
    //! ISO 8601 char for a period
    static wchar_t period_start_char()
    {
      return 'P';
    }
    //! Used in time in mixed strings to set start of time
    static wchar_t time_start_char()
    {
      return 'T';
    }
    //! Used in mixed strings to identify start of a week number
    static wchar_t week_start_char()
    {
      return 'W';
    }
    //! Separators for periods
    static wchar_t period_sep_char()
    {
      return '/';
    }
    //! Separator for hh:mm:ss
    static wchar_t time_sep_char()
    {
      return ':';
    }
    //! Preferred Separator for hh:mm:ss,decimal_fraction
    static wchar_t fractional_time_sep_char()
    {
      return ',';
    }
    //! True for the ISO 8601 duration designator characters
    static bool is_component_sep(wchar_t sep)
    {
      switch(sep) {
      case 'H':
      case 'M':
      case 'S':
      case 'W':
      case 'T':
      case 'Y':
      case 'D':return true;
      default:
        return false;
      }
    }
    //! True for ',' or '.' -- both accepted as fraction separators
    static bool is_fractional_time_sep(wchar_t sep)
    {
      switch(sep) {
      case ',':
      case '.': return true;
      default: return false;
      }
    }
    //! True for a leading UTC-offset sign character
    static bool is_timezone_sep(wchar_t sep)
    {
      switch(sep) {
      case '+':
      case '-': return true;
      default: return false;
      }
    }
    //! Generic element separator
    static wchar_t element_sep_char()
    {
      return '-';
    }
  };
#endif // BOOST_NO_STD_WSTRING
  //! Format description for ISO 8601 normal (basic) YYYYMMDD
  template<class charT>
  class iso_format : public iso_format_base<charT> {
  public:
    //! The ISO basic format doesn't use char separators
    static bool has_date_sep_chars()
    {
      return false;
    }
  };
  //! Extended format uses separators: YYYY-MM-DD
  template<class charT>
  class iso_extended_format : public iso_format_base<charT> {
  public:
    //! Extended format needs char separators
    static bool has_date_sep_chars()
    {
      return true;
    }
  };
} } //namespace date_time
#endif
|
unknown
|
github
|
https://github.com/mysql/mysql-server
|
extra/boost/boost_1_87_0/boost/date_time/iso_format.hpp
|
##############################################################################
#
# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""This module tests the Formats and everything that goes with it.
$Id: test_formats.py 40912 2005-12-20 17:49:19Z poster $
"""
import os
import datetime
import pytz
import pickle
from unittest import TestCase, TestSuite, makeSuite
from zope.i18n.interfaces import IDateTimeFormat
from zope.i18n.format import DateTimeFormat
from zope.i18n.format import parseDateTimePattern, buildDateTimeParseInfo
from zope.i18n.format import DateTimePatternParseError, DateTimeParseError
from zope.i18n.interfaces import INumberFormat
from zope.i18n.format import NumberFormat
from zope.i18n.format import parseNumberPattern
class LocaleStub(object):
    """Empty placeholder standing in for a full locale object."""
    pass
class LocaleCalendarStub(object):
    """Hard-coded German Gregorian calendar used as a test fixture.

    Provides the subset of the locale-calendar API the format/parse
    tests rely on: month/day names and abbreviations (with reverse
    lookups), am/pm markers, era names and week rules.
    """
    type = u'gregorian'
    # month number -> (full name, abbreviation)
    months = { 1: ('Januar', 'Jan'), 2: ('Februar', 'Feb'),
               3: ('Maerz', 'Mrz'), 4: ('April', 'Apr'),
               5: ('Mai', 'Mai'), 6: ('Juni', 'Jun'),
               7: ('Juli', 'Jul'), 8: ('August', 'Aug'),
               9: ('September', 'Sep'), 10: ('Oktober', 'Okt'),
               11: ('November', 'Nov'), 12: ('Dezember', 'Dez')}
    # ISO weekday number (1=Monday) -> (full name, abbreviation)
    days = {1: ('Montag', 'Mo'), 2: ('Dienstag', 'Di'),
            3: ('Mittwoch', 'Mi'), 4: ('Donnerstag', 'Do'),
            5: ('Freitag', 'Fr'), 6: ('Samstag', 'Sa'),
            7: ('Sonntag', 'So')}
    am = 'vorm.'
    pm = 'nachm.'
    eras = {1: (None, 'v. Chr.'), 2: (None, 'n. Chr.')}
    week = {'firstDay': 1, 'minDays': 1}
    def getMonthNames(self):
        """Return the twelve full month names in calendar order."""
        # 'num' replaces the original loop variable 'type', which
        # shadowed the builtin.
        return [self.months.get(num, (None, None))[0] for num in range(1, 13)]
    def getMonthTypeFromName(self, name):
        """Return the month number for full name *name* (None if absent)."""
        for num, (full, abbr) in self.months.items():
            if full == name:
                return num
    def getMonthAbbreviations(self):
        """Return the twelve month abbreviations in calendar order."""
        return [self.months.get(num, (None, None))[1] for num in range(1, 13)]
    def getMonthTypeFromAbbreviation(self, abbr):
        """Return the month number for abbreviation *abbr* (None if absent)."""
        for num, (full, short) in self.months.items():
            if short == abbr:
                return num
    def getDayNames(self):
        """Return the seven full weekday names, Monday first."""
        return [self.days.get(num, (None, None))[0] for num in range(1, 8)]
    def getDayTypeFromName(self, name):
        """Return the weekday number for full name *name* (None if absent)."""
        for num, (full, abbr) in self.days.items():
            if full == name:
                return num
    def getDayAbbreviations(self):
        """Return the seven weekday abbreviations, Monday first."""
        return [self.days.get(num, (None, None))[1] for num in range(1, 8)]
    def getDayTypeFromAbbreviation(self, abbr):
        """Return the weekday number for abbreviation *abbr* (None if absent)."""
        for num, (full, short) in self.days.items():
            if short == abbr:
                return num
class TestDateTimePatternParser(TestCase):
    """Extensive tests for the ICU-based-syntax datetime pattern parser."""
    # Patterns parse into a list of (field-letter, repeat-count) tuples
    # interleaved with literal-text strings, in order of appearance.
    def testParseSimpleTimePattern(self):
        self.assertEqual(parseDateTimePattern('HH'),
                         [('H', 2)])
        self.assertEqual(parseDateTimePattern('HH:mm'),
                         [('H', 2), ':', ('m', 2)])
        self.assertEqual(parseDateTimePattern('HH:mm:ss'),
                         [('H', 2), ':', ('m', 2), ':', ('s', 2)])
        self.assertEqual(parseDateTimePattern('mm:ss'),
                         [('m', 2), ':', ('s', 2)])
        self.assertEqual(parseDateTimePattern('H:m:s'),
                         [('H', 1), ':', ('m', 1), ':', ('s', 1)])
        self.assertEqual(parseDateTimePattern('HHH:mmmm:sssss'),
                         [('H', 3), ':', ('m', 4), ':', ('s', 5)])
    def testParseGermanTimePattern(self):
        # German full
        self.assertEqual(parseDateTimePattern("H:mm' Uhr 'z"),
                         [('H', 1), ':', ('m', 2), ' Uhr ', ('z', 1)])
        # German long
        self.assertEqual(parseDateTimePattern("HH:mm:ss z"),
                         [('H', 2), ':', ('m', 2), ':', ('s', 2), ' ',
                          ('z', 1)])
        # German medium
        self.assertEqual(parseDateTimePattern("HH:mm:ss"),
                         [('H', 2), ':', ('m', 2), ':', ('s', 2)])
        # German short
        self.assertEqual(parseDateTimePattern("HH:mm"),
                         [('H', 2), ':', ('m', 2)])
    def testParseRealDate(self):
        # German full
        self.assertEqual(parseDateTimePattern("EEEE, d. MMMM yyyy"),
                         [('E', 4), ', ', ('d', 1), '. ', ('M', 4),
                          ' ', ('y', 4)])
        # German long
        self.assertEqual(parseDateTimePattern("d. MMMM yyyy"),
                         [('d', 1), '. ', ('M', 4), ' ', ('y', 4)])
        # German medium
        self.assertEqual(parseDateTimePattern("dd.MM.yyyy"),
                         [('d', 2), '.', ('M', 2), '.', ('y', 4)])
        # German short
        self.assertEqual(parseDateTimePattern("dd.MM.yy"),
                         [('d', 2), '.', ('M', 2), '.', ('y', 2)])
    def testParseRealDateTime(self):
        # German full
        self.assertEqual(
            parseDateTimePattern("EEEE, d. MMMM yyyy H:mm' Uhr 'z"),
            [('E', 4), ', ', ('d', 1), '. ', ('M', 4), ' ', ('y', 4),
             ' ', ('H', 1), ':', ('m', 2), ' Uhr ', ('z', 1)])
        # German long
        self.assertEqual(
            parseDateTimePattern("d. MMMM yyyy HH:mm:ss z"),
            [('d', 1), '. ', ('M', 4), ' ', ('y', 4),
             ' ', ('H', 2), ':', ('m', 2), ':', ('s', 2), ' ', ('z', 1)])
        # German medium
        self.assertEqual(
            parseDateTimePattern("dd.MM.yyyy HH:mm:ss"),
            [('d', 2), '.', ('M', 2), '.', ('y', 4),
             ' ', ('H', 2), ':', ('m', 2), ':', ('s', 2)])
        # German short
        self.assertEqual(
            parseDateTimePattern("dd.MM.yy HH:mm"),
            [('d', 2), '.', ('M', 2), '.', ('y', 2),
             ' ', ('H', 2), ':', ('m', 2)])
    def testParseQuotesInPattern(self):
        # '' is a literal apostrophe; '...' quotes literal text.
        self.assertEqual(parseDateTimePattern("HH''mm"),
                         [('H', 2), "'", ('m', 2)])
        self.assertEqual(parseDateTimePattern("HH'HHmm'mm"),
                         [('H', 2), 'HHmm', ('m', 2)])
        self.assertEqual(parseDateTimePattern("HH':'''':'mm"),
                         [('H', 2), ":':", ('m', 2)])
        self.assertEqual(parseDateTimePattern("HH':' ':'mm"),
                         [('H', 2), ": :", ('m', 2)])
    def testParseDateTimePatternError(self):
        # Quote not closed
        try:
            parseDateTimePattern("HH' Uhr")
        except DateTimePatternParseError, err:
            self.assertEqual(
                str(err), 'The quote starting at character 2 is not closed.')
        # Test correct length of characters in datetime fields
        try:
            parseDateTimePattern("HHHHH")
        except DateTimePatternParseError, err:
            self.assert_(str(err).endswith('You have: 5'))
class TestBuildDateTimeParseInfo(TestCase):
    """This class tests the functionality of the buildDateTimeParseInfo()
    method with the German locale.
    """
    def info(self, entry):
        # Build and return the regex fragment for one (field, length) entry.
        info = buildDateTimeParseInfo(LocaleCalendarStub(), [entry])
        return info[entry]
    def testGenericNumbers(self):
        for char in 'dDFkKhHmsSwW':
            for length in range(1, 6):
                self.assertEqual(self.info((char, length)),
                                 '([0-9]{%i,1000})' %length)
    def testYear(self):
        # Only two- and four-digit year fields are accepted.
        self.assertEqual(self.info(('y', 2)), '([0-9]{2})')
        self.assertEqual(self.info(('y', 4)), '([0-9]{4})')
        self.assertRaises(DateTimePatternParseError, self.info, ('y', 1))
        self.assertRaises(DateTimePatternParseError, self.info, ('y', 3))
        self.assertRaises(DateTimePatternParseError, self.info, ('y', 5))
    def testAMPMMarker(self):
        names = ['vorm.', 'nachm.']
        for length in range(1, 6):
            self.assertEqual(self.info(('a', length)), '('+'|'.join(names)+')')
    def testEra(self):
        self.assertEqual(self.info(('G', 1)), '(v. Chr.|n. Chr.)')
    def testTimeZone(self):
        self.assertEqual(self.info(('z', 1)), r'([\+-][0-9]{3,4})')
        self.assertEqual(self.info(('z', 2)), r'([\+-][0-9]{2}:[0-9]{2})')
        self.assertEqual(self.info(('z', 3)), r'([a-zA-Z]{3})')
        self.assertEqual(self.info(('z', 4)), r'([a-zA-Z /\.]*)')
        self.assertEqual(self.info(('z', 5)), r'([a-zA-Z /\.]*)')
    def testMonthNumber(self):
        self.assertEqual(self.info(('M', 1)), '([0-9]{1,2})')
        self.assertEqual(self.info(('M', 2)), '([0-9]{2})')
    def testMonthNames(self):
        names = [u'Januar', u'Februar', u'Maerz', u'April',
                 u'Mai', u'Juni', u'Juli', u'August', u'September', u'Oktober',
                 u'November', u'Dezember']
        self.assertEqual(self.info(('M', 4)), '('+'|'.join(names)+')')
    def testMonthAbbr(self):
        names = ['Jan', 'Feb', 'Mrz', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug',
                 'Sep', 'Okt', 'Nov', 'Dez']
        self.assertEqual(self.info(('M', 3)), '('+'|'.join(names)+')')
    def testWeekdayNumber(self):
        self.assertEqual(self.info(('E', 1)), '([0-9])')
        self.assertEqual(self.info(('E', 2)), '([0-9]{2})')
    def testWeekdayNames(self):
        names = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag',
                 'Freitag', 'Samstag', 'Sonntag']
        self.assertEqual(self.info(('E', 4)), '('+'|'.join(names)+')')
        self.assertEqual(self.info(('E', 5)), '('+'|'.join(names)+')')
        self.assertEqual(self.info(('E', 10)), '('+'|'.join(names)+')')
    def testWeekdayAbbr(self):
        names = ['Mo', 'Di', 'Mi', 'Do', 'Fr', 'Sa', 'So']
        self.assertEqual(self.info(('E', 3)), '('+'|'.join(names)+')')
class TestDateTimeFormat(TestCase):
    """Test the functionality of an implementation of the IDateTimeFormat
    interface (parse and format, German locale stub)."""
    format = DateTimeFormat(calendar=LocaleCalendarStub())
    def testInterfaceConformity(self):
        self.assert_(IDateTimeFormat.providedBy(self.format))
    def testParseSimpleDateTime(self):
        # German short
        self.assertEqual(
            self.format.parse('02.01.03 21:48', 'dd.MM.yy HH:mm'),
            datetime.datetime(2003, 01, 02, 21, 48))
    def testParseRealDateTime(self):
        # German medium
        self.assertEqual(
            self.format.parse('02.01.2003 21:48:01', 'dd.MM.yyyy HH:mm:ss'),
            datetime.datetime(2003, 01, 02, 21, 48, 01))
        # German long
        # TODO: The parser does not support timezones yet.
        self.assertEqual(self.format.parse(
            '2. Januar 2003 21:48:01 +100',
            'd. MMMM yyyy HH:mm:ss z'),
            datetime.datetime(2003, 01, 02, 21, 48, 01,
                              tzinfo=pytz.timezone('Europe/Berlin')))
        # German full
        # TODO: The parser does not support timezones yet.
        self.assertEqual(self.format.parse(
            'Donnerstag, 2. Januar 2003 21:48 Uhr +100',
            "EEEE, d. MMMM yyyy H:mm' Uhr 'z"),
            datetime.datetime(2003, 01, 02, 21, 48,
                              tzinfo=pytz.timezone('Europe/Berlin')))
    def testParseAMPMDateTime(self):
        self.assertEqual(
            self.format.parse('02.01.03 09:48 nachm.', 'dd.MM.yy hh:mm a'),
            datetime.datetime(2003, 01, 02, 21, 48))
    def testParseTimeZone(self):
        # Numeric offsets produce anonymous fixed-offset tzinfo objects.
        dt = self.format.parse('09:48 -600', 'HH:mm z')
        self.assertEqual(pickle.loads(pickle.dumps(dt)), dt)
        self.assertEqual(dt.tzinfo.utcoffset(dt), datetime.timedelta(hours=-6))
        self.assertEqual(dt.tzinfo.zone, None)
        self.assertEqual(dt.tzinfo.tzname(dt), None)
        dt = self.format.parse('09:48 -06:00', 'HH:mm zz')
        self.assertEqual(pickle.loads(pickle.dumps(dt)), dt)
        self.assertEqual(dt.tzinfo.utcoffset(dt), datetime.timedelta(hours=-6))
        self.assertEqual(dt.tzinfo.zone, None)
        self.assertEqual(dt.tzinfo.tzname(dt), None)
    def testParseTimeZoneNames(self):
        # Note that EST is a deprecated timezone name since it is a US
        # interpretation (other countries also use the EST timezone
        # abbreviation)
        dt = self.format.parse('01.01.2003 09:48 EST', 'dd.MM.yyyy HH:mm zzz')
        self.assertEqual(pickle.loads(pickle.dumps(dt)), dt)
        self.assertEqual(dt.tzinfo.utcoffset(dt), datetime.timedelta(hours=-5))
        self.assertEqual(dt.tzinfo.zone, 'EST')
        self.assertEqual(dt.tzinfo.tzname(dt), 'EST')
        dt = self.format.parse('01.01.2003 09:48 US/Eastern',
                               'dd.MM.yyyy HH:mm zzzz')
        self.assertEqual(pickle.loads(pickle.dumps(dt)), dt)
        self.assertEqual(dt.tzinfo.utcoffset(dt), datetime.timedelta(hours=-5))
        self.assertEqual(dt.tzinfo.zone, 'US/Eastern')
        self.assertEqual(dt.tzinfo.tzname(dt), 'EST')
        dt = self.format.parse('01.01.2003 09:48 Australia/Sydney',
                               'dd.MM.yyyy HH:mm zzzz')
        self.assertEqual(dt.tzinfo.utcoffset(dt), datetime.timedelta(hours=11))
        self.assertEqual(dt.tzinfo.zone, 'Australia/Sydney')
        self.assertEqual(dt.tzinfo.tzname(dt), 'EST')
        # Note that historical and future (as far as known)
        # timezones are handled happily using the pytz timezone database
        # US DST transition points are changing in 2007
        dt = self.format.parse('01.04.2006 09:48 US/Eastern',
                               'dd.MM.yyyy HH:mm zzzz')
        self.assertEqual(dt.tzinfo.zone, 'US/Eastern')
        self.assertEqual(dt.tzinfo.tzname(dt), 'EST')
        self.assertEqual(dt.tzinfo.utcoffset(dt), datetime.timedelta(hours=-5))
        dt = self.format.parse('01.04.2007 09:48 US/Eastern',
                               'dd.MM.yyyy HH:mm zzzz')
        self.assertEqual(dt.tzinfo.zone, 'US/Eastern')
        self.assertEqual(dt.tzinfo.tzname(dt), 'EDT')
        self.assertEqual(dt.tzinfo.utcoffset(dt), datetime.timedelta(hours=-4))
    def testDateTimeParseError(self):
        self.assertRaises(DateTimeParseError,
                          self.format.parse, '02.01.03 21:48', 'dd.MM.yyyy HH:mm')
    def testParse12PM(self):
        self.assertEqual(
            self.format.parse('01.01.03 12:00 nachm.', 'dd.MM.yy hh:mm a'),
            datetime.datetime(2003, 01, 01, 12, 00, 00, 00))
    def testParseUnusualFormats(self):
        self.assertEqual(
            self.format.parse('001. Januar 03 0012:00',
                              'ddd. MMMMM yy HHHH:mm'),
            datetime.datetime(2003, 01, 01, 12, 00, 00, 00))
        self.assertEqual(
            self.format.parse('0001. Jan 2003 0012:00 vorm.',
                              'dddd. MMM yyyy hhhh:mm a'),
            datetime.datetime(2003, 01, 01, 00, 00, 00, 00))
    def testFormatSimpleDateTime(self):
        # German short
        self.assertEqual(
            self.format.format(datetime.datetime(2003, 01, 02, 21, 48),
                               'dd.MM.yy HH:mm'),
            '02.01.03 21:48')
    def testFormatRealDateTime(self):
        tz = pytz.timezone('Europe/Berlin')
        dt = datetime.datetime(2003, 01, 02, 21, 48, 01, tzinfo=tz)
        # German medium
        self.assertEqual(
            self.format.format(dt, 'dd.MM.yyyy HH:mm:ss'),
            '02.01.2003 21:48:01')
        # German long
        self.assertEqual(
            self.format.format(dt, 'd. MMMM yyyy HH:mm:ss z'),
            '2. Januar 2003 21:48:01 +100')
        # German full
        self.assertEqual(self.format.format(
            dt, "EEEE, d. MMMM yyyy H:mm' Uhr 'z"),
            'Donnerstag, 2. Januar 2003 21:48 Uhr +100')
    def testFormatAMPMDateTime(self):
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 21, 48),
            'dd.MM.yy hh:mm a'),
            '02.01.03 09:48 nachm.')
    def testFormatAllWeekdays(self):
        for day in range(1, 8):
            self.assertEqual(self.format.format(
                datetime.datetime(2003, 01, day+5, 21, 48),
                "EEEE, d. MMMM yyyy H:mm' Uhr 'z"),
                '%s, %i. Januar 2003 21:48 Uhr +000' %(
                self.format.calendar.days[day][0], day+5))
    def testFormatTimeZone(self):
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 12, 00), 'z'),
            '+000')
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 12, 00), 'zz'),
            '+00:00')
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 12, 00), 'zzz'),
            'UTC')
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 12, 00), 'zzzz'),
            'UTC')
        tz = pytz.timezone('US/Eastern')
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 12, tzinfo=tz), 'z'),
            '-500')
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 12, tzinfo=tz), 'zz'),
            '-05:00')
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 12, tzinfo=tz), 'zzz'),
            'EST')
        self.assertEqual(self.format.format(
            datetime.datetime(2003, 01, 02, 12, tzinfo=tz), 'zzzz'),
            'US/Eastern')
    def testFormatWeekDay(self):
        date = datetime.date(2003, 01, 02)
        self.assertEqual(self.format.format(date, "E"),
                         '4')
        self.assertEqual(self.format.format(date, "EE"),
                         '04')
        self.assertEqual(self.format.format(date, "EEE"),
                         'Do')
        self.assertEqual(self.format.format(date, "EEEE"),
                         'Donnerstag')
        # Create custom calendar, which has Sunday as the first day of the
        # week. I am assigning a totally new dict here, since dicts are
        # mutable and the value would be changed for the class and all its
        # instances.
        calendar = LocaleCalendarStub()
        calendar.week = {'firstDay': 7, 'minDays': 1}
        format = DateTimeFormat(calendar=calendar)
        self.assertEqual(format.format(date, "E"),
                         '5')
        self.assertEqual(format.format(date, "EE"),
                         '05')
    def testFormatDayOfWeekInMonth(self):
        date = datetime.date(2003, 01, 02)
        self.assertEqual(self.format.format(date, "F"),
                         '1')
        self.assertEqual(self.format.format(date, "FF"),
                         '01')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 9), "F"),
            '2')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 16), "F"),
            '3')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 23), "F"),
            '4')
    def testFormatWeekInMonth(self):
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 3), "W"),
            '1')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 3), "WW"),
            '01')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 8), "W"),
            '2')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 19), "W"),
            '3')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 20), "W"),
            '4')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 31), "W"),
            '5')
    def testFormatHourInDayOneTo24(self):
        # 'k': hours 1..24, midnight prints as 24.
        self.assertEqual(
            self.format.format(datetime.time(5, 0), "k"),
            '5')
        self.assertEqual(
            self.format.format(datetime.time(5, 0), "kk"),
            '05')
        self.assertEqual(
            self.format.format(datetime.time(0, 0), "k"),
            '24')
        self.assertEqual(
            self.format.format(datetime.time(1, 0), "k"),
            '1')
    def testFormatHourInDayZeroToEleven(self):
        # 'K': hours 0..11, noon and midnight print as 0.
        self.assertEqual(
            self.format.format(datetime.time(5, 0), "K"),
            '5')
        self.assertEqual(
            self.format.format(datetime.time(5, 0), "KK"),
            '05')
        self.assertEqual(
            self.format.format(datetime.time(0, 0), "K"),
            '0')
        self.assertEqual(
            self.format.format(datetime.time(12, 0), "K"),
            '0')
        self.assertEqual(
            self.format.format(datetime.time(11, 0), "K"),
            '11')
        self.assertEqual(
            self.format.format(datetime.time(23, 0), "K"),
            '11')
    def testFormatSimpleHourRepresentation(self):
        self.assertEqual(
            self.format.format(datetime.datetime(2003, 01, 02, 23, 00),
                               'dd.MM.yy h:mm:ss a'),
            '02.01.03 11:00:00 nachm.')
        self.assertEqual(
            self.format.format(datetime.datetime(2003, 01, 02, 02, 00),
                               'dd.MM.yy h:mm:ss a'),
            '02.01.03 2:00:00 vorm.')
        self.assertEqual(
            self.format.format(datetime.time(0, 15), 'h:mm a'),
            '12:15 vorm.')
        self.assertEqual(
            self.format.format(datetime.time(1, 15), 'h:mm a'),
            '1:15 vorm.')
        self.assertEqual(
            self.format.format(datetime.time(12, 15), 'h:mm a'),
            '12:15 nachm.')
        self.assertEqual(
            self.format.format(datetime.time(13, 15), 'h:mm a'),
            '1:15 nachm.')
    def testFormatDayInYear(self):
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 3), 'D'),
            u'3')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 3), 'DD'),
            u'03')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 3), 'DDD'),
            u'003')
        self.assertEqual(
            self.format.format(datetime.date(2003, 12, 31), 'D'),
            u'365')
        self.assertEqual(
            self.format.format(datetime.date(2003, 12, 31), 'DD'),
            u'365')
        self.assertEqual(
            self.format.format(datetime.date(2003, 12, 31), 'DDD'),
            u'365')
        self.assertEqual(
            self.format.format(datetime.date(2004, 12, 31), 'DDD'),
            u'366')
    def testFormatDayOfWeekInMOnth(self):
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 3), 'F'),
            u'1')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 10), 'F'),
            u'2')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 17), 'F'),
            u'3')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 24), 'F'),
            u'4')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 31), 'F'),
            u'5')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 6), 'F'),
            u'1')
    def testFormatUnusualFormats(self):
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 3), 'DDD-yyyy'),
            u'003-2003')
        self.assertEqual(
            self.format.format(datetime.date(2003, 1, 10),
                               "F. EEEE 'im' MMMM, yyyy"),
            u'2. Freitag im Januar, 2003')
class TestNumberPatternParser(TestCase):
    """Extensive tests for the ICU-based-syntax number pattern parser."""

    # parseNumberPattern() returns a (positive, negative) pair of 10-slot
    # descriptors.  Judging by the assertions below, the slots are:
    # (pad-1, prefix, pad-2, integer-part, fraction-part, exponential-part,
    #  pad-3, suffix, pad-4, grouping-flag) — TODO(review): confirm the
    # slot names against parseNumberPattern's implementation.

    def testParseSimpleIntegerPattern(self):
        self.assertEqual(
            parseNumberPattern('###0'),
            ((None, '', None, '###0', '', '', None, '', None, 0),
             (None, '', None, '###0', '', '', None, '', None, 0)))

    def testParseScientificIntegerPattern(self):
        self.assertEqual(
            parseNumberPattern('###0E#0'),
            ((None, '', None, '###0', '', '#0', None, '', None, 0),
             (None, '', None, '###0', '', '#0', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern('###0E+#0'),
            ((None, '', None, '###0', '', '+#0', None, '', None, 0),
             (None, '', None, '###0', '', '+#0', None, '', None, 0)))

    def testParsePosNegAlternativeIntegerPattern(self):
        # ';' separates the positive and negative sub-patterns.
        self.assertEqual(
            parseNumberPattern('###0;#0'),
            ((None, '', None, '###0', '', '', None, '', None, 0),
             (None, '', None, '#0', '', '', None, '', None, 0)))

    def testParsePrefixedIntegerPattern(self):
        self.assertEqual(
            parseNumberPattern('+###0'),
            ((None, '+', None, '###0', '', '', None, '', None, 0),
             (None, '+', None, '###0', '', '', None, '', None, 0)))

    def testParsePosNegIntegerPattern(self):
        self.assertEqual(
            parseNumberPattern('+###0;-###0'),
            ((None, '+', None, '###0', '', '', None, '', None, 0),
             (None, '-', None, '###0', '', '', None, '', None, 0)))

    def testParseScientificPosNegIntegerPattern(self):
        self.assertEqual(
            parseNumberPattern('+###0E0;-###0E#0'),
            ((None, '+', None, '###0', '', '0', None, '', None, 0),
             (None, '-', None, '###0', '', '#0', None, '', None, 0)))

    def testParseThousandSeparatorIntegerPattern(self):
        # A ',' in the integer part only sets the grouping flag (last slot).
        self.assertEqual(
            parseNumberPattern('#,##0'),
            ((None, '', None, '###0', '', '', None, '', None, 1),
             (None, '', None, '###0', '', '', None, '', None, 1)))

    def testParseSimpleDecimalPattern(self):
        self.assertEqual(
            parseNumberPattern('###0.00#'),
            ((None, '', None, '###0', '00#', '', None, '', None, 0),
             (None, '', None, '###0', '00#', '', None, '', None, 0)))

    def testParseScientificDecimalPattern(self):
        self.assertEqual(
            parseNumberPattern('###0.00#E#0'),
            ((None, '', None, '###0', '00#', '#0', None, '', None, 0),
             (None, '', None, '###0', '00#', '#0', None, '', None, 0)))

    def testParsePosNegAlternativeFractionPattern(self):
        self.assertEqual(
            parseNumberPattern('###0.00#;#0.0#'),
            ((None, '', None, '###0', '00#', '', None, '', None, 0),
             (None, '', None, '#0', '0#', '', None, '', None, 0)))

    def testParsePosNegFractionPattern(self):
        self.assertEqual(
            parseNumberPattern('+###0.0##;-###0.0##'),
            ((None, '+', None, '###0', '0##', '', None, '', None, 0),
             (None, '-', None, '###0', '0##', '', None, '', None, 0)))

    def testParseScientificPosNegFractionPattern(self):
        self.assertEqual(
            parseNumberPattern('+###0.0##E#0;-###0.0##E0'),
            ((None, '+', None, '###0', '0##', '#0', None, '', None, 0),
             (None, '-', None, '###0', '0##', '0', None, '', None, 0)))

    def testParseThousandSeparatorFractionPattern(self):
        self.assertEqual(
            parseNumberPattern('#,##0.0#'),
            ((None, '', None, '###0', '0#', '', None, '', None, 1),
             (None, '', None, '###0', '0#', '', None, '', None, 1)))

    # '*x' requests padding with character x; its position in the pattern
    # selects which of the four padding slots it fills.

    def testParsePadding1WithoutPrefixPattern(self):
        self.assertEqual(
            parseNumberPattern('* ###0'),
            ((' ', '', None, '###0', '', '', None, '', None, 0),
             (' ', '', None, '###0', '', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern('* ###0.0##'),
            ((' ', '', None, '###0', '0##', '', None, '', None, 0),
             (' ', '', None, '###0', '0##', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern('* ###0.0##;*_###0.0##'),
            ((' ', '', None, '###0', '0##', '', None, '', None, 0),
             ('_', '', None, '###0', '0##', '', None, '', None, 0)))

    def testParsePadding1WithPrefixPattern(self):
        self.assertEqual(
            parseNumberPattern('* +###0'),
            ((' ', '+', None, '###0', '', '', None, '', None, 0),
             (' ', '+', None, '###0', '', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern('* +###0.0##'),
            ((' ', '+', None, '###0', '0##', '', None, '', None, 0),
             (' ', '+', None, '###0', '0##', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern('* +###0.0##;*_-###0.0##'),
            ((' ', '+', None, '###0', '0##', '', None, '', None, 0),
             ('_', '-', None, '###0', '0##', '', None, '', None, 0)))

    def testParsePadding1Padding2WithPrefixPattern(self):
        self.assertEqual(
            parseNumberPattern('* +* ###0'),
            ((' ', '+', ' ', '###0', '', '', None, '', None, 0),
             (' ', '+', ' ', '###0', '', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern('* +* ###0.0##'),
            ((' ', '+', ' ', '###0', '0##', '', None, '', None, 0),
             (' ', '+', ' ', '###0', '0##', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern('* +* ###0.0##;*_-*_###0.0##'),
            ((' ', '+', ' ', '###0', '0##', '', None, '', None, 0),
             ('_', '-', '_', '###0', '0##', '', None, '', None, 0)))

    def testParsePadding3WithoutSufffixPattern(self):
        self.assertEqual(
            parseNumberPattern('###0* '),
            ((None, '', None, '###0', '', '', ' ', '', None, 0),
             (None, '', None, '###0', '', '', ' ', '', None, 0)))
        self.assertEqual(
            parseNumberPattern('###0.0##* '),
            ((None, '', None, '###0', '0##', '', ' ', '', None, 0),
             (None, '', None, '###0', '0##', '', ' ', '', None, 0)))
        self.assertEqual(
            parseNumberPattern('###0.0##* ;###0.0##*_'),
            ((None, '', None, '###0', '0##', '', ' ', '', None, 0),
             (None, '', None, '###0', '0##', '', '_', '', None, 0)))

    def testParsePadding3InScientificPattern(self):
        self.assertEqual(
            parseNumberPattern('###0E#0* '),
            ((None, '', None, '###0', '', '#0', ' ', '', None, 0),
             (None, '', None, '###0', '', '#0', ' ', '', None, 0)))
        self.assertEqual(
            parseNumberPattern('###0.0##E0* '),
            ((None, '', None, '###0', '0##', '0', ' ', '', None, 0),
             (None, '', None, '###0', '0##', '0', ' ', '', None, 0)))
        self.assertEqual(
            parseNumberPattern('###0.0##E#0* ;###0.0##E0*_'),
            ((None, '', None, '###0', '0##', '#0', ' ', '', None, 0),
             (None, '', None, '###0', '0##', '0', '_', '', None, 0)))

    def testParsePadding3WithSufffixPattern(self):
        self.assertEqual(
            parseNumberPattern('###0* /'),
            ((None, '', None, '###0', '', '', ' ', '/', None, 0),
             (None, '', None, '###0', '', '', ' ', '/', None, 0)))
        self.assertEqual(
            parseNumberPattern('###0.0#* /'),
            ((None, '', None, '###0', '0#', '', ' ', '/', None, 0),
             (None, '', None, '###0', '0#', '', ' ', '/', None, 0)))
        self.assertEqual(
            parseNumberPattern('###0.0#* /;###0.0#*_/'),
            ((None, '', None, '###0', '0#', '', ' ', '/', None, 0),
             (None, '', None, '###0', '0#', '', '_', '/', None, 0)))

    def testParsePadding3And4WithSuffixPattern(self):
        self.assertEqual(
            parseNumberPattern('###0* /* '),
            ((None, '', None, '###0', '', '', ' ', '/', ' ', 0),
             (None, '', None, '###0', '', '', ' ', '/', ' ', 0)))
        self.assertEqual(
            parseNumberPattern('###0* /* ;###0*_/*_'),
            ((None, '', None, '###0', '', '', ' ', '/', ' ', 0),
             (None, '', None, '###0', '', '', '_', '/', '_', 0)))

    def testParseMultipleCharacterPrefix(self):
        self.assertEqual(
            parseNumberPattern('DM###0'),
            ((None, 'DM', None, '###0', '', '', None, '', None, 0),
             (None, 'DM', None, '###0', '', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern('DM* ###0'),
            ((None, 'DM', ' ', '###0', '', '', None, '', None, 0),
             (None, 'DM', ' ', '###0', '', '', None, '', None, 0)))

    def testParseStringEscapedPrefix(self):
        # Single quotes escape literal text; the quotes themselves vanish.
        self.assertEqual(
            parseNumberPattern("'DEM'###0"),
            ((None, 'DEM', None, '###0', '', '', None, '', None, 0),
             (None, 'DEM', None, '###0', '', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern("D'EM'###0"),
            ((None, 'DEM', None, '###0', '', '', None, '', None, 0),
             (None, 'DEM', None, '###0', '', '', None, '', None, 0)))
        self.assertEqual(
            parseNumberPattern("D'E'M###0"),
            ((None, 'DEM', None, '###0', '', '', None, '', None, 0),
             (None, 'DEM', None, '###0', '', '', None, '', None, 0)))

    def testParseStringEscapedSuffix(self):
        self.assertEqual(
            parseNumberPattern("###0'DEM'"),
            ((None, '', None, '###0', '', '', None, 'DEM', None, 0),
             (None, '', None, '###0', '', '', None, 'DEM', None, 0)))
        self.assertEqual(
            parseNumberPattern("###0D'EM'"),
            ((None, '', None, '###0', '', '', None, 'DEM', None, 0),
             (None, '', None, '###0', '', '', None, 'DEM', None, 0)))
        self.assertEqual(
            parseNumberPattern("###0D'E'M"),
            ((None, '', None, '###0', '', '', None, 'DEM', None, 0),
             (None, '', None, '###0', '', '', None, 'DEM', None, 0)))
class TestNumberFormat(TestCase):
    """Test the functionality of an implementation of the NumberFormat."""

    # Shared formatter instance with explicit locale symbols, so the
    # expected strings below are deterministic.
    format = NumberFormat(symbols={
        'decimal': '.', 'group': ',', 'list': ';', 'percentSign': '%',
        'nativeZeroDigit': '0', 'patternDigit': '#', 'plusSign': '+',
        'minusSign': '-', 'exponential': 'E', 'perMille': 'o/oo',
        'infinity': 'oo', 'nan': 'N/A'})

    def testInterfaceConformity(self):
        self.assert_(INumberFormat.providedBy(self.format))

    # -- parse() -----------------------------------------------------------

    def testParseSimpleInteger(self):
        self.assertEqual(self.format.parse('23341', '###0'),
                         23341)
        self.assertEqual(self.format.parse('041', '#000'),
                         41)

    def testParseScientificInteger(self):
        self.assertEqual(self.format.parse('2.3341E4', '0.0###E0'),
                         23341)
        self.assertEqual(self.format.parse('4.100E01', '0.000##E00'),
                         41)
        self.assertEqual(self.format.parse('1E0', '0E0'),
                         1)
        self.assertEqual(self.format.parse('0E0', '0E0'),
                         0)
        # This is a special case I found not working, but is used frequently
        # in the new LDML Locale files.
        self.assertEqual(self.format.parse('2.3341E+04', '0.000###E+00'),
                         23341)

    def testParsePosNegAlternativeInteger(self):
        # With an explicit negative sub-pattern, matching it implies a
        # negative value even without a minus sign.
        self.assertEqual(self.format.parse('23341', '#000;#00'),
                         23341)
        self.assertEqual(self.format.parse('041', '#000;#00'),
                         41)
        self.assertEqual(self.format.parse('41', '#000;#00'),
                         -41)
        self.assertEqual(self.format.parse('01', '#000;#00'),
                         -1)

    def testParsePrefixedInteger(self):
        self.assertEqual(self.format.parse('+23341', '+###0'),
                         23341)
        self.assertEqual(self.format.parse('+041', '+#000'),
                         41)

    def testParsePosNegInteger(self):
        self.assertEqual(self.format.parse('+23341', '+###0;-###0'),
                         23341)
        self.assertEqual(self.format.parse('+041', '+#000;-#000'),
                         41)
        self.assertEqual(self.format.parse('-23341', '+###0;-###0'),
                         -23341)
        self.assertEqual(self.format.parse('-041', '+#000;-#000'),
                         -41)

    def testParseThousandSeparatorInteger(self):
        self.assertEqual(self.format.parse('+23,341', '+#,##0;-#,##0'),
                         23341)
        self.assertEqual(self.format.parse('-23,341', '+#,##0;-#,##0'),
                         -23341)
        self.assertEqual(self.format.parse('+0,041', '+#0,000;-#0,000'),
                         41)
        self.assertEqual(self.format.parse('-0,041', '+#0,000;-#0,000'),
                         -41)

    def testParseDecimal(self):
        self.assertEqual(self.format.parse('23341.02', '###0.0#'),
                         23341.02)
        self.assertEqual(self.format.parse('23341.1', '###0.0#'),
                         23341.1)
        self.assertEqual(self.format.parse('23341.020', '###0.000#'),
                         23341.02)

    def testParseScientificDecimal(self):
        self.assertEqual(self.format.parse('2.334102E04', '0.00####E00'),
                         23341.02)
        self.assertEqual(self.format.parse('2.3341020E004', '0.0000000E000'),
                         23341.02)
        self.assertEqual(self.format.parse('0.0E0', '0.0#E0'),
                         0.0)

    def testParseScientificDecimalSmallerOne(self):
        self.assertEqual(self.format.parse('2.357E-02', '0.00####E00'),
                         0.02357)
        self.assertEqual(self.format.parse('2.0000E-02', '0.0000E00'),
                         0.02)

    def testParsePadding1WithoutPrefix(self):
        self.assertEqual(self.format.parse(' 41', '* ##0;*_##0'),
                         41)
        self.assertEqual(self.format.parse('_41', '* ##0;*_##0'),
                         -41)

    def testParsePadding1WithPrefix(self):
        self.assertEqual(self.format.parse(' +41', '* +##0;*_-##0'),
                         41)
        self.assertEqual(self.format.parse('_-41', '* +##0;*_-##0'),
                         -41)

    def testParsePadding1Padding2WithPrefix(self):
        self.assertEqual(self.format.parse(' + 41', '* +* ###0;*_-*_###0'),
                         +41)
        self.assertEqual(self.format.parse('__-_41', '* +* ###0;*_-*_###0'),
                         -41)

    def testParsePadding1Scientific(self):
        self.assertEqual(self.format.parse(' 4.102E1',
                                           '* 0.0####E0;*_0.0####E0'),
                         41.02)
        self.assertEqual(self.format.parse('__4.102E1',
                                           '* 0.0####E0;*_0.0####E0'),
                         -41.02)
        self.assertEqual(self.format.parse(' +4.102E1',
                                           '* +0.0###E0;*_-0.0###E0'),
                         41.02)
        self.assertEqual(self.format.parse('_-4.102E1',
                                           '* +0.0###E0;*_-0.0###E0'),
                         -41.02)

    def testParsePadding3WithoutSufffix(self):
        self.assertEqual(self.format.parse('41.02 ', '#0.0###* ;#0.0###*_'),
                         41.02)
        self.assertEqual(self.format.parse('41.02__', '#0.0###* ;#0.0###*_'),
                         -41.02)

    def testParsePadding3WithSufffix(self):
        self.assertEqual(
            self.format.parse('[41.02 ]', '[#0.0###* ];(#0.0###*_)'),
            41.02)
        self.assertEqual(
            self.format.parse('(41.02__)', '[#0.0###* ];(#0.0###*_)'),
            -41.02)

    def testParsePadding3Scientific(self):
        self.assertEqual(self.format.parse('4.102E1 ',
                                           '0.0##E0##* ;0.0##E0##*_'),
                         41.02)
        self.assertEqual(self.format.parse('4.102E1__',
                                           '0.0##E0##* ;0.0##E0##*_'),
                         -41.02)
        self.assertEqual(self.format.parse('(4.102E1 )',
                                           '(0.0##E0##* );0.0E0'),
                         41.02)
        self.assertEqual(self.format.parse('[4.102E1__]',
                                           '0.0E0;[0.0##E0##*_]'),
                         -41.02)

    def testParsePadding3Padding4WithSuffix(self):
        self.assertEqual(self.format.parse('(41.02 ) ', '(#0.0###* )* '),
                         41.02)
        self.assertEqual(self.format.parse('(4.102E1 ) ', '(0.0##E0##* )* '),
                         41.02)

    def testParseDecimalWithGermanDecimalSeparator(self):
        format = NumberFormat(symbols={'decimal': ',', 'group': '.'})
        self.assertEqual(format.parse('1.234,567', '#,##0.000'), 1234.567)

    def testParseWithAlternativeExponentialSymbol(self):
        format = NumberFormat(
            symbols={'decimal': '.', 'group': ',', 'exponential': 'X'})
        self.assertEqual(format.parse('1.2X11', '#.#E0'), 1.2e11)

    # -- format() ----------------------------------------------------------

    def testFormatSimpleInteger(self):
        self.assertEqual(self.format.format(23341, '###0'),
                         '23341')
        self.assertEqual(self.format.format(41, '#000'),
                         '041')

    def testFormatScientificInteger(self):
        self.assertEqual(self.format.format(23341, '0.000#E0'),
                         '2.3341E4')
        self.assertEqual(self.format.format(23341, '0.000#E00'),
                         '2.3341E04')
        self.assertEqual(self.format.format(1, '0.##E0'),
                         '1E0')
        self.assertEqual(self.format.format(1, '0.00E00'),
                         '1.00E00')
        # This is a special case I found not working, but is used frequently
        # in the new LDML Locale files.
        self.assertEqual(self.format.format(23341, '0.000###E+00'),
                         '2.3341E+04')

    def testFormatScientificZero(self):
        self.assertEqual(self.format.format(0, '0.00E00'),
                         '0.00E00')
        self.assertEqual(self.format.format(0, '0E0'),
                         '0E0')

    def testFormatPosNegAlternativeInteger(self):
        # Negative numbers rendered via the alternative sub-pattern carry
        # no sign at all.
        self.assertEqual(self.format.format(23341, '#000;#00'),
                         '23341')
        self.assertEqual(self.format.format(41, '#000;#00'),
                         '041')
        self.assertEqual(self.format.format(-23341, '#000;#00'),
                         '23341')
        self.assertEqual(self.format.format(-41, '#000;#00'),
                         '41')
        self.assertEqual(self.format.format(-1, '#000;#00'),
                         '01')

    def testFormatPrefixedInteger(self):
        self.assertEqual(self.format.format(23341, '+###0'),
                         '+23341')
        self.assertEqual(self.format.format(41, '+#000'),
                         '+041')
        self.assertEqual(self.format.format(-23341, '+###0'),
                         '+23341')
        self.assertEqual(self.format.format(-41, '+#000'),
                         '+041')

    def testFormatPosNegInteger(self):
        self.assertEqual(self.format.format(23341, '+###0;-###0'),
                         '+23341')
        self.assertEqual(self.format.format(41, '+#000;-#000'),
                         '+041')
        self.assertEqual(self.format.format(-23341, '+###0;-###0'),
                         '-23341')
        self.assertEqual(self.format.format(-41, '+#000;-#000'),
                         '-041')

    def testFormatPosNegScientificInteger(self):
        self.assertEqual(self.format.format(23341, '+0.00###E00;-0.00###E00'),
                         '+2.3341E04')
        self.assertEqual(self.format.format(23341, '-0.00###E00;-0.00###E00'),
                         '-2.3341E04')

    def testFormatThousandSeparatorInteger(self):
        self.assertEqual(self.format.format(23341, '+#,##0;-#,##0'),
                         '+23,341')
        self.assertEqual(self.format.format(-23341, '+#,##0;-#,##0'),
                         '-23,341')
        self.assertEqual(self.format.format(41, '+#0,000;-#0,000'),
                         '+0,041')
        self.assertEqual(self.format.format(-41, '+#0,000;-#0,000'),
                         '-0,041')

    def testFormatDecimal(self):
        self.assertEqual(self.format.format(23341.02357, '###0.0#'),
                         '23341.02')
        self.assertEqual(self.format.format(23341.02357, '###0.000#'),
                         '23341.0236')
        self.assertEqual(self.format.format(23341.02, '###0.000#'),
                         '23341.020')

    def testRounding(self):
        self.assertEqual(self.format.format(0.5, '#'), '1')
        self.assertEqual(self.format.format(0.49, '#'), '0')
        self.assertEqual(self.format.format(0.45, '0.0'), '0.5')
        self.assertEqual(self.format.format(150, '0E0'), '2E2')
        self.assertEqual(self.format.format(149, '0E0'), '1E2')
        self.assertEqual(self.format.format(1.9999, '0.000'), '2.000')
        self.assertEqual(self.format.format(1.9999, '0.0000'), '1.9999')

    def testFormatScientificDecimal(self):
        self.assertEqual(self.format.format(23341.02357, '0.00####E00'),
                         '2.334102E04')
        self.assertEqual(self.format.format(23341.02, '0.0000000E000'),
                         '2.3341020E004')

    def testFormatScientificDecimalSmallerOne(self):
        self.assertEqual(self.format.format(0.02357, '0.00####E00'),
                         '2.357E-02')
        self.assertEqual(self.format.format(0.02, '0.0000E00'),
                         '2.0000E-02')

    def testFormatPadding1WithoutPrefix(self):
        self.assertEqual(self.format.format(41, '* ##0;*_##0'),
                         ' 41')
        self.assertEqual(self.format.format(-41, '* ##0;*_##0'),
                         '_41')

    def testFormatPadding1WithPrefix(self):
        self.assertEqual(self.format.format(41, '* +##0;*_-##0'),
                         ' +41')
        self.assertEqual(self.format.format(-41, '* +##0;*_-##0'),
                         '_-41')

    def testFormatPadding1Scientific(self):
        self.assertEqual(self.format.format(41.02, '* 0.0####E0;*_0.0####E0'),
                         ' 4.102E1')
        self.assertEqual(self.format.format(-41.02, '* 0.0####E0;*_0.0####E0'),
                         '__4.102E1')
        self.assertEqual(self.format.format(41.02, '* +0.0###E0;*_-0.0###E0'),
                         ' +4.102E1')
        self.assertEqual(self.format.format(-41.02, '* +0.0###E0;*_-0.0###E0'),
                         '_-4.102E1')

    def testFormatPadding1Padding2WithPrefix(self):
        self.assertEqual(self.format.format(41, '* +* ###0;*_-*_###0'),
                         ' + 41')
        self.assertEqual(self.format.format(-41, '* +* ###0;*_-*_###0'),
                         '__-_41')

    def testFormatPadding3WithoutSufffix(self):
        self.assertEqual(self.format.format(41.02, '#0.0###* ;#0.0###*_'),
                         '41.02 ')
        self.assertEqual(self.format.format(-41.02, '#0.0###* ;#0.0###*_'),
                         '41.02__')

    def testFormatPadding3WithSufffix(self):
        self.assertEqual(self.format.format(41.02, '[#0.0###* ];(#0.0###*_)'),
                         '[41.02 ]')
        self.assertEqual(self.format.format(-41.02, '[#0.0###* ];(#0.0###*_)'),
                         '(41.02__)')

    def testFormatPadding3Scientific(self):
        self.assertEqual(self.format.format(41.02, '0.0##E0##* ;0.0##E0##*_'),
                         '4.102E1 ')
        self.assertEqual(self.format.format(-41.02, '0.0##E0##* ;0.0##E0##*_'),
                         '4.102E1__')
        self.assertEqual(self.format.format(41.02, '(0.0##E0##* );0.0E0'),
                         '(4.102E1 )')
        self.assertEqual(self.format.format(-41.02, '0.0E0;[0.0##E0##*_]'),
                         '[4.102E1__]')

    def testFormatPadding3Padding4WithSuffix(self):
        self.assertEqual(self.format.format(41.02, '(#0.0###* )* '),
                         '(41.02 ) ')
        self.assertEqual(self.format.format(41.02, '(0.0##E0##* )* '),
                         '(4.102E1 ) ')
def test_suite():
    """Assemble all test classes in this module into one suite."""
    suites = [
        makeSuite(TestDateTimePatternParser),
        makeSuite(TestBuildDateTimeParseInfo),
        makeSuite(TestDateTimeFormat),
        makeSuite(TestNumberPatternParser),
        makeSuite(TestNumberFormat),
    ]
    return TestSuite(suites)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#pragma once
#include <c10/core/SymBool.h>
#include <c10/core/SymNodeImpl.h>
#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <optional>
#include <ostream>
#include <type_traits>
namespace c10 {
class SymFloat;
// SymInt represents either a regular int64_t, or a symbolic integer
// (represented in a type erased way as SymNode). The intention is for SymInt
// to represent symbolic sizes that arise when doing shape computation in
// operator kernels. This allows for tracing through programs without baking in
// concrete sizes into kernel calls.
//
// SymInt has an API equivalent to int64_t. In particular, it is a value type.
// Internally, SymInt is represented in a clever packed way, so that it only
// occupies one word of space; but morally, it is a union between an int64_t
// and an intrusive pointer to SymNodeImpl.
//
// Invariant: the referenced SymNodeImpl is guaranteed to be a SymNode where
// is_int() returns true
class C10_API SymInt {
 public:
  enum Unchecked {
    UNCHECKED,
  };

  /*implicit*/ SymInt(int64_t d) : data_(d) {
    if (is_heap_allocated()) {
      // Large negative number, heap allocate it
      promote_to_negative();
    }
  }
  SymInt() : data_(0) {}
  SymInt(SymNode n);

  // unchecked c-tor accepting raw `data_`
  // One appropriate use for this is when you are constructing a symint
  // in a situation where you know it is non-negative (or, if it is negative,
  // the negative value is -1; i.e., not user controlled)
  SymInt(Unchecked /*unused*/, int64_t d) : data_(d) {}

  // TODO: these implementations are not optimal because they allocate a
  // temporary and then use the move constructor/assignment
  SymInt(const SymInt& s) : data_(0) {
    if (s.is_heap_allocated()) {
      // Re-wrapping the node bumps its refcount via the SymNode c-tor.
      *this = SymInt(s.toSymNode());
    } else {
      data_ = s.data_;
    }
  }
  SymInt(SymInt&& s) noexcept : data_(s.data_) {
    // Zero out the source so its destructor won't release the node.
    s.data_ = 0;
  }

  SymInt& operator=(const SymInt& s) {
    if (this != &s) {
      if (s.is_heap_allocated()) {
        *this = SymInt(s.toSymNode());
      } else {
        data_ = s.data_;
      }
    }
    return *this;
  }
  SymInt& operator=(SymInt&& s) noexcept {
    if (this != &s) {
      release_(); // release the current SymNode if any
      data_ = s.data_;
      if (s.is_heap_allocated())
        s.data_ = 0;
    }; // NOTE(review): stray ';' — harmless empty statement
    return *this;
  }

  // Decode the tagged pointer without touching the refcount.
  SymNodeImpl* toSymNodeImplUnowned() const {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(is_heap_allocated());
    uint64_t unextended_bits = static_cast<uint64_t>(data_) & ~MASK;
    uint64_t sign_bit_mask = 1ULL << (62 - 1);
    // https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c
    uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask;
    return static_cast<SymNodeImpl*>(
        // NOLINTNEXTLINE(performance-no-int-to-ptr, bugprone*)
        reinterpret_cast<void*>(static_cast<uintptr_t>(extended_bits)));
  }

  // Drop our owning reference, if any: reclaim() wraps the raw pointer in
  // a temporary owning SymNode whose destructor decrements the refcount.
  void release_() {
    if (is_heap_allocated()) {
      SymNode::reclaim(toSymNodeImplUnowned()); // steal
    }
  }

  SymNodeImpl* release() && {
#ifndef C10_MOBILE
    TORCH_INTERNAL_ASSERT(is_heap_allocated());
    auto* r = toSymNodeImplUnowned();
    data_ = 0; // transfer ownership
    return r;
#else
    TORCH_INTERNAL_ASSERT(false);
#endif
  }

  // Only valid if is_heap_allocated()
  SymNode toSymNode() const;

  // Guaranteed to return a SymNode, wrapping using base if necessary
  SymNode wrap_node(const SymNode& base) const;

  ~SymInt() {
    release_();
  }

  // Require the int to be non-symbolic, and if it is symbolic raise an
  // error. This is safe to use for C++ code that doesn't work for symbolic
  // shapes, and you don't have time to fix it immediately, as if we
  // try to trigger the path in C++ you'll appropriately get an error
  int64_t expect_int() const {
    if (auto r = maybe_as_int()) {
      return *r;
    }
    TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(
        false, "when unpacking SymInt, expected int but got ", *this);
  }

  // Test if we have a hint for this int (e.g., guard_int would work).
  // Most of the time this is true; it is only false when you have
  // an unbacked SymInt.
  bool has_hint() const;

  // Insert a guard for the int to be its concrete value, and then return
  // that value. This operation always works, even if the int is symbolic,
  // so long as we know what the underlying value is (e.g., this won't work
  // if you call it on the size of nonzero output). Don't blindly put this
  // everywhere; you can cause overspecialization of PyTorch programs with
  // this method.
  //
  // It should be called as guard_int(__FILE__, __LINE__). The file and line
  // number can be used to diagnose overspecialization.
  int64_t guard_int(const char* file, int64_t line) const;

  // Distinguish actual symbolic values from constants stored on the heap
  bool is_symbolic() const {
    return is_heap_allocated() &&
        !toSymNodeImplUnowned()->constant_int().has_value();
  }

  // N.B. It's important to keep this definition in the header
  // as we expect if checks to be folded for mobile builds
  // where `is_heap_allocated` is always false and optimize dead code paths
  C10_ALWAYS_INLINE bool is_heap_allocated() const {
#ifdef C10_MOBILE
    return false;
#else
    return !check_range(data_);
#endif
  }

  // Arithmetic operators: each takes the inline fast path when both
  // operands are plain ints, and otherwise defers to an out-of-line
  // slow path that dispatches on the SymNode(s).
  SymInt operator+(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymInt(*ma + *mb);
      }
    }
    return operator_add_slow_path(sci);
  }

  SymInt operator-(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymInt(*ma - *mb);
      }
    }
    return operator_sub_slow_path(sci);
  }

  SymInt operator*(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymInt(*ma * *mb);
      }
    }
    return operator_mul_slow_path(sci);
  }

  SymInt operator/(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymInt(*ma / *mb);
      }
    }
    return operator_div_slow_path(sci);
  }

  SymInt operator%(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymInt(*ma % *mb);
      }
    }
    return operator_mod_slow_path(sci);
  }

  void operator*=(const SymInt& sci) {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        *this = SymInt(*ma * *mb);
        return;
      }
    }
    operator_imul_slow_path(sci);
  }

  void operator+=(const SymInt& sci) {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        *this = SymInt(*ma + *mb);
        return;
      }
    }
    operator_iadd_slow_path(sci);
  }

  void operator/=(const SymInt& sci) {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        *this = SymInt(*ma / *mb);
        return;
      }
    }
    operator_idiv_slow_path(sci);
  }

  SymInt clone() const;

  // Symbolic comparisons: return a SymBool rather than a bool so the
  // comparison itself can stay symbolic (no guard inserted here).
  SymBool sym_eq(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymBool(*ma == *mb);
      }
    }
    return sym_eq_slow_path(sci);
  }

  SymBool sym_ne(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymBool(*ma != *mb);
      }
    }
    return sym_ne_slow_path(sci);
  }

  SymBool sym_lt(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymBool(*ma < *mb);
      }
    }
    return sym_lt_slow_path(sci);
  }

  SymBool sym_le(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymBool(*ma <= *mb);
      }
    }
    return sym_le_slow_path(sci);
  }

  SymBool sym_gt(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymBool(*ma > *mb);
      }
    }
    return sym_gt_slow_path(sci);
  }

  SymBool sym_ge(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymBool(*ma >= *mb);
      }
    }
    return sym_ge_slow_path(sci);
  }

  // bool comparisons guard on the concrete value (may specialize traces).
  bool operator==(const SymInt& o) const {
    return sym_eq(o).guard_bool(__FILE__, __LINE__);
  }

  bool operator!=(const SymInt& o) const {
    return sym_ne(o).guard_bool(__FILE__, __LINE__);
  }

  bool operator<(const SymInt& o) const {
    return sym_lt(o).guard_bool(__FILE__, __LINE__);
  }

  bool operator<=(const SymInt& o) const {
    return sym_le(o).guard_bool(__FILE__, __LINE__);
  }

  bool operator>(const SymInt& o) const {
    return sym_gt(o).guard_bool(__FILE__, __LINE__);
  }

  bool operator>=(const SymInt& o) const {
    return sym_ge(o).guard_bool(__FILE__, __LINE__);
  }

  SymInt min(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymInt(std::min(*ma, *mb));
      }
    }
    return min_slow_path(sci);
  }
  SymInt max(const SymInt& sci) const {
    if (auto ma = maybe_as_int()) {
      if (auto mb = sci.maybe_as_int()) {
        return SymInt(std::max(*ma, *mb));
      }
    }
    return max_slow_path(sci);
  }

  // If both are symbolic, this checks if
  // they share the same node.
  // If both are not symbolic this just checks normal equality.
  bool is_same(const SymInt& other) const;

  operator SymFloat() const;

  void unsafe_set_data(size_t nbytes) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_heap_allocated());
    // NOTE(review): no range check on nbytes; a value large enough to set
    // the tag bits would be misread as a pointer — confirm callers
    // guarantee this cannot happen.
    data_ = static_cast<int64_t>(nbytes);
  }

  // Don't use this. Prefer maybe_as_int instead
  int64_t as_int_unchecked() const {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_heap_allocated());
    return data_;
  }

  std::optional<int64_t> maybe_as_int() const {
    if (!is_heap_allocated()) {
      return data_;
    }
    return maybe_as_int_slow_path();
  }

  // Return whether the integer is directly coercible to a SymInt
  // without requiring heap allocation.  You don't need to use this
  // to check if you can pass an integer to SymInt; this is guaranteed
  // to work (it just might heap allocate!)
  static bool check_range(int64_t i) {
    return i > MAX_UNREPRESENTABLE_INT;
  }

  // Return the min representable integer as a SymInt without
  // heap allocation.  For quantities that count bytes (or larger),
  // this is still much larger than you need, so you may consider
  // using this as a more efficient version of MIN_INT
  static constexpr int64_t min_representable_int() {
    return MAX_UNREPRESENTABLE_INT + 1;
  }

 private:
  void promote_to_negative();
  SymInt operator_add_slow_path(const SymInt& sci) const;
  SymInt operator_sub_slow_path(const SymInt& sci) const;
  SymInt operator_mul_slow_path(const SymInt& sci) const;
  SymInt operator_div_slow_path(const SymInt& sci) const;
  SymInt operator_mod_slow_path(const SymInt& sci) const;
  void operator_imul_slow_path(const SymInt& sci);
  void operator_iadd_slow_path(const SymInt& sci);
  void operator_idiv_slow_path(const SymInt& sci);
  SymBool sym_eq_slow_path(const SymInt& sci) const;
  SymBool sym_ne_slow_path(const SymInt& sci) const;
  SymBool sym_lt_slow_path(const SymInt& sci) const;
  SymBool sym_le_slow_path(const SymInt& sci) const;
  SymBool sym_gt_slow_path(const SymInt& sci) const;
  SymBool sym_ge_slow_path(const SymInt& sci) const;
  SymInt min_slow_path(const SymInt& sci) const;
  SymInt max_slow_path(const SymInt& sci) const;
  std::optional<int64_t> maybe_as_int_slow_path() const;

  // Constraints on the internal representation:
  //
  // - Should represent positive and small negative ints
  // - No conversion necessary for operations on ints
  // - Must represent valid 64-bit pointers
  // - Is symbolic test should be FAST (two arithmetic instructions is too
  //   much).
  //   This code being a hotpath is based on Strobelight profiles of
  //   is_heap_allocated(). FB only: https://fburl.com/strobelight/5l50ncxd
  //   (you will need to change the time window).
  //
  // So, the scheme is to reserve large negative numbers (assuming
  // two's complement):
  //
  // - 0b0.... means we are a positive int
  // - 0b11... means we are a small negative int
  // - 0b10... means we are are a pointer. This means that
  //           [-2^63, -2^62-1] are not representable as ints.
  //           We don't actually need all of this space as on x86_64
  //           as the top 16bits aren't used for anything
  static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62 | 1ULL << 61;
  static constexpr uint64_t IS_SYM = 1ULL << 63 | 1ULL << 61;
  // We must manually translate the bit pattern test into a greater
  // than test because compiler doesn't figure it out:
  // https://godbolt.org/z/356aferaW
  static constexpr int64_t MAX_UNREPRESENTABLE_INT =
      -1LL & static_cast<int64_t>(~(1ULL << 62));
  int64_t data_;
};
/// Product of a list of SymInt; accumulates into the c10::SymInt expression.
/// (The previous doc said "Sum", but the fold below multiplies.)
template <
    typename C,
    typename std::enable_if_t<
        std::is_same_v<typename C::value_type, c10::SymInt>,
        int> = 0>
inline c10::SymInt multiply_integers(const C& container) {
  return std::accumulate(
      container.begin(),
      container.end(),
      c10::SymInt(1),
      [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
}
// Product of the half-open iterator range [begin, end) of c10::SymInt.
// Starts from SymInt(1), so an empty range yields 1.
template <
    typename Iter,
    typename = std::enable_if_t<std::is_same_v<
        typename std::iterator_traits<Iter>::value_type,
        c10::SymInt>>>
inline c10::SymInt multiply_integers(Iter begin, Iter end) {
  c10::SymInt product(1);
  for (Iter it = begin; it != end; ++it) {
    product = product * *it;
  }
  return product;
}
// Declares heterogeneous operator% overloads between SymInt and a plain
// scalar. Kept separate from DECLARE_SYMINT_OP because modulo only makes
// sense for integral scalar types.
#define DECLARE_SYMINT_OP_INTONLY(scalar_t, RetTy)      \
  C10_API RetTy operator%(const SymInt& a, scalar_t b); \
  C10_API RetTy operator%(scalar_t a, const SymInt& b);
// Declares the full set of heterogeneous arithmetic and comparison
// operators between SymInt and a plain scalar, in both argument orders.
#define DECLARE_SYMINT_OP(scalar_t, RetTy)              \
  C10_API RetTy operator+(const SymInt& a, scalar_t b); \
  C10_API RetTy operator-(const SymInt& a, scalar_t b); \
  C10_API RetTy operator*(const SymInt& a, scalar_t b); \
  C10_API RetTy operator/(const SymInt& a, scalar_t b); \
  C10_API RetTy operator+(scalar_t a, const SymInt& b); \
  C10_API RetTy operator-(scalar_t a, const SymInt& b); \
  C10_API RetTy operator*(scalar_t a, const SymInt& b); \
  C10_API RetTy operator/(scalar_t a, const SymInt& b); \
  C10_API bool operator==(const SymInt& a, scalar_t b); \
  C10_API bool operator!=(const SymInt& a, scalar_t b); \
  C10_API bool operator<(const SymInt& a, scalar_t b);  \
  C10_API bool operator<=(const SymInt& a, scalar_t b); \
  C10_API bool operator>(const SymInt& a, scalar_t b);  \
  C10_API bool operator>=(const SymInt& a, scalar_t b); \
  C10_API bool operator==(scalar_t a, const SymInt& b); \
  C10_API bool operator!=(scalar_t a, const SymInt& b); \
  C10_API bool operator<(scalar_t a, const SymInt& b);  \
  C10_API bool operator<=(scalar_t a, const SymInt& b); \
  C10_API bool operator>(scalar_t a, const SymInt& b);  \
  C10_API bool operator>=(scalar_t a, const SymInt& b);
DECLARE_SYMINT_OP_INTONLY(int64_t, SymInt)
DECLARE_SYMINT_OP_INTONLY(int32_t, SymInt)
DECLARE_SYMINT_OP_INTONLY(uint64_t, SymInt)
DECLARE_SYMINT_OP_INTONLY(uint32_t, SymInt)
DECLARE_SYMINT_OP(int64_t, SymInt)
DECLARE_SYMINT_OP(int32_t, SymInt) // make sure constants work
DECLARE_SYMINT_OP(uint64_t, SymInt)
DECLARE_SYMINT_OP(uint32_t, SymInt)
DECLARE_SYMINT_OP(double, SymFloat)
DECLARE_SYMINT_OP(float, SymFloat) // just for completeness
// On OSX size_t is different than uint64_t so we have to
// define it separately
#if defined(__APPLE__)
DECLARE_SYMINT_OP_INTONLY(size_t, SymInt)
DECLARE_SYMINT_OP(size_t, SymInt)
#endif
// Undefine BOTH helper macros so they do not leak into every translation
// unit that includes this header (previously only DECLARE_SYMINT_OP was
// undefined, leaving DECLARE_SYMINT_OP_INTONLY polluting the macro
// namespace of downstream code).
#undef DECLARE_SYMINT_OP_INTONLY
#undef DECLARE_SYMINT_OP
// Stream insertion and unary negation for SymInt (defined out of line).
C10_API std::ostream& operator<<(std::ostream& os, const SymInt& s);
C10_API SymInt operator-(const SymInt& s);
// The sym_* helpers below give generic code one uniform spelling for
// comparisons over both plain int64_t (returning bool) and SymInt
// (returning SymBool); the right form is chosen by overload resolution.
inline bool sym_eq(int64_t a, int64_t b) {
  return a == b;
}
inline SymBool sym_eq(const SymInt& a, const SymInt& b) {
  return a.sym_eq(b);
}
inline bool sym_ne(int64_t a, int64_t b) {
  return a != b;
}
inline SymBool sym_ne(const SymInt& a, const SymInt& b) {
  return a.sym_ne(b);
}
inline bool sym_lt(int64_t a, int64_t b) {
  return a < b;
}
inline SymBool sym_lt(const SymInt& a, const SymInt& b) {
  return a.sym_lt(b);
}
inline bool sym_le(int64_t a, int64_t b) {
  return a <= b;
}
inline SymBool sym_le(const SymInt& a, const SymInt& b) {
  return a.sym_le(b);
}
inline bool sym_gt(int64_t a, int64_t b) {
  return a > b;
}
inline SymBool sym_gt(const SymInt& a, const SymInt& b) {
  return a.sym_gt(b);
}
inline bool sym_ge(int64_t a, int64_t b) {
  return a >= b;
}
inline SymBool sym_ge(const SymInt& a, const SymInt& b) {
  return a.sym_ge(b);
}
} // namespace c10
#include <limits>
// numeric_limits specialization: a SymInt's concrete (non-symbolic) value
// range is exactly that of int64_t, so we forward to the int64_t limits.
// NOTE(review): max()/min() return int64_t rather than c10::SymInt, which
// deviates from the standard numeric_limits contract — presumably so they
// can stay constexpr (SymInt construction may heap-allocate); confirm that
// callers rely on the int64_t return type before changing.
namespace std {
template <>
class numeric_limits<c10::SymInt> {
 public:
  static constexpr bool is_specialized = true;
  static constexpr int64_t max() noexcept {
    return std::numeric_limits<int64_t>::max();
  }
  static constexpr int64_t min() noexcept {
    return std::numeric_limits<int64_t>::min();
  }
  static constexpr bool is_signed = true;
  static constexpr bool is_integer = true;
};
} // namespace std
|
c
|
github
|
https://github.com/pytorch/pytorch
|
c10/core/SymInt.h
|
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package coordination
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE: everything below is machine-generated by deepcopy-gen (see the
// file header). Do not hand-edit the copy logic; regenerate instead.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Lease) DeepCopyInto(out *Lease) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease.
func (in *Lease) DeepCopy() *Lease {
	if in == nil {
		return nil
	}
	out := new(Lease)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Lease) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate.
func (in *LeaseCandidate) DeepCopy() *LeaseCandidate {
	if in == nil {
		return nil
	}
	out := new(LeaseCandidate)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LeaseCandidate) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Re-bind in/out to the slice fields (intentional shadowing), then
		// copy element-wise: copying the slice header alone would share
		// the backing array between the two objects.
		in, out := &in.Items, &out.Items
		*out = make([]LeaseCandidate, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList.
func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList {
	if in == nil {
		return nil
	}
	out := new(LeaseCandidateList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LeaseCandidateList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) {
	*out = *in
	if in.PingTime != nil {
		in, out := &in.PingTime, &out.PingTime
		*out = (*in).DeepCopy()
	}
	if in.RenewTime != nil {
		in, out := &in.RenewTime, &out.RenewTime
		*out = (*in).DeepCopy()
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec.
func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec {
	if in == nil {
		return nil
	}
	out := new(LeaseCandidateSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseList) DeepCopyInto(out *LeaseList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Lease, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList.
func (in *LeaseList) DeepCopy() *LeaseList {
	if in == nil {
		return nil
	}
	out := new(LeaseList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LeaseList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) {
	// The shallow *out = *in is followed by per-field fixups: every
	// pointer field must be re-allocated so the copy owns its own memory.
	*out = *in
	if in.HolderIdentity != nil {
		in, out := &in.HolderIdentity, &out.HolderIdentity
		*out = new(string)
		**out = **in
	}
	if in.LeaseDurationSeconds != nil {
		in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds
		*out = new(int32)
		**out = **in
	}
	if in.AcquireTime != nil {
		in, out := &in.AcquireTime, &out.AcquireTime
		*out = (*in).DeepCopy()
	}
	if in.RenewTime != nil {
		in, out := &in.RenewTime, &out.RenewTime
		*out = (*in).DeepCopy()
	}
	if in.LeaseTransitions != nil {
		in, out := &in.LeaseTransitions, &out.LeaseTransitions
		*out = new(int32)
		**out = **in
	}
	if in.Strategy != nil {
		in, out := &in.Strategy, &out.Strategy
		*out = new(CoordinatedLeaseStrategy)
		**out = **in
	}
	if in.PreferredHolder != nil {
		in, out := &in.PreferredHolder, &out.PreferredHolder
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec.
func (in *LeaseSpec) DeepCopy() *LeaseSpec {
	if in == nil {
		return nil
	}
	out := new(LeaseSpec)
	in.DeepCopyInto(out)
	return out
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/apis/coordination/zz_generated.deepcopy.go
|
import os
import sys
OldPy = sys.version_info[0] == 2 and sys.version_info[1] < 7
class TestingConfig:
    """
    TestingConfig - Information on the tests inside a suite.

    Holds everything needed to discover and run the tests of one suite
    (or one local config directory): name, file suffixes, environment,
    substitutions, feature flags, and the source/exec roots.
    """

    @staticmethod
    def fromdefaults(litConfig):
        """
        fromdefaults(litConfig) -> TestingConfig

        Create a TestingConfig object with default values.
        """
        # Set the environment based on the command line arguments.
        environment = {
            'PATH': os.pathsep.join(litConfig.path +
                                    [os.environ.get('PATH', '')]),
            'LLVM_DISABLE_CRASH_REPORT': '1',
            }

        pass_vars = ['LIBRARY_PATH', 'LD_LIBRARY_PATH', 'SYSTEMROOT', 'TERM',
                     'LD_PRELOAD', 'ASAN_OPTIONS', 'UBSAN_OPTIONS',
                     'LSAN_OPTIONS']
        for var in pass_vars:
            val = os.environ.get(var, '')
            # Check for empty string as some variables such as LD_PRELOAD
            # cannot be empty ('') for OS's such as OpenBSD.
            if val:
                environment[var] = val

        if sys.platform == 'win32':
            environment.update({
                    'INCLUDE': os.environ.get('INCLUDE', ''),
                    'PATHEXT': os.environ.get('PATHEXT', ''),
                    'PYTHONUNBUFFERED': '1',
                    'TEMP': os.environ.get('TEMP', ''),
                    'TMP': os.environ.get('TMP', ''),
                    })

        # The option to preserve TEMP, TMP, and TMPDIR.
        # This is intended to check how many temporary files would be
        # generated (and be not cleaned up) in automated builders.
        if 'LIT_PRESERVES_TMP' in os.environ:
            environment.update({
                    'TEMP': os.environ.get('TEMP', ''),
                    'TMP': os.environ.get('TMP', ''),
                    'TMPDIR': os.environ.get('TMPDIR', ''),
                    })

        # Set the default available features based on the LitConfig.
        available_features = []
        if litConfig.useValgrind:
            available_features.append('valgrind')
        if litConfig.valgrindLeakCheck:
            available_features.append('vg_leak')

        return TestingConfig(None,
                             name = '<unnamed>',
                             suffixes = set(),
                             test_format = None,
                             environment = environment,
                             substitutions = [],
                             unsupported = False,
                             test_exec_root = None,
                             test_source_root = None,
                             excludes = [],
                             available_features = available_features,
                             pipefail = True)

    def load_from_path(self, path, litConfig):
        """
        load_from_path(path, litConfig)

        Load the configuration module at the provided path into the given
        config object.
        """
        # Load the config script data. On modern Pythons the source is read
        # here and compiled below; the OldPy path defers to execfile().
        data = None
        if not OldPy:
            try:
                # 'with' guarantees the handle is closed even when read()
                # raises (the previous open()/close() pair leaked the handle
                # on a read error).
                with open(path) as f:
                    data = f.read()
            except Exception:
                litConfig.fatal('unable to load config file: %r' % (path,))

        # Execute the config script to initialize the object.
        cfg_globals = dict(globals())
        cfg_globals['config'] = self
        cfg_globals['lit_config'] = litConfig
        cfg_globals['__file__'] = path
        try:
            if OldPy:
                execfile(path, cfg_globals)
            else:
                exec(compile(data, path, 'exec'), cfg_globals, None)
            if litConfig.debug:
                litConfig.note('... loaded config %r' % path)
        except SystemExit:
            e = sys.exc_info()[1]
            # We allow normal system exit inside a config file to just
            # return control without error.
            if e.args:
                raise
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt still
            # aborts the run instead of being reported as a parse failure.
            import traceback
            litConfig.fatal(
                'unable to parse config file %r, traceback: %s' % (
                    path, traceback.format_exc()))
        self.finish(litConfig)

    def __init__(self, parent, name, suffixes, test_format,
                 environment, substitutions, unsupported,
                 test_exec_root, test_source_root, excludes,
                 available_features, pipefail):
        self.parent = parent
        self.name = str(name)
        self.suffixes = set(suffixes)
        self.test_format = test_format
        self.environment = dict(environment)
        self.substitutions = list(substitutions)
        self.unsupported = unsupported
        self.test_exec_root = test_exec_root
        self.test_source_root = test_source_root
        self.excludes = set(excludes)
        self.available_features = set(available_features)
        self.pipefail = pipefail

    def finish(self, litConfig):
        """finish() - Finish this config object, after loading is complete."""
        # Re-normalize the user-settable attributes: config scripts may have
        # assigned arbitrary (e.g. list or unicode) values to them.
        self.name = str(self.name)
        self.suffixes = set(self.suffixes)
        self.environment = dict(self.environment)
        self.substitutions = list(self.substitutions)
        if self.test_exec_root is not None:
            # FIXME: This should really only be suite in test suite config
            # files. Should we distinguish them?
            self.test_exec_root = str(self.test_exec_root)
        if self.test_source_root is not None:
            # FIXME: This should really only be suite in test suite config
            # files. Should we distinguish them?
            self.test_source_root = str(self.test_source_root)
        self.excludes = set(self.excludes)

    @property
    def root(self):
        """root attribute - The root configuration for the test suite."""
        if self.parent is None:
            return self
        else:
            return self.parent.root
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userns
import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
)
// Here go types that are common for all supported OS (windows, linux).
// userNsPodsManager is the interface through which the user-namespace
// manager queries the kubelet for pod and runtime information. It is
// implemented per supported OS (windows, linux).
type userNsPodsManager interface {
	// HandlerSupportsUserNamespaces reports whether the given runtime
	// handler supports user namespaces.
	HandlerSupportsUserNamespaces(runtimeHandler string) (bool, error)
	// GetPodDir returns the on-disk directory for the given pod UID.
	GetPodDir(podUID types.UID) string
	// ListPodsFromDisk lists the UIDs of pods found on disk.
	ListPodsFromDisk() ([]types.UID, error)
	// GetKubeletMappings returns two uint32 values given a per-pod ID
	// count — presumably the ID-mapping range available to the kubelet;
	// TODO(review): confirm the exact meaning against the implementations.
	GetKubeletMappings(logger klog.Logger, idsPerPod uint32) (uint32, uint32, error)
	// GetMaxPods returns the maximum number of pods.
	GetMaxPods() int
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/kubelet/userns/types.go
|
package network
import (
"github.com/moby/moby/v2/daemon/server/router"
)
// networkRouter is a router to talk with the network controller
type networkRouter struct {
	backend Backend // node-local network backend (presumably the daemon's libnetwork controller)
	cluster ClusterBackend // cluster-scoped backend — NOTE(review): likely swarm networks; confirm
	routes []router.Route // populated exactly once by initRoutes (called from NewRouter)
}
// NewRouter initializes a new network router
func NewRouter(b Backend, c ClusterBackend) router.Router {
	nr := new(networkRouter)
	nr.backend = b
	nr.cluster = c
	nr.initRoutes()
	return nr
}
// Routes returns the available routes to the network controller
func (n *networkRouter) Routes() []router.Route {
	// The slice is built once in initRoutes; no locking is done here.
	return n.routes
}
// initRoutes registers every HTTP endpoint the network router serves.
func (n *networkRouter) initRoutes() {
	n.routes = []router.Route{
		// GET
		router.NewGetRoute("/networks", n.getNetworksList),
		router.NewGetRoute("/networks/", n.getNetworksList),
		router.NewGetRoute("/networks/{id:.+}", n.getNetwork),
		// POST
		router.NewPostRoute("/networks/create", n.postNetworkCreate),
		router.NewPostRoute("/networks/{id:.*}/connect", n.postNetworkConnect),
		router.NewPostRoute("/networks/{id:.*}/disconnect", n.postNetworkDisconnect),
		// prune is gated: it only exists for API clients >= 1.25
		router.NewPostRoute("/networks/prune", n.postNetworkPrune, router.WithMinimumAPIVersion("1.25")),
		// DELETE
		router.NewDeleteRoute("/networks/{id:.*}", n.deleteNetwork),
	}
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/server/router/network/network.go
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 STI (<https://github.com/sumihai-tekindo>).
# @author Pambudi Satria <pambudi.satria@yahoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import osv, fields
from openerp.osv.expression import get_unaccent_wrapper
from openerp.tools.translate import _
class account_invoice(osv.Model):
    """Extend account.invoice (old osv API, Python 2) with a department
    and department-prefixed numbering for supplier invoices/refunds."""
    _inherit = "account.invoice"
    _columns = {
        'department_id': fields.many2one('account.invoice.department', 'Department', readonly=True, states={'draft': [('readonly', False)]}, ondelete='set null'),
    }
    def create(self, cr, uid, vals, context=None):
        """Override create() to pre-assign the supplier document number as
        ``<department code>/<journal sequence>`` at creation time (instead
        of at validation), defaulting the department from the journal."""
        if context is None:
            context = {}
        obj_sequence = self.pool.get('ir.sequence')
        obj_journal = self.pool.get('account.journal')
        obj_dept = self.pool.get('account.invoice.department')
        number = ''
        journal = self._default_journal(cr, uid, context)
        date_invoice = vals.get('date_invoice', fields.date.context_today(self, cr, uid, context))
        if vals.get('journal_id') and vals['journal_id']:
            journal = obj_journal.browse(cr, uid, vals['journal_id'])
        # Default the department from the journal when none was supplied.
        if ('department_id' not in vals) or ('department_id' in vals and not vals['department_id']):
            vals['department_id'] = journal.department_id and journal.department_id.id or False
        if vals.get('department_id') and vals['department_id']:
            department = obj_dept.browse(cr, uid, vals['department_id'])
        # Only supplier documents (in_invoice/in_refund) get numbered here;
        # the type can come from the context or from vals.
        if context.get('type', False) in ('in_invoice', 'in_refund') or (vals.get('type') and vals['type'] in ('in_invoice', 'in_refund')):
            if journal.sequence_id:
                # Evaluate the sequence against the invoice date so dated
                # sequence ranges (per fiscal year) pick the right number.
                ctx = dict(context)
                ctx['ir_sequence_date'] = date_invoice
                number = obj_sequence.next_by_id(cr, uid, journal.sequence_id.id, ctx)
            else:
                # NOTE(review): `_` (translation helper) must be imported from
                # openerp.tools.translate or this line raises NameError.
                raise osv.except_osv(_('Error!'), _('Please define a sequence on the journal.'))
            if number:
                # NOTE(review): `department` is only bound when department_id
                # was truthy above — verify it cannot be unset here.
                number = "%s/%s" % (department.name, number)
            vals['internal_number'] = number
        res_id = super(account_invoice, self).create(cr, uid, vals, context)
        # `number` is normally filled on validation; set it now so the
        # supplier document shows its reference immediately.
        if context.get('type', False) in ('in_invoice', 'in_refund') or (vals.get('type') and vals['type'] in ('in_invoice', 'in_refund')):
            self.write(cr, uid, [res_id], {'number': number})
        return res_id
    @api.multi
    def action_cancel(self):
        """On cancel, restore `number` from internal_number for supplier
        documents so the pre-assigned reference is not lost."""
        res = super(account_invoice, self).action_cancel()
        if self.type in ('in_invoice', 'in_refund'):
            self.write({'number': self.internal_number})
        return res
    @api.multi
    def finalize_invoice_move_lines(self, move_lines):
        """ finalize_invoice_move_lines(move_lines) -> move_lines

        Hook method to be overridden in additional modules to verify and
        possibly alter the move lines to be created by an invoice, for
        special cases.
        :param move_lines: list of dictionaries with the account.move.lines (as for create())
        :return: the (possibly updated) final move_lines to create for this invoice
        """
        # Propagate the invoice's department onto every generated move line
        # (move_lines entries are (0, 0, values) command triples).
        for move in move_lines:
            move[2]['department_id'] = self.department_id and self.department_id.id or False
        return move_lines
class account_journal(osv.Model):
    """Extend account.journal with a mandatory department, used as the
    default department for invoices and move lines created on it."""
    _inherit = "account.journal"
    _columns = {
        'department_id': fields.many2one('account.invoice.department', 'Department', ondelete='set null', required=True),
    }
class account_move_line(osv.osv):
    """Extend account.move.line with a department and department/analytic
    filtering of the reporting domain built by _query_get()."""
    _inherit = "account.move.line"
    _columns = {
        'department_id': fields.many2one('account.invoice.department', 'Department', ondelete='set null'),
    }
    def _query_get(self, cr, uid, obj='l', context=None):
        """Append department/analytic SQL filters to the base move-line
        query when the context carries 'department_ids'/'analytic_ids'.

        An empty (falsy) list filters for lines with a NULL value instead.
        """
        result = super(account_move_line, self)._query_get(cr, uid, obj=obj, context=context)
        context = dict(context or {})
        query = ''
        query_params = {}
        if 'department_ids' in context:
            if context.get('department_ids'):
                query_params['department_ids'] = tuple(context['department_ids'])
                query += ' AND ' + obj + '.department_id IN %(department_ids)s'
            else:
                query += ' AND ' + obj + '.department_id IS null'
        if 'analytic_ids' in context:
            if context.get('analytic_ids'):
                # Include all child analytic accounts of the requested ones.
                analytics = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', context['analytic_ids'])], context=context)
                query_params['analytic_ids'] = tuple(analytics)
                query += ' AND ' + obj + '.analytic_account_id IN %(analytic_ids)s'
            else:
                query += ' AND ' + obj + '.analytic_account_id IS null'
        if query:
            # mogrify renders the parameterized fragment with properly
            # escaped values, so the returned string is safe to embed.
            result += cr.mogrify(query, query_params)
        return result
    def create(self, cr, uid, vals, context=None, check=True):
        """Override create() to default the line's department from its
        journal when the caller did not provide one."""
        result = super(account_move_line, self).create(cr, uid, vals, context=context)
        if result and ('department_id' not in vals) and ('journal_id' in vals) and vals['journal_id']:
            journal = self.pool.get('account.journal').browse(cr, uid, vals['journal_id'], context=context)
            department = journal.department_id
            self.write(cr, uid, result, {'department_id': department and department.id or False}, context=context)
        return result
class account_invoice_department(osv.Model):
    """Invoice department: a short code plus description, with search and
    display helpers used for the department-prefixed invoice numbering."""
    _name = "account.invoice.department"
    _order = "description asc"
    _columns = {
        'name': fields.char('Code', size=4, required=True, copy=False),
        'description': fields.char('Description'),
        'active': fields.boolean('Active'),
        'user_id': fields.many2one('res.users', string='Manager'),
    }
    _defaults = {
        'active': True,
    }
    def name_get(self, cr, uid, ids, context=None):
        """Display the code by default; the description when the context
        sets 'description_only'."""
        if context is None:
            context = {}
        # `long` is Python 2 only — this module targets the old OpenERP API.
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = []
        for record in self.browse(cr, uid, ids, context=context):
            name = record.name
            if context.get('description_only'):
                name = record.description
            res.append((record.id, name))
        return res
    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Custom name search matching either the code or the description,
        using raw (unaccent-wrapped, parameterized) SQL so ir.rules and
        access rights are still applied via _where_calc."""
        if not args:
            args = []
        if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
            self.check_access_rights(cr, uid, 'read')
            where_query = self._where_calc(cr, uid, args, context=context)
            self._apply_ir_rules(cr, uid, where_query, 'read', context=context)
            from_clause, where_clause, where_clause_params = where_query.get_sql()
            where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
            # search on the name of the contacts and of its company
            search_name = name
            if operator in ('ilike', 'like'):
                search_name = '%%%s%%' % name
            if operator in ('=ilike', '=like'):
                operator = operator[1:]
            unaccent = get_unaccent_wrapper(cr)
            query = """SELECT id
                      FROM account_invoice_department
                      {where} ({name} {operator} {percent}
                      OR {description} {operator} {percent})
                     ORDER BY {description}
                  """.format(where=where_str, operator=operator,
                             name=unaccent('name'),
                             description=unaccent('description'),
                             percent=unaccent('%s'))
            where_clause_params += [search_name, search_name]
            if limit:
                query += ' limit %s'
                where_clause_params.append(limit)
            cr.execute(query, where_clause_params)
            # Python 2: map() returns a list, so the truthiness test works.
            ids = map(lambda x: x[0], cr.fetchall())
            if ids:
                return self.name_get(cr, uid, ids, context)
            else:
                return []
        return super(account_invoice_department, self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Base callback handler for LangChain."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from collections.abc import Sequence
from uuid import UUID
from tenacity import RetryCallState
from typing_extensions import Self
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.documents import Document
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
_LOGGER = logging.getLogger(__name__)
class RetrieverManagerMixin:
    """Mixin for `Retriever` callbacks.

    Both hooks are no-ops by default; subclasses override only the ones
    they need.
    """
    def on_retriever_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when `Retriever` errors.

        Args:
            error: The error that occurred.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
    def on_retriever_end(
        self,
        documents: Sequence[Document],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when `Retriever` ends running.

        Args:
            documents: The documents retrieved.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
class LLMManagerMixin:
    """Mixin for LLM callbacks.

    All hooks are no-ops by default; subclasses override only the ones
    they need.
    """
    def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: GenerationChunk | ChatGenerationChunk | None = None,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run on new output token.

        Only available when streaming is enabled.
        For both chat models and non-chat models (legacy text completion LLMs).

        Args:
            token: The new token.
            chunk: The new generated chunk, containing content and other information.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """
    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when LLM ends running.

        Args:
            response: The response which was generated.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """
    def on_llm_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when LLM errors.

        Args:
            error: The error that occurred.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """
class ChainManagerMixin:
    """Mixin for chain callbacks.

    Also carries the agent-step hooks, since agents run as chains. All
    hooks are no-ops by default.
    """
    def on_chain_end(
        self,
        outputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when chain ends running.

        Args:
            outputs: The outputs of the chain.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
    def on_chain_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when chain errors.

        Args:
            error: The error that occurred.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
    def on_agent_action(
        self,
        action: AgentAction,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run on agent action.

        Args:
            action: The agent action.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
    def on_agent_finish(
        self,
        finish: AgentFinish,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run on the agent end.

        Args:
            finish: The agent finish.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
class ToolManagerMixin:
    """Mixin for tool callbacks.

    Both hooks are no-ops by default; subclasses override only the ones
    they need.
    """
    def on_tool_end(
        self,
        output: Any,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when the tool ends running.

        Args:
            output: The output of the tool.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
    def on_tool_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when tool errors.

        Args:
            error: The error that occurred.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
class CallbackManagerMixin:
    """Mixin for callback manager.

    Groups the *start* hooks. All are no-ops by default except
    `on_chat_model_start`, which deliberately raises `NotImplementedError`
    so callers can fall back to `on_llm_start`.
    """
    def on_llm_start(
        self,
        serialized: dict[str, Any],
        prompts: list[str],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when LLM starts running.

        !!! warning
            This method is called for non-chat models (regular text completion LLMs). If
            you're implementing a handler for a chat model, you should use
            `on_chat_model_start` instead.

        Args:
            serialized: The serialized LLM.
            prompts: The prompts.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """
    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when a chat model starts running.

        !!! warning
            This method is called for chat models. If you're implementing a handler for
            a non-chat model, you should use `on_llm_start` instead.

        Args:
            serialized: The serialized chat model.
            messages: The messages.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """
        # NotImplementedError is thrown intentionally
        # Callback handler will fall back to on_llm_start if this exception is thrown
        msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
        raise NotImplementedError(msg)
    def on_retriever_start(
        self,
        serialized: dict[str, Any],
        query: str,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when the `Retriever` starts running.

        Args:
            serialized: The serialized `Retriever`.
            query: The query.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """
    def on_chain_start(
        self,
        serialized: dict[str, Any],
        inputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when a chain starts running.

        Args:
            serialized: The serialized chain.
            inputs: The inputs.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """
    def on_tool_start(
        self,
        serialized: dict[str, Any],
        input_str: str,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        inputs: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when the tool starts running.

        Args:
            serialized: The serialized chain.
            input_str: The input string.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            inputs: The inputs.
            **kwargs: Additional keyword arguments.
        """
class RunManagerMixin:
    """Mixin for run manager.

    Miscellaneous per-run hooks (free-form text, retries, custom events).
    All are no-ops by default.
    """
    def on_text(
        self,
        text: str,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run on an arbitrary text.

        Args:
            text: The text.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
    def on_retry(
        self,
        retry_state: RetryCallState,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run on a retry event.

        Args:
            retry_state: The retry state.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """
    def on_custom_event(
        self,
        name: str,
        data: Any,
        *,
        run_id: UUID,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Override to define a handler for a custom event.

        Args:
            name: The name of the custom event.
            data: The data for the custom event.
                Format will match the format specified by the user.
            run_id: The ID of the run.
            tags: The tags associated with the custom event (includes inherited tags).
            metadata: The metadata associated with the custom event (includes inherited
                metadata).
        """
class BaseCallbackHandler(
    LLMManagerMixin,
    ChainManagerMixin,
    ToolManagerMixin,
    RetrieverManagerMixin,
    CallbackManagerMixin,
    RunManagerMixin,
):
    """Base callback handler.

    Combines every callback mixin into one synchronous handler interface.
    Subclasses override the event hooks they need; the ``ignore_*``
    properties let a handler opt out of whole event families, and all
    default to ``False`` (handle everything).
    """

    raise_error: bool = False
    """Whether to raise an error if an exception occurs."""
    run_inline: bool = False
    """Whether to run the callback inline."""

    @property
    def ignore_llm(self) -> bool:
        """Whether to ignore LLM callbacks."""
        return False

    @property
    def ignore_retry(self) -> bool:
        """Whether to ignore retry callbacks."""
        return False

    @property
    def ignore_chain(self) -> bool:
        """Whether to ignore chain callbacks."""
        return False

    @property
    def ignore_agent(self) -> bool:
        """Whether to ignore agent callbacks."""
        return False

    @property
    def ignore_retriever(self) -> bool:
        """Whether to ignore retriever callbacks."""
        return False

    @property
    def ignore_chat_model(self) -> bool:
        """Whether to ignore chat model callbacks."""
        return False

    @property
    def ignore_custom_event(self) -> bool:
        """Whether to ignore custom event callbacks."""
        return False
class AsyncCallbackHandler(BaseCallbackHandler):
    """Base async callback handler.

    Async counterpart of `BaseCallbackHandler`: every hook is a coroutine so
    handlers can await I/O without blocking the event loop. All hooks are
    no-ops by default except `on_chat_model_start`, which deliberately raises
    `NotImplementedError` so callers can fall back to `on_llm_start`.
    """

    async def on_llm_start(
        self,
        serialized: dict[str, Any],
        prompts: list[str],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when the model starts running.

        !!! warning
            This method is called for non-chat models (regular text completion LLMs). If
            you're implementing a handler for a chat model, you should use
            `on_chat_model_start` instead.

        Args:
            serialized: The serialized LLM.
            prompts: The prompts.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    async def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when a chat model starts running.

        !!! warning
            This method is called for chat models. If you're implementing a handler for
            a non-chat model, you should use `on_llm_start` instead.

        Args:
            serialized: The serialized chat model.
            messages: The messages.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """
        # NotImplementedError is thrown intentionally
        # Callback handler will fall back to on_llm_start if this is exception is thrown
        msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
        raise NotImplementedError(msg)

    async def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: GenerationChunk | ChatGenerationChunk | None = None,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run on new output token. Only available when streaming is enabled.

        For both chat models and non-chat models (legacy text completion LLMs).

        Args:
            token: The new token.
            chunk: The new generated chunk, containing content and other information.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when the model ends running.

        Args:
            response: The response which was generated.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_llm_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when LLM errors.

        Args:
            error: The error that occurred.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments. May include:
                - response (LLMResult): The response which was generated before
                    the error occurred.
        """

    async def on_chain_start(
        self,
        serialized: dict[str, Any],
        inputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when a chain starts running.

        Args:
            serialized: The serialized chain.
            inputs: The inputs.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    async def on_chain_end(
        self,
        outputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when a chain ends running.

        Args:
            outputs: The outputs of the chain.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_chain_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.

        Args:
            error: The error that occurred.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_tool_start(
        self,
        serialized: dict[str, Any],
        input_str: str,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        inputs: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when the tool starts running.

        Args:
            serialized: The serialized tool.
            input_str: The input string.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            inputs: The inputs.
            **kwargs: Additional keyword arguments.
        """

    async def on_tool_end(
        self,
        output: Any,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when the tool ends running.

        Args:
            output: The output of the tool.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_tool_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run when tool errors.

        Args:
            error: The error that occurred.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_text(
        self,
        text: str,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run on an arbitrary text.

        Args:
            text: The text.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_retry(
        self,
        retry_state: RetryCallState,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run on a retry event.

        Args:
            retry_state: The retry state.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            **kwargs: Additional keyword arguments.
        """

    async def on_agent_action(
        self,
        action: AgentAction,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run on agent action.

        Args:
            action: The agent action.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_agent_finish(
        self,
        finish: AgentFinish,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run on the agent end.

        Args:
            finish: The agent finish.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_retriever_start(
        self,
        serialized: dict[str, Any],
        query: str,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run on the retriever start.

        Args:
            serialized: The serialized retriever.
            query: The query.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    async def on_retriever_end(
        self,
        documents: Sequence[Document],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run on the retriever end.

        Args:
            documents: The documents retrieved.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_retriever_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Run on retriever error.

        Args:
            error: The error that occurred.
            run_id: The ID of the current run.
            parent_run_id: The ID of the parent run.
            tags: The tags.
            **kwargs: Additional keyword arguments.
        """

    async def on_custom_event(
        self,
        name: str,
        data: Any,
        *,
        run_id: UUID,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        """Override to define a handler for custom events.

        Args:
            name: The name of the custom event.
            data: The data for the custom event.
                Format will match the format specified by the user.
            run_id: The ID of the run.
            tags: The tags associated with the custom event (includes inherited tags).
            metadata: The metadata associated with the custom event (includes inherited
                metadata).
            **kwargs: Additional keyword arguments.
        """
class BaseCallbackManager(CallbackManagerMixin):
    """Base callback manager.

    Holds the handlers, tags, and metadata dispatched to callbacks for a run,
    split into plain collections and "inheritable" collections that propagate
    to child runs.
    """

    def __init__(
        self,
        handlers: list[BaseCallbackHandler],
        inheritable_handlers: list[BaseCallbackHandler] | None = None,
        parent_run_id: UUID | None = None,
        *,
        tags: list[str] | None = None,
        inheritable_tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        inheritable_metadata: dict[str, Any] | None = None,
    ) -> None:
        """Initialize callback manager.

        Args:
            handlers: The handlers.
            inheritable_handlers: The inheritable handlers.
            parent_run_id: The parent run ID.
            tags: The tags.
            inheritable_tags: The inheritable tags.
            metadata: The metadata.
            inheritable_metadata: The inheritable metadata.
        """
        self.handlers: list[BaseCallbackHandler] = handlers
        self.inheritable_handlers: list[BaseCallbackHandler] = (
            inheritable_handlers or []
        )
        self.parent_run_id: UUID | None = parent_run_id
        self.tags = tags or []
        self.inheritable_tags = inheritable_tags or []
        self.metadata = metadata or {}
        self.inheritable_metadata = inheritable_metadata or {}

    def copy(self) -> Self:
        """Return a copy of the callback manager.

        Shallow copy: the lists/dicts are copied, but the handler objects
        themselves are shared with the original.
        """
        return self.__class__(
            handlers=self.handlers.copy(),
            inheritable_handlers=self.inheritable_handlers.copy(),
            parent_run_id=self.parent_run_id,
            tags=self.tags.copy(),
            inheritable_tags=self.inheritable_tags.copy(),
            metadata=self.metadata.copy(),
            inheritable_metadata=self.inheritable_metadata.copy(),
        )

    def merge(self, other: BaseCallbackManager) -> Self:
        """Merge the callback manager with another callback manager.

        May be overwritten in subclasses.

        Primarily used internally within `merge_configs`.

        Returns:
            The merged callback manager of the same type as the current object.

        Example:
            ```python
            # Merging two callback managers`
            from langchain_core.callbacks.manager import (
                CallbackManager,
                trace_as_chain_group,
            )
            from langchain_core.callbacks.stdout import StdOutCallbackHandler
            manager = CallbackManager(handlers=[StdOutCallbackHandler()], tags=["tag2"])
            with trace_as_chain_group("My Group Name", tags=["tag1"]) as group_manager:
                merged_manager = group_manager.merge(manager)
                print(merged_manager.handlers)
                # [
                #    <langchain_core.callbacks.stdout.StdOutCallbackHandler object at ...>,
                #    <langchain_core.callbacks.streaming_stdout.StreamingStdOutCallbackHandler object at ...>,
                # ]
                print(merged_manager.tags)
                #    ['tag2', 'tag1']
            ```
        """  # noqa: E501
        # Combine handlers and inheritable_handlers separately, using sets
        # to deduplicate (order not preserved)
        combined_handlers = list(set(self.handlers) | set(other.handlers))
        combined_inheritable = list(
            set(self.inheritable_handlers) | set(other.inheritable_handlers)
        )
        # Metadata merge is right-biased: other's values win on key clashes.
        return self.__class__(
            parent_run_id=self.parent_run_id or other.parent_run_id,
            handlers=combined_handlers,
            inheritable_handlers=combined_inheritable,
            tags=list(set(self.tags + other.tags)),
            inheritable_tags=list(set(self.inheritable_tags + other.inheritable_tags)),
            metadata={
                **self.metadata,
                **other.metadata,
            },
            inheritable_metadata={
                **self.inheritable_metadata,
                **other.inheritable_metadata,
            },
        )

    @property
    def is_async(self) -> bool:
        """Whether the callback manager is async."""
        return False

    def add_handler(
        self,
        handler: BaseCallbackHandler,
        inherit: bool = True,  # noqa: FBT001,FBT002
    ) -> None:
        """Add a handler to the callback manager.

        Args:
            handler: The handler to add.
            inherit: Whether to inherit the handler.
        """
        if handler not in self.handlers:
            self.handlers.append(handler)
        if inherit and handler not in self.inheritable_handlers:
            self.inheritable_handlers.append(handler)

    def remove_handler(self, handler: BaseCallbackHandler) -> None:
        """Remove a handler from the callback manager.

        Args:
            handler: The handler to remove.
        """
        if handler in self.handlers:
            self.handlers.remove(handler)
        if handler in self.inheritable_handlers:
            self.inheritable_handlers.remove(handler)

    def set_handlers(
        self,
        handlers: list[BaseCallbackHandler],
        inherit: bool = True,  # noqa: FBT001,FBT002
    ) -> None:
        """Set handlers as the only handlers on the callback manager.

        Args:
            handlers: The handlers to set.
            inherit: Whether to inherit the handlers.
        """
        self.handlers = []
        self.inheritable_handlers = []
        for handler in handlers:
            self.add_handler(handler, inherit=inherit)

    def set_handler(
        self,
        handler: BaseCallbackHandler,
        inherit: bool = True,  # noqa: FBT001,FBT002
    ) -> None:
        """Set handler as the only handler on the callback manager.

        Args:
            handler: The handler to set.
            inherit: Whether to inherit the handler.
        """
        self.set_handlers([handler], inherit=inherit)

    def add_tags(
        self,
        tags: list[str],
        inherit: bool = True,  # noqa: FBT001,FBT002
    ) -> None:
        """Add tags to the callback manager.

        Args:
            tags: The tags to add.
            inherit: Whether to inherit the tags.
        """
        for tag in tags:
            # A duplicate tag is first removed (from both tag lists, see
            # remove_tags) so the new occurrence replaces the old one.
            if tag in self.tags:
                self.remove_tags([tag])
        self.tags.extend(tags)
        if inherit:
            self.inheritable_tags.extend(tags)

    def remove_tags(self, tags: list[str]) -> None:
        """Remove tags from the callback manager.

        Args:
            tags: The tags to remove.
        """
        for tag in tags:
            if tag in self.tags:
                self.tags.remove(tag)
            if tag in self.inheritable_tags:
                self.inheritable_tags.remove(tag)

    def add_metadata(
        self,
        metadata: dict[str, Any],
        inherit: bool = True,  # noqa: FBT001,FBT002
    ) -> None:
        """Add metadata to the callback manager.

        Args:
            metadata: The metadata to add.
            inherit: Whether to inherit the metadata.
        """
        self.metadata.update(metadata)
        if inherit:
            self.inheritable_metadata.update(metadata)

    def remove_metadata(self, keys: list[str]) -> None:
        """Remove metadata from the callback manager.

        Args:
            keys: The keys to remove.
        """
        for key in keys:
            self.metadata.pop(key, None)
            self.inheritable_metadata.pop(key, None)
# Type alias for user-supplied callbacks: a bare list of handlers, a full
# callback manager, or None (no callbacks).
Callbacks = list[BaseCallbackHandler] | BaseCallbackManager | None
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/core/langchain_core/callbacks/base.py
|
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
class ShellTest(ProcessTest, SiteTest, unittest.TestCase):
    """End-to-end tests for the ``scrapy shell`` command.

    Each test spawns the command in a subprocess (``ProcessTest.execute``)
    against the local HTTP test site started by ``SiteTest``, runs a one-line
    expression via ``-c``, and inspects the captured stdout.
    """

    command = 'shell'

    @defer.inlineCallbacks
    def test_empty(self):
        # Without a URL, 'item' starts out as an empty dict in the shell.
        _, out, _ = yield self.execute(['-c', 'item'])
        assert '{}' in out

    @defer.inlineCallbacks
    def test_response_body(self):
        _, out, _ = yield self.execute([self.url('/text'), '-c', 'response.body'])
        assert 'Works' in out

    @defer.inlineCallbacks
    def test_response_type_text(self):
        # Plain-text responses must be wrapped as TextResponse.
        _, out, _ = yield self.execute([self.url('/text'), '-c', 'type(response)'])
        assert 'TextResponse' in out

    @defer.inlineCallbacks
    def test_response_type_html(self):
        # HTML responses must be wrapped as HtmlResponse.
        _, out, _ = yield self.execute([self.url('/html'), '-c', 'type(response)'])
        assert 'HtmlResponse' in out

    @defer.inlineCallbacks
    def test_response_selector_html(self):
        xpath = 'response.xpath("//p[@class=\'one\']/text()").extract()[0]'
        _, out, _ = yield self.execute([self.url('/html'), '-c', xpath])
        self.assertEqual(out.strip(), 'Works')

    @defer.inlineCallbacks
    def test_response_encoding_gb18030(self):
        # Encoding should be detected from the response headers/meta.
        _, out, _ = yield self.execute([self.url('/enc-gb18030'), '-c', 'response.encoding'])
        self.assertEqual(out.strip(), 'gb18030')

    @defer.inlineCallbacks
    def test_redirect(self):
        # The shell should follow redirects and expose the final URL.
        _, out, _ = yield self.execute([self.url('/redirect'), '-c', 'response.url'])
        assert out.strip().endswith('/redirected')

    @defer.inlineCallbacks
    def test_request_replace(self):
        # fetch() of a replaced request must not crash the shell (exit code 0).
        url = self.url('/text')
        code = "fetch('{0}') or fetch(response.request.replace(method='POST'))"
        errcode, out, _ = yield self.execute(['-c', code.format(url)])
        self.assertEqual(errcode, 0, out)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Thomas Moore (@tmmruk)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_netbios
version_added: '2.9'
short_description: Manage NetBIOS over TCP/IP settings on Windows.
description:
- Enables or disables NetBIOS on Windows network adapters.
- Can be used to protect a system against NBT-NS poisoning and avoid NBNS broadcast storms.
- Settings can be applied system wide or per adapter.
options:
state:
description:
- Whether NetBIOS should be enabled, disabled, or default (use setting from DHCP server or if static IP address is assigned enable NetBIOS).
choices:
- enabled
- disabled
- default
required: yes
type: str
adapter_names:
description:
- List of adapter names for which to manage NetBIOS settings. If this option is omitted then configuration is applied to all adapters on the system.
- The adapter name used is the connection caption in the Network Control Panel or via C(Get-NetAdapter), eg C(Ethernet 2).
type: list
required: no
author:
- Thomas Moore (@tmmruk)
notes:
- Changing NetBIOS settings does not usually require a reboot and will take effect immediately.
- UDP port 137/138/139 will no longer be listening once NetBIOS is disabled.
'''
EXAMPLES = r'''
- name: Disable NetBIOS system wide
win_netbios:
state: disabled
- name: Disable NetBIOS on Ethernet2
win_netbios:
state: disabled
adapter_names:
- Ethernet2
- name: Enable NetBIOS on Public and Backup adapters
win_netbios:
state: enabled
adapter_names:
- Public
- Backup
- name: Set NetBIOS to system default on all adapters
win_netbios:
state: default
'''
RETURN = r'''
reboot_required:
description: Boolean value stating whether a system reboot is required.
returned: always
type: bool
sample: true
'''
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class SamplingOpsThreadingTest(test.TestCase):
  """Checks that sampling_ops' distribution estimate is safe under threads."""

  def testMultiThreadedEstimateDataDistribution(self):
    num_classes = 10

    # Set up graph.
    random_seed.set_random_seed(1234)
    # Random scalar label in [0, num_classes], cast to int32.
    label = math_ops.cast(
        math_ops.round(random_ops.random_uniform([1]) * num_classes),
        dtypes_lib.int32)

    prob_estimate = sampling_ops._estimate_data_distribution(  # pylint: disable=protected-access
        label, num_classes)
    # Check that prob_estimate is well-behaved in a multithreaded context.
    _, _, [prob_estimate] = sampling_ops._verify_input(  # pylint: disable=protected-access
        [], label, [prob_estimate])

    # Use queues to run multiple threads over the graph, each of which
    # fetches `prob_estimate`.
    queue = data_flow_ops.FIFOQueue(
        capacity=25,
        dtypes=[prob_estimate.dtype],
        shapes=[prob_estimate.get_shape()])
    enqueue_op = queue.enqueue([prob_estimate])
    # 25 copies of the enqueue op -> 25 queue-runner threads.
    queue_runner_impl.add_queue_runner(
        queue_runner_impl.QueueRunner(queue, [enqueue_op] * 25))
    out_tensor = queue.dequeue()

    # Run the multi-threaded session.
    with self.cached_session() as sess:
      # Need to initialize variables that keep running total of classes seen.
      variables.global_variables_initializer().run()

      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)

      for _ in range(25):
        sess.run([out_tensor])

      coord.request_stop()
      coord.join(threads)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""This module provides functionality to work with zip files."""
# Perhaps all methods should work with a wildcard to limit searches in some
# way (examples: *.po, base.xlf, pootle-terminology.tbx)
#TODO: consider also providing directories as we currently provide files
#TODO: refactor with existing zip code (xpi.py, etc.)
from translate.storage import factory
from translate.storage import directory
from translate.misc import wStringIO
from os import path
from zipfile import ZipFile
class ZIPFile(directory.Directory):
    """This class represents a ZIP file like a directory."""

    def __init__(self, filename=None):
        """Remember the archive's filename; contents are scanned lazily.

        Args:
            filename: Path to the ZIP archive on disk.
        """
        self.filename = filename
        self.filedata = []

    def unit_iter(self):
        """Iterator over all the units in all the files in this zip file."""
        for dirname, filename in self.file_iter():
            # NOTE(review): path.join uses the OS separator, but zip member
            # names always use '/'; this likely misbehaves on Windows — verify.
            strfile = wStringIO.StringIO(self.archive.read(path.join(dirname, filename)))
            strfile.filename = filename
            store = factory.getobject(strfile)
            #TODO: don't regenerate all the storage objects
            for unit in store.unit_iter():
                yield unit

    def scanfiles(self):
        """Populate the internal file data from the archive's member list."""
        self.filedata = []
        self.archive = ZipFile(self.filename)
        for completename in self.archive.namelist():
            # Renamed 'dir' -> 'dirname' so the builtin dir() is not shadowed.
            dirname, name = path.split(completename)
            self.filedata.append((dirname, name))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#############################################################################
# File : Filter.py
# Package : rpmlint
# Author : Frederic Lepied
# Created on : Sat Oct 23 15:52:27 1999
# Purpose : filter the output of rpmlint to allow exceptions.
#############################################################################
import locale
import sys
import textwrap
import Config
try:
import Testing
except ImportError:
Testing = None
# Optional raw-output file handle, opened by setRawOut().
_rawout = None
# Buffered diagnostics, flushed/sorted by printAllReasons() in badness mode.
_diagnostic = list()
# Running total of badness accumulated by _print().
_badness_score = 0
# Count of printed messages per type: Info, Warning, Error.
printed_messages = {"I": 0, "W": 0, "E": 0}
# Choose the low-level print function once at import time: on a TTY, print
# as-is; otherwise re-encode unicode to the locale's preferred encoding so
# piped/redirected output does not crash on non-ASCII characters.
# (Python 2 code: `unicode` does not exist on Python 3.)
if sys.stdout.isatty():
    def __print(s):
        print(s)
else:
    def __print(s):
        if isinstance(s, unicode):
            s = s.encode(locale.getpreferredencoding(), "replace")
        print(s)
def printInfo(pkg, reason, *details):
    """Emit an informational ("I") message for pkg via _print."""
    _print("I", pkg, reason, details)
def printWarning(pkg, reason, *details):
    """Emit a warning ("W") message for pkg via _print."""
    _print("W", pkg, reason, details)
def printError(pkg, reason, *details):
    """Emit an error ("E") message for pkg via _print."""
    _print("E", pkg, reason, details)
def _print(msgtype, pkg, reason, details):
    """Format, filter and output one rpmlint message.

    Returns True if the message was actually printed/recorded, False if it
    was suppressed by the configured filters. (Python 2 code: uses the
    `print >>` statement below.)
    """
    global _badness_score
    threshold = badnessThreshold()
    badness = 0
    if threshold >= 0:
        badness = Config.badness(reason)
        # anything with badness is an error
        if badness:
            msgtype = 'E'
        # errors without badness become warnings
        elif msgtype == 'E':
            msgtype = 'W'
    # Optional source line number and package architecture in the prefix.
    ln = ""
    if pkg.current_linenum is not None:
        ln = "%s:" % pkg.current_linenum
    arch = ""
    if pkg.arch is not None:
        arch = ".%s" % pkg.arch
    s = "%s%s:%s %s: %s" % (pkg.name, arch, ln, msgtype, reason)
    if badness:
        s = s + " (Badness: %d)" % badness
    for d in details:
        s = s + " %s" % d
    if Testing and Testing.isTest():
        # Test mode: capture instead of printing.
        Testing.addOutput(s)
    else:
        if _rawout:
            # Raw output bypasses filtering entirely.
            print >>_rawout, s.encode(locale.getpreferredencoding(), "replace")
        if not Config.isFiltered(s):
            printed_messages[msgtype] += 1
            _badness_score += badness
            if threshold >= 0:
                # Badness mode: buffer for sorted emission in printAllReasons().
                _diagnostic.append(s + "\n")
            else:
                __print(s)
                if Config.info:
                    printDescriptions(reason)
            return True
    return False
def printDescriptions(reason):
    """Print the wrapped long description registered for reason, if any.

    Unknown reasons (no entry in _details) are silently ignored.
    """
    try:
        d = _details[reason]
        if d and d != '' and d != "\n":
            __print(textwrap.fill(d, 78))
            __print("")
    except KeyError:
        pass
def _diag_sortkey(x):
xs = x.split()
return (xs[2], xs[1])
def printAllReasons():
    """Flush buffered diagnostics (badness mode) sorted by reason and type.

    Returns True when the accumulated badness score exceeds the configured
    threshold (i.e. the run should be treated as failed); False when badness
    mode is disabled or the score is within bounds.
    """
    threshold = badnessThreshold()
    if threshold < 0:
        # Badness mode disabled: nothing was buffered.
        return False

    global _diagnostic
    _diagnostic.sort(key=_diag_sortkey, reverse=True)
    last_reason = ''
    for diag in _diagnostic:
        if Config.info:
            # Print each reason's description once, after its last message.
            reason = diag.split()[2]
            if reason != last_reason:
                if len(last_reason):
                    printDescriptions(last_reason)
                last_reason = reason
        __print(diag)
    if Config.info and len(last_reason):
        printDescriptions(last_reason)
    # Reset the buffer for the next run.
    _diagnostic = list()
    return _badness_score > threshold
# Mapping of reason -> long human-readable description, filled by addDetails().
_details = {}
def addDetails(*details):
    """Register alternating (reason, description) pairs in _details.

    The first registration of a reason wins: an already-known reason is not
    overwritten. A trailing unpaired element is ignored, matching the old
    index-arithmetic behavior.
    """
    # Pair even-indexed reasons with odd-indexed descriptions.
    for reason, description in zip(details[::2], details[1::2]):
        if reason not in _details:
            _details[reason] = description
def badnessScore():
    """Return the badness accumulated across all printed messages."""
    return _badness_score
def badnessThreshold():
    """Return the configured badness threshold (-1 = badness mode disabled)."""
    return Config.getOption("BadnessThreshold", -1)
def setRawOut(file):
    """Open `file` as the raw (unfiltered) output sink, closing any old one."""
    global _rawout
    if _rawout:
        _rawout.close()
    _rawout = open(file, "w")
# Filter.py ends here
# Local variables:
# indent-tabs-mode: nil
# py-indent-offset: 4
# End:
# ex: ts=4 sw=4 et
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# textventure.py
#################
## Textventure ##
## ckjbgames ####
## 2017 #########
#################
## Imports
import random # Map randomization
import pickle # For game saving
import sys # Various uses
import socket # Eventually for connections
import MySQLdb # For connecting to MySQL
import json # Decode JSON in MySQL
import os # Save files, etc.
import tty # "Press any key to continue..."
## Classes
class gameItem(object):
    """An in-game item that can be used by the player and is placed randomly
    in the rooms. Instances are stored in the Inventory class.

    Attributes:
        name: The item's display name.
        effect: The item's effect (an Effect instance by convention).
        uses: True means the item lasts forever; an integer is the number
            of remaining uses. Must NOT be set to False.
        description: Human-readable description.
        movable: Whether the item can be moved between rooms.
        can_hold: Whether the player can carry the item.
    """

    def __init__(self, name = None, effect = None, uses = None, description = None, movable = True, can_hold = True):
        """Store the item's attributes."""
        self.name = name
        self.effect = effect
        self.uses = uses
        self.description = description
        self.movable = movable
        self.can_hold = can_hold

    def __repr__(self):
        """Return the item's attributes in a readable multi-line format.

        Bug fix: the original tested ``uses == True``, which is also true
        for ``uses == 1`` (bool is an int subclass), so one-use items were
        shown as lasting forever. ``uses is True`` checks identity instead.
        """
        uses = self.uses
        if uses is True:
            uses = "*lasts forever*"
        template = "\n%s" +\
                   "\nEffect:" +\
                   "\n%s" +\
                   "\nUses:" +\
                   "\n%s" +\
                   "\nDescription:" +\
                   "\n%s"
        return template % (self.name, self.effect, uses, self.description)

    def itemName(self):
        """Return only the item's name (for room descriptions and
        inventory listings)."""
        return self.name

    def MySQLformat(self):
        """Return the item's attributes as a flat list for MySQL storage."""
        return [self.name,
                self.effect,
                self.uses,
                self.description,
                self.movable,
                self.can_hold]
class Inventory(object):
    """The player's inventory.

    Maps item names to gameItem instances in ``player_inventory``.
    """

    def __init__(self):
        """Initialize the inventory to an empty dictionary."""
        self.player_inventory = {}

    def __repr__(self):
        """Return a plain listing of carried item names.

        Bug fix: the original iterated over the dict's keys (strings) and
        called ``itemName()`` on them, raising AttributeError for any
        non-empty inventory. The keys already are the item names, so list
        them directly.
        """
        if self.player_inventory == {}:
            eggs = "You're not carrying anything."
        else:
            eggs = ''
            for name in self.player_inventory:
                eggs = eggs + '\n' + name
        return eggs

    def addItem(self, item_to_add):
        """Add an item (a gameItem instance) to the inventory, keyed by
        its name."""
        self.player_inventory[item_to_add.itemName()] = item_to_add

    def viewItem(self, item_name):
        """Return the item registered under item_name, or an apology string.

        Only KeyError can occur on a dict lookup; the original also caught
        NameError, which is impossible here and was dropped.
        """
        try:
            return self.player_inventory[item_name]
        except KeyError:
            return "You don't have that item!"

    def toJSON(self):
        """Serialize the inventory to a JSON string."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True)
class Room(object):
    """A single location in the game world.

    Attributes:
        name: The room's name.
        items_room: Dict of gameItem instances present, or False when the
            room holds no items.
        description: Text shown when the player looks around.
        in_room: True while the player is currently inside this room.
    """

    def __init__(self, name = None, items_room = False, description = None, in_room = False):
        """Record the room's name, contents, description and occupancy flag."""
        self.name = name
        self.description = description
        self.items_room = items_room
        self.in_room = in_room

    def changeFlag(self, val):
        """Set the in_room occupancy flag to ``val``."""
        self.in_room = val

    def toJSON(self):
        """Serialize this room's attributes to a sorted JSON string."""
        return json.dumps(self, sort_keys=True,
                          default=lambda obj: obj.__dict__)
class allRooms(object):
    """
    A map-type arrangement of all rooms as a 2-dimensional list/array.

    More effective than having a really, really complicated initializer
    method for the Room class.  The player will never actually view this
    unless they have a map; there is a __repr__ method in case they do.
    """

    # Numpad-style direction -> (row delta, column delta).  Collapses the
    # four copy-pasted branches the old move() carried.
    _DIRECTIONS = {
        8: (-1, 0), 'n': (-1, 0),   # north
        2: (1, 0),  's': (1, 0),    # south
        4: (0, -1), 'w': (0, -1),   # west
        6: (0, 1),  'e': (0, 1),    # east
    }

    def __init__(self, rooms=None, coords=(0, 0)):
        """
        rooms: a list of rows, each a list mixing Room instances and the
               False value (False marks "no room here").
        coords: (row, column) of the player's current position.

        BUG FIX: the old signature used a mutable default argument
        (rooms=[]), so every instance created without *rooms* shared one
        list.  A fresh list is now created per instance.
        """
        self.rooms = [] if rooms is None else rooms
        self.coords = coords

    def __repr__(self):
        """
        Kind of displays a map:
        # - A room, but you are not in it
        Space - There is no room here
        @ - You are here
        """
        map_of = ''
        for row in self.rooms:
            for room in row:
                if room is False:
                    map_of += ' '
                elif room.in_room:
                    map_of += '@'
                else:
                    map_of += '#'
            map_of += '\r\n'  # In case of Windows
        return map_of

    def move(self, direction=8):
        """
        Move to an adjacent room, number-pad style:
            8 - North, 2 - South, 4 - West, 6 - East
        (letters 'n'/'s'/'w'/'e' are accepted too).

        Returns an explanatory message when the move is impossible;
        returns None on success.
        """
        try:
            drow, dcol = self._DIRECTIONS[direction]
        except (KeyError, TypeError):
            return "Sorry, that's not a valid direction."
        row, col = self.coords
        new_row, new_col = row + drow, col + dcol
        try:
            # BUG FIX: negative indices silently wrapped around to the far
            # edge of the map, defeating the IndexError guard below, so
            # reject them explicitly before indexing.
            if new_row < 0 or new_col < 0:
                raise NoRoom
            if isinstance(self.rooms[new_row][new_col], Room):
                self.rooms[row][col].changeFlag(False)
                self.rooms[new_row][new_col].changeFlag(True)
                self.coords = (new_row, new_col)
            else:
                raise NoRoom
        except (IndexError, NoRoom, AttributeError):
            return "There is no room to enter in this direction!"

    def randomgen(self):
        """
        For random generation of a map.
        Will eventually use MySQL to find item templates.
        """
        # Possibilities for a room; will be updated soon.  Note the single
        # Room instance is shared between cells, as before.
        possible = [False, Room('spam', False, 'Too many eggs!', False)]
        # Make the map size anywhere from 10x10 to 25x25.
        rows = random.randint(10, 25)
        cols = random.randint(10, 25)
        self.rooms = [[random.choice(possible) for _ in range(cols)]
                      for _ in range(rows)]
class NoRoom(Exception):
    """Raised when there is no room to enter in the requested direction."""
# NOTE(review): this class targets Python 2 (print statements, raw_input).
class Controller(object):
    """
    Purposes:
    1. Loads and saves games using pickle
        Store save files by default in
        /var/games/textventure/saves
        See "Installation" in the Wiki for more
    2. Performs some functions having to do with
        interactions between the player and the environment
    3. Executes textventure commands the player inputs
    """
    def __init__(self,inventory = None,allrooms = None):
        # Keep the (inventory, allRooms) pair both as the single list that
        # gets pickled and as individual attributes for convenient access.
        self.game = [inventory,allrooms]
        self.inv,self.allrooms=self.game
    def loadgame(self,username = None):
        """
        Loads a game with pickle.load
        A save file should be in the /var/games/textventure/saves directory
        The extension doesn't matter, but I have decided to use *.pickle
        """
        print 'Loading game...'
        try:
            with open('/var/games/textventure/saves/%s.pickle'%(username),'r') as f:
                self.game = pickle.load(f)
                self.inv,self.allrooms=self.game
        except EnvironmentError:
            # Missing/unreadable save file: report and quit the program.
            print 'The game could not be loaded. Sorry about that.'
            pressanykey()
            sys.exit()
        else:
            print 'Success!'
            return self.game
    def savegame(self,username = None):
        """
        Save a game with pickle.dump
        Save file path is mentioned in the docs of loadgame()
        """
        print 'Saving game...'
        try:
            with open('/var/games/textventure/saves/%s.pickle'%(username),'w') as f:
                pickle.dump(self.game,f)
                print 'Game saved!'
                # Offer to quit right after a successful save.
                quitconfirm=raw_input('Do you wish to quit (y/n)? ')
                if quitconfirm[0] == 'y' or quitconfirm[0] == 'Y':
                    pressanykey()
                    sys.exit()
                elif quitconfirm[0] == 'n' or quitconfirm[0] == 'N':
                    print 'Please do continue, good player!'
                else:
                    print "I don't think that's a yes or a no."
                    pressanykey()
        except EnvironmentError:
            print 'The game could not be saved. Sorry about that.'
            pressanykey()
            sys.exit()
        else:
            print 'Success!'
    def commands(self):
        """
        Interpret commands.
        It's actually pretty simple.
        """
        prompt='textventure$ '
        command_list=['l','i','p','d','m','u','h','S']
        while True:
            typed=raw_input(prompt).split(' ')
            command=typed[0]
            arg=typed[1:]  # collected but not used yet
            if command in command_list:
                if command == 'l':
                    # NOTE(review): the source line below is truncated (it
                    # ends in a dangling attribute access) — the method is
                    # incomplete as committed.
                    print self.allrooms.rooms[self.allrooms.coords[1]][self.allrooms.coords[0]].
## Functions
def pressanykey():
    # Wait for a single keypress without requiring Enter (Python 2).
    print 'Press any key to continue...'
    # NOTE(review): tty.setraw(1) acts on file descriptor 1 (stdout) while
    # the read below is from stdin (fd 0) — confirm the intended descriptor.
    tty.setraw(1)
    sys.stdin.read(1)
## Main Program
if __name__ == '__main__':
    # Entry point: load the save for the user named on the command line,
    # or create an empty save file for a new game.
    control=Controller('','')
    # NOTE(review): raises IndexError when run without a username argument.
    username=sys.argv[1]
    savepath="/var/games/textventure/saves/%s.pickle"%(username)
    if os.path.exists(savepath):
        inv,allrooms=control.loadgame(username)
    else:
        # Create an empty save file so a later savegame() can write to it.
        open(savepath,'a').close() # .close() could be omitted, but only in CPython.
                                   # It was kept so it will work in other implementations.
|
unknown
|
codeparrot/codeparrot-clean
| ||
import urllib
import json
import pprint
# NOTE(review): Python 2 code (print statement, py2 urllib elsewhere).
class WpSeekApi:
    """Queries the WpSeek API and hands the result back to Vim."""
    def find_similar(self, keyword, limit):
        # Look up keywords similar to *keyword*, capped at *limit* results.
        finder = SimilarFinder()
        result = finder.find(keyword, limit)
        self.return_to_vim(result)
    def find_topics(self, keyword, limit):
        # Look up topic suggestions for *keyword*, capped at *limit* results.
        finder = TopicFinder()
        result = finder.find(keyword, limit)
        self.return_to_vim(result)
    def return_to_vim(self, results):
        # Assign the result into a Vim variable when running as a plugin;
        # fall back to printing when the vim module is unavailable.
        try:
            import vim
            vim.command('let api_result = %s' % results)
        except:
            # NOTE(review): bare except also swallows unexpected errors
            # (e.g. a malformed vim.command argument), not just ImportError.
            print results
class TopicFinder:
    """Fetches topic suggestions for a keyword from the WpSeek API."""

    def find(self, keyword, limit):
        """Query the API and return the trimmed, single-line response body."""
        body = urllib.urlopen(self.build_url_for(keyword, limit)).read()
        trimmed = body[1:-2]  # drop the wrapping characters around the payload
        return trimmed.replace("\n", " ")

    def build_url_for(self, keyword, limit):
        """Compose the 'gettopics' request URL for *keyword*."""
        return WpSeekUrlBuilder().build('gettopics', keyword, {'limit': limit})
class SimilarFinder:
    """Fetches similar keywords for a search term from the WpSeek API."""

    def find(self, keyword, limit):
        """Query the API and return the trimmed, single-line response body."""
        body = urllib.urlopen(self.build_url_for(keyword, limit)).read()
        trimmed = body[1:-2]  # drop the wrapping characters around the payload
        return trimmed.replace("\n", " ")

    def build_url_for(self, keyword, limit):
        """Compose the 'getsimilar' request URL for *keyword*."""
        return WpSeekUrlBuilder().build('getsimilar', keyword, {'limit': limit})
class WpSeekUrlBuilder:
    """Builds wpseek.com API request URLs."""

    def build(self, method, query, opts=None):
        """
        Return the API URL for *method* (e.g. 'gettopics') and search
        string *query*.

        opts: optional extra query parameters, e.g. {'limit': 10}.

        BUG FIX: the old signature used a shared mutable default (opts={})
        and mutated it — and the caller's dict — in place.  A fresh copy is
        built on every call instead.
        """
        params = dict(opts) if opts else {}
        params['method'] = 'wordpress.%s' % method
        params['s'] = query
        # Resolve urlencode in a way that works on both Python 2 and 3.
        try:
            encode = urllib.urlencode          # Python 2
        except AttributeError:
            from urllib.parse import urlencode as encode  # Python 3
        return "http://api.wpseek.com/?%s" % encode(params)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"context"
"crypto/sha256"
errorspkg "errors"
"io"
"time"
"github.com/dustin/go-humanize"
"go.uber.org/zap"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/apply"
"go.etcd.io/etcd/server/v3/etcdserver/errors"
serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
"go.etcd.io/etcd/server/v3/storage"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/mvcc"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.etcd.io/raft/v3"
)
// KVGetter exposes the server's watchable key-value store.
type KVGetter interface {
	KV() mvcc.WatchableKV
}

// BackendGetter exposes the server's storage backend.
type BackendGetter interface {
	Backend() backend.Backend
}

// Defrager defragments the backend database file.
type Defrager interface {
	Defragment() error
}

// Alarmer raises, clears, and lists cluster alarms.
type Alarmer interface {
	// Alarms is implemented in Server interface located in etcdserver/server.go
	// It returns a list of alarms present in the AlarmStore
	Alarms() []*pb.AlarmMember
	Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
}

// Downgrader handles cluster version downgrade requests.
type Downgrader interface {
	Downgrade(ctx context.Context, dr *pb.DowngradeRequest) (*pb.DowngradeResponse, error)
}

// LeaderTransferrer transfers raft leadership from lead to target.
type LeaderTransferrer interface {
	MoveLeader(ctx context.Context, lead, target uint64) error
}

// ClusterStatusGetter reports whether this member is a raft learner.
type ClusterStatusGetter interface {
	IsLearner() bool
}

// ConfigGetter exposes the server's configuration.
type ConfigGetter interface {
	Config() config.ServerConfig
}
// maintenanceServer implements pb.MaintenanceServer in terms of the narrow
// interfaces above, so each dependency on the etcd server can be satisfied
// (and substituted) independently.
type maintenanceServer struct {
	lg             *zap.Logger
	rg             apply.RaftStatusGetter
	hasher         mvcc.HashStorage
	bg             BackendGetter
	defrag         Defrager
	a              Alarmer
	lt             LeaderTransferrer
	hdr            header
	cs             ClusterStatusGetter
	d              Downgrader
	vs             serverversion.Server
	cg             ConfigGetter
	healthNotifier notifier
	// we want compile errors if new methods are added
	pb.UnsafeMaintenanceServer
}
// NewMaintenanceServer builds the maintenance RPC service backed by the
// given EtcdServer (which satisfies all of the narrow interfaces used by
// maintenanceServer) and wraps it with per-RPC authorization checks.
func NewMaintenanceServer(s *etcdserver.EtcdServer, healthNotifier notifier) pb.MaintenanceServer {
	srv := &maintenanceServer{
		lg:             s.Cfg.Logger,
		rg:             s,
		hasher:         s.KV().HashStorage(),
		bg:             s,
		defrag:         s,
		a:              s,
		lt:             s,
		hdr:            newHeader(s),
		cs:             s,
		d:              s,
		vs:             etcdserver.NewServerVersionAdapter(s),
		healthNotifier: healthNotifier,
		cg:             s,
	}
	// Guarantee a usable logger even when none was configured.
	if srv.lg == nil {
		srv.lg = zap.NewNop()
	}
	return &authMaintenanceServer{srv, &AuthAdmin{s}}
}
// Defragment defragments the backend storage, notifying the health
// notifier for the duration of the operation.
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
	ms.lg.Info("starting defragment")
	ms.healthNotifier.defragStarted()
	defer ms.healthNotifier.defragFinished()
	if err := ms.defrag.Defragment(); err != nil {
		ms.lg.Warn("failed to defragment", zap.Error(err))
		return nil, togRPCError(err)
	}
	ms.lg.Info("finished defragment")
	return &pb.DefragmentResponse{}, nil
}
// big enough size to hold >1 OS pages in the buffer
const snapshotSendBufferSize = 32 * 1024

// Snapshot streams a point-in-time copy of the backend database to the
// client in snapshotSendBufferSize chunks, then sends one final message
// whose Blob is the SHA-256 digest of the streamed bytes so the client can
// verify integrity on restore.
func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
	ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx())
	storageVersion := ""
	if ver != nil {
		storageVersion = ver.String()
	}
	snap := ms.bg.Backend().Snapshot()
	pr, pw := io.Pipe()
	defer pr.Close()
	// Writer side: copy the snapshot into the pipe on a separate goroutine
	// so the loop below can stream chunks as they become available.
	go func() {
		snap.WriteTo(pw)
		if err := snap.Close(); err != nil {
			ms.lg.Warn("failed to close snapshot", zap.Error(err))
		}
		pw.Close()
	}()
	// record SHA digest of snapshot data
	// used for integrity checks during snapshot restore operation
	h := sha256.New()
	sent := int64(0)
	total := snap.Size()
	size := humanize.Bytes(uint64(total))
	start := time.Now()
	ms.lg.Info("sending database snapshot to client",
		zap.Int64("total-bytes", total),
		zap.String("size", size),
		zap.String("storage-version", storageVersion),
	)
	for total-sent > 0 {
		// buffer just holds read bytes from stream
		// response size is multiple of OS page size, fetched in boltdb
		// e.g. 4*1024
		// NOTE: srv.Send does not wait until the message is received by the client.
		// Therefore the buffer can not be safely reused between Send operations
		buf := make([]byte, snapshotSendBufferSize)
		n, err := io.ReadFull(pr, buf)
		if err != nil && !errorspkg.Is(err, io.EOF) && !errorspkg.Is(err, io.ErrUnexpectedEOF) {
			return togRPCError(err)
		}
		sent += int64(n)
		// if total is x * snapshotSendBufferSize. it is possible that
		// resp.RemainingBytes == 0
		// resp.Blob == zero byte but not nil
		// does this make server response sent to client nil in proto
		// and client stops receiving from snapshot stream before
		// server sends snapshot SHA?
		// No, the client will still receive non-nil response
		// until server closes the stream with EOF
		resp := &pb.SnapshotResponse{
			RemainingBytes: uint64(total - sent),
			Blob:           buf[:n],
			Version:        storageVersion,
		}
		if err = srv.Send(resp); err != nil {
			return togRPCError(err)
		}
		h.Write(buf[:n])
	}
	// send SHA digest for integrity checks
	// during snapshot restore operation
	sha := h.Sum(nil)
	ms.lg.Info("sending database sha256 checksum to client",
		zap.Int64("total-bytes", total),
		zap.Int("checksum-size", len(sha)),
	)
	hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha, Version: storageVersion}
	if err := srv.Send(hresp); err != nil {
		return togRPCError(err)
	}
	ms.lg.Info("successfully sent database snapshot to client",
		zap.Int64("total-bytes", total),
		zap.String("size", size),
		zap.Duration("took", time.Since(start)),
		zap.String("storage-version", storageVersion),
	)
	return nil
}
// Hash computes the hash of the whole backend keyspace at the current
// revision and returns it with a filled-in response header.
func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
	hash, rev, err := ms.hasher.Hash()
	if err != nil {
		return nil, togRPCError(err)
	}
	header := &pb.ResponseHeader{Revision: rev}
	ms.hdr.fill(header)
	return &pb.HashResponse{Header: header, Hash: hash}, nil
}
// HashKV computes the hash of the MVCC keyspace up to the requested
// revision and returns it with a filled-in response header.
func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
	h, rev, err := ms.hasher.HashByRev(r.Revision)
	if err != nil {
		return nil, togRPCError(err)
	}
	header := &pb.ResponseHeader{Revision: rev}
	ms.hdr.fill(header)
	return &pb.HashKVResponse{
		Header:          header,
		Hash:            h.Hash,
		CompactRevision: h.CompactRevision,
		HashRevision:    h.Revision,
	}, nil
}
// Alarm delegates the alarm request to the Alarmer and fills in the
// response header before returning.
func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
	resp, err := ms.a.Alarm(ctx, ar)
	if err != nil {
		return nil, togRPCError(err)
	}
	// The underlying Alarmer may return a response without a header;
	// create one so the standard header fields can be filled in.
	if resp.Header == nil {
		resp.Header = &pb.ResponseHeader{}
	}
	ms.hdr.fill(resp.Header)
	return resp, nil
}
// Status reports this member's view of the cluster: server version, raft
// state, database sizes, learner status, downgrade info, and any alarms
// (surfaced as error strings on the response).
func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
	hdr := &pb.ResponseHeader{}
	ms.hdr.fill(hdr)
	resp := &pb.StatusResponse{
		Header:           hdr,
		Version:          version.Version,
		Leader:           uint64(ms.rg.Leader()),
		RaftIndex:        ms.rg.CommittedIndex(),
		RaftAppliedIndex: ms.rg.AppliedIndex(),
		RaftTerm:         ms.rg.Term(),
		DbSize:           ms.bg.Backend().Size(),
		DbSizeInUse:      ms.bg.Backend().SizeInUse(),
		IsLearner:        ms.cs.IsLearner(),
		DbSizeQuota:      ms.cg.Config().QuotaBackendBytes,
		DowngradeInfo:    &pb.DowngradeInfo{Enabled: false},
	}
	// A zero quota in the configuration means "use the default".
	if resp.DbSizeQuota == 0 {
		resp.DbSizeQuota = storage.DefaultQuotaBytes
	}
	if storageVersion := ms.vs.GetStorageVersion(); storageVersion != nil {
		resp.StorageVersion = storageVersion.String()
	}
	if downgradeInfo := ms.vs.GetDowngradeInfo(); downgradeInfo != nil {
		resp.DowngradeInfo = &pb.DowngradeInfo{
			Enabled:       downgradeInfo.Enabled,
			TargetVersion: downgradeInfo.TargetVersion,
		}
	}
	// Surface cluster-health problems (no leader, active alarms) to the client.
	if resp.Leader == raft.None {
		resp.Errors = append(resp.Errors, errors.ErrNoLeader.Error())
	}
	for _, a := range ms.a.Alarms() {
		resp.Errors = append(resp.Errors, a.String())
	}
	return resp, nil
}
// MoveLeader transfers raft leadership to the member with tr.TargetID.
// Only the current leader may initiate the transfer.
func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
	if ms.rg.MemberID() != ms.rg.Leader() {
		return nil, rpctypes.ErrGRPCNotLeader
	}
	if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
		return nil, togRPCError(err)
	}
	return &pb.MoveLeaderResponse{}, nil
}
// Downgrade delegates the downgrade request to the Downgrader and replaces
// the response header with a freshly filled one.
func (ms *maintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
	resp, err := ms.d.Downgrade(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	resp.Header = &pb.ResponseHeader{}
	ms.hdr.fill(resp.Header)
	return resp, nil
}
// authMaintenanceServer composes the maintenance implementation with an
// AuthAdmin whose isPermitted check guards each RPC.
type authMaintenanceServer struct {
	*maintenanceServer
	*AuthAdmin
}
// Each method below performs an authorization check before delegating the
// RPC to the embedded maintenanceServer.

func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
	if err := ams.isPermitted(ctx); err != nil {
		return nil, togRPCError(err)
	}
	return ams.maintenanceServer.Defragment(ctx, sr)
}

func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
	// Streaming RPC: the context comes from the server stream.
	if err := ams.isPermitted(srv.Context()); err != nil {
		return togRPCError(err)
	}
	return ams.maintenanceServer.Snapshot(sr, srv)
}

func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
	if err := ams.isPermitted(ctx); err != nil {
		return nil, togRPCError(err)
	}
	return ams.maintenanceServer.Hash(ctx, r)
}

func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
	if err := ams.isPermitted(ctx); err != nil {
		return nil, togRPCError(err)
	}
	return ams.maintenanceServer.HashKV(ctx, r)
}

func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
	if err := ams.isPermitted(ctx); err != nil {
		return nil, togRPCError(err)
	}
	return ams.maintenanceServer.Status(ctx, ar)
}

func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
	if err := ams.isPermitted(ctx); err != nil {
		return nil, togRPCError(err)
	}
	return ams.maintenanceServer.MoveLeader(ctx, tr)
}

func (ams *authMaintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
	if err := ams.isPermitted(ctx); err != nil {
		return nil, togRPCError(err)
	}
	return ams.maintenanceServer.Downgrade(ctx, r)
}
|
go
|
github
|
https://github.com/etcd-io/etcd
|
server/etcdserver/api/v3rpc/maintenance.go
|
/*
* Copyright 2014-2023 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.tests.server.jetty.jakarta
import io.ktor.server.jetty.jakarta.*
import io.ktor.server.testing.suites.*
import kotlin.test.Ignore
import kotlin.time.Duration.Companion.milliseconds
// Engine test suites run against Jetty's blocking servlet container
// (Servlet(async = false)).  Overridden no-op / @Ignore'd tests mark cases
// that are skipped for this engine configuration.

class JettyBlockingServletContainerCompressionTest :
    CompressionTestSuite<JettyApplicationEngineBase, JettyApplicationEngineBase.Configuration>(Servlet(async = false))

class JettyBlockingServletContainerContentTest :
    ContentTestSuite<JettyApplicationEngineBase, JettyApplicationEngineBase.Configuration>(Servlet(async = false)) {
    @Ignore // KTOR-9263
    override fun funkyChunked() {
        super.funkyChunked()
    }
}

class JettyBlockingServletContainerHttpServerCommonTest :
    HttpServerCommonTestSuite<JettyApplicationEngineBase, JettyApplicationEngineBase.Configuration>(
        Servlet(async = false)
    ) {
    // Skipped for the blocking servlet engine.
    override fun testFlushingHeaders() {
        // no op
    }
}

class JettyBlockingServletContainerHttpServerJvmTest :
    HttpServerJvmTestSuite<JettyApplicationEngineBase, JettyApplicationEngineBase.Configuration>(
        Servlet(async = false)
    ) {
    @Ignore
    override fun testUpgrade() {
    }

    @Ignore
    override fun testPipelining() {
    }

    @Ignore
    override fun testPipeliningWithFlushingHeaders() {
    }
}

class JettyBlockingServletContainerSustainabilityTest :
    SustainabilityTestSuite<JettyApplicationEngineBase, JettyApplicationEngineBase.Configuration>(
        Servlet(async = false)
    ) {
    @Ignore
    override fun validateCallCoroutineContext() {}
}

class JettyBlockingServletServerPluginTest :
    ServerPluginsTestSuite<JettyApplicationEngineBase, JettyApplicationEngineBase.Configuration>(
        Servlet(async = false)
    ) {
    init {
        // HTTP/2 and TLS are not exercised for this configuration.
        enableHttp2 = false
        enableSsl = false
    }
}
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-server/ktor-server-jetty-jakarta/jvm/test/io/ktor/tests/server/jetty/jakarta/JettyBlockingServletContainerTest.kt
|
/*-------------------------------------------------------------------------
*
* xlogwait.h
* Declarations for WAL flush, write, and replay waiting routines.
*
* Copyright (c) 2025-2026, PostgreSQL Global Development Group
*
* src/include/access/xlogwait.h
*
*-------------------------------------------------------------------------
*/
#ifndef XLOG_WAIT_H
#define XLOG_WAIT_H
#include "access/xlogdefs.h"
#include "lib/pairingheap.h"
#include "port/atomics.h"
#include "storage/procnumber.h"
#include "storage/spin.h"
#include "tcop/dest.h"
/*
 * Result statuses for WaitForLSN().
 */
typedef enum
{
	WAIT_LSN_RESULT_SUCCESS,	/* Target LSN is reached */
	WAIT_LSN_RESULT_NOT_IN_RECOVERY,	/* Recovery ended before or during our
										 * wait */
	WAIT_LSN_RESULT_TIMEOUT		/* Timeout occurred */
} WaitLSNResult;

/*
 * LSN type for waiting facility.
 */
typedef enum WaitLSNType
{
	/* Standby wait types (walreceiver/startup wakes) */
	WAIT_LSN_TYPE_STANDBY_REPLAY,
	WAIT_LSN_TYPE_STANDBY_WRITE,
	WAIT_LSN_TYPE_STANDBY_FLUSH,
	/* Primary wait types (WAL writer/backends wake) */
	WAIT_LSN_TYPE_PRIMARY_FLUSH,
} WaitLSNType;

/* Number of wait types above (value of the last enumerator plus one) */
#define WAIT_LSN_TYPE_COUNT (WAIT_LSN_TYPE_PRIMARY_FLUSH + 1)
/*
 * WaitLSNProcInfo - the shared memory structure representing information
 * about the single process, which may wait for LSN operations. An item of
 * waitLSNState->procInfos array.
 */
typedef struct WaitLSNProcInfo
{
	/* LSN, which this process is waiting for */
	XLogRecPtr	waitLSN;

	/* The type of LSN to wait */
	WaitLSNType lsnType;

	/* Process to wake up once the waitLSN is reached */
	ProcNumber	procno;

	/*
	 * Heap membership flag. A process can wait for only one LSN type at a
	 * time, so a single flag suffices (tracked by the lsnType field).
	 */
	bool		inHeap;

	/* Pairing heap node for the waiters' heap (one per process) */
	pairingheap_node heapNode;
} WaitLSNProcInfo;

/*
 * WaitLSNState - the shared memory state for the LSN waiting facility.
 */
typedef struct WaitLSNState
{
	/*
	 * The minimum LSN values some process is waiting for. Used for the
	 * fast-path checking if we need to wake up any waiters after replaying a
	 * WAL record. Could be read lock-less. Update protected by WaitLSNLock.
	 */
	pg_atomic_uint64 minWaitedLSN[WAIT_LSN_TYPE_COUNT];

	/*
	 * A pairing heaps of waiting processes ordered by LSN values (least LSN
	 * is on top). Protected by WaitLSNLock.
	 */
	pairingheap waitersHeap[WAIT_LSN_TYPE_COUNT];

	/*
	 * An array with per-process information, indexed by the process number.
	 * Protected by WaitLSNLock.
	 */
	WaitLSNProcInfo procInfos[FLEXIBLE_ARRAY_MEMBER];
} WaitLSNState;

extern PGDLLIMPORT WaitLSNState *waitLSNState;

/* Shared-memory sizing and initialization for the waiting facility */
extern Size WaitLSNShmemSize(void);
extern void WaitLSNShmemInit(void);

/* Current LSN of the given wait type */
extern XLogRecPtr GetCurrentLSNForWaitType(WaitLSNType lsnType);

/* Wake processes waiting on lsnType, given the newly reached currentLSN */
extern void WaitLSNWakeup(WaitLSNType lsnType, XLogRecPtr currentLSN);

/* Remove the calling process's wait entry (e.g. on process exit) */
extern void WaitLSNCleanup(void);

/* Block until targetLSN is reached, recovery ends, or the timeout expires */
extern WaitLSNResult WaitForLSN(WaitLSNType lsnType, XLogRecPtr targetLSN,
								int64 timeout);
#endif /* XLOG_WAIT_H */
|
c
|
github
|
https://github.com/postgres/postgres
|
src/include/access/xlogwait.h
|
-- temperature conversion table (celsius to fahrenheit)
-- prints blocks of 10 temperatures: a "C" row of Celsius values followed
-- by an "F" row with the corresponding Fahrenheit values
for c0=-20,50-1,10 do
  io.write("C ")
  for c=c0,c0+10-1 do
    io.write(string.format("%3.0f ",c))
  end
  io.write("\n")
  io.write("F ")
  for c=c0,c0+10-1 do
    f=(9/5)*c+32  -- Celsius -> Fahrenheit
    io.write(string.format("%3.0f ",f))
  end
  io.write("\n\n")
end
|
unknown
|
github
|
https://github.com/redis/redis
|
deps/lua/test/cf.lua
|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.scheduling.concurrent;
import java.util.Properties;
import java.util.concurrent.ThreadFactory;
import javax.naming.NamingException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.jndi.JndiLocatorDelegate;
import org.springframework.jndi.JndiTemplate;
/**
* JNDI-based variant of {@link CustomizableThreadFactory}, performing a default lookup
* for JSR-236's "java:comp/DefaultManagedThreadFactory" in a Jakarta EE environment,
* falling back to the local {@link CustomizableThreadFactory} setup if not found.
*
* <p>This is a convenient way to use managed threads when running in a Jakarta EE
* environment, simply using regular local threads otherwise - without conditional
* setup (i.e. without profiles).
*
* <p>Note: This class is not strictly JSR-236 based; it can work with any regular
* {@link java.util.concurrent.ThreadFactory} that can be found in JNDI. Therefore,
* the default JNDI name "java:comp/DefaultManagedThreadFactory" can be customized
* through the {@link #setJndiName "jndiName"} bean property.
*
* @author Juergen Hoeller
* @since 4.0
*/
@SuppressWarnings("serial")
public class DefaultManagedAwareThreadFactory extends CustomizableThreadFactory implements InitializingBean {
protected final Log logger = LogFactory.getLog(getClass());
private final JndiLocatorDelegate jndiLocator = new JndiLocatorDelegate();
private @Nullable String jndiName = "java:comp/DefaultManagedThreadFactory";
private @Nullable ThreadFactory threadFactory;
/**
* Set the JNDI template to use for JNDI lookups.
* @see org.springframework.jndi.JndiAccessor#setJndiTemplate
*/
public void setJndiTemplate(JndiTemplate jndiTemplate) {
this.jndiLocator.setJndiTemplate(jndiTemplate);
}
/**
* Set the JNDI environment to use for JNDI lookups.
* @see org.springframework.jndi.JndiAccessor#setJndiEnvironment
*/
public void setJndiEnvironment(Properties jndiEnvironment) {
this.jndiLocator.setJndiEnvironment(jndiEnvironment);
}
/**
* Set whether the lookup occurs in a Jakarta EE container, i.e. if the prefix
* "java:comp/env/" needs to be added if the JNDI name doesn't already
* contain it. PersistenceAnnotationBeanPostProcessor's default is "true".
* @see org.springframework.jndi.JndiLocatorSupport#setResourceRef
*/
public void setResourceRef(boolean resourceRef) {
this.jndiLocator.setResourceRef(resourceRef);
}
/**
* Specify a JNDI name of the {@link java.util.concurrent.ThreadFactory} to delegate to,
* replacing the default JNDI name "java:comp/DefaultManagedThreadFactory".
* <p>This can either be a fully qualified JNDI name, or the JNDI name relative
* to the current environment naming context if "resourceRef" is set to "true".
* @see #setResourceRef
*/
public void setJndiName(String jndiName) {
this.jndiName = jndiName;
}
@Override
public void afterPropertiesSet() throws NamingException {
if (this.jndiName != null) {
try {
this.threadFactory = this.jndiLocator.lookup(this.jndiName, ThreadFactory.class);
}
catch (NamingException ex) {
if (logger.isTraceEnabled()) {
logger.trace("Failed to retrieve [" + this.jndiName + "] from JNDI", ex);
}
logger.info("Could not find default managed thread factory in JNDI - " +
"proceeding with default local thread factory");
}
}
}
@Override
public Thread newThread(Runnable runnable) {
if (this.threadFactory != null) {
return this.threadFactory.newThread(runnable);
}
else {
return super.newThread(runnable);
}
}
}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-context/src/main/java/org/springframework/scheduling/concurrent/DefaultManagedAwareThreadFactory.java
|
from email.parser import FeedParser
import os
import imp
import locale
import re
import sys
import shutil
import tempfile
import textwrap
import zipfile
from distutils.util import change_root
from pip.locations import (bin_py, running_under_virtualenv,PIP_DELETE_MARKER_FILENAME,
write_delete_marker_file, bin_user)
from pip.exceptions import (InstallationError, UninstallationError, UnsupportedWheel,
BestVersionAlreadyInstalled, InvalidWheelFilename,
DistributionNotFound, PreviousBuildDirError)
from pip.vcs import vcs
from pip.log import logger
from pip.util import (display_path, rmtree, ask, ask_path_exists, backup_dir,
is_installable_dir, is_local, dist_is_local,
dist_in_usersite, dist_in_site_packages, renames,
normalize_path, egg_link_path, make_path_relative,
call_subprocess, is_prerelease, normalize_name)
from pip.backwardcompat import (urlparse, urllib, uses_pycache,
ConfigParser, string_types, HTTPError,
get_python_version, b)
from pip.index import Link
from pip.locations import build_prefix
from pip.download import (PipSession, get_file_content, is_url, url_to_path,
path_to_url, is_archive_file,
unpack_vcs_link, is_vcs_url, is_file_url,
unpack_file_url, unpack_http_url)
import pip.wheel
from pip.wheel import move_wheel_files, Wheel, wheel_ext
from pip._vendor import pkg_resources, six
def read_text_file(filename):
    """Return the contents of *filename* as text.

    Try to decode the file contents with utf-8, the preferred system
    encoding (e.g., cp1252 on some Windows machines) and latin1, in that
    order.  Decoding a byte string with latin1 never raises an error, so
    in the worst case the returned string contains some garbage
    characters.
    """
    with open(filename, 'rb') as fp:
        raw = fp.read()
    for enc in ('utf-8', locale.getpreferredencoding(False), 'latin1'):
        try:
            return raw.decode(enc)
        except UnicodeDecodeError:
            continue
    # Unreachable in practice: latin1 accepts every byte sequence.  Raise
    # rather than relying on an assert, which is stripped under -O.
    raise UnicodeError("could not decode contents of %r" % filename)
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
             url=None, as_egg=False, update=True, prereleases=None,
             editable_options=None, from_bundle=False, pycompile=True):
    """
    req: requirement string or pkg_resources.Requirement, or None for
        'unnamed' URL/path requirements.
    comes_from: the parent requirement, or a string describing where this
        requirement originated.
    prereleases: when None, pre-release acceptability is derived from the
        requirement's own specifiers (see bottom of this method).
    """
    self.extras = ()
    if isinstance(req, string_types):
        req = pkg_resources.Requirement.parse(req)
        self.extras = req.extras
    self.req = req
    self.comes_from = comes_from
    self.source_dir = source_dir
    self.editable = editable
    if editable_options is None:
        editable_options = {}
    self.editable_options = editable_options
    self.url = url
    self.as_egg = as_egg
    self._egg_info_path = None
    # This holds the pkg_resources.Distribution object if this requirement
    # is already available:
    self.satisfied_by = None
    # This holds the pkg_resources.Distribution object if this requirement
    # conflicts with another installed distribution:
    self.conflicts_with = None
    self._temp_build_dir = None
    self._is_bundle = None
    # True if the editable should be updated:
    self.update = update
    # Set to True after successful installation
    self.install_succeeded = None
    # UninstallPathSet of uninstalled distribution (for possible rollback)
    self.uninstalled = None
    self.use_user_site = False
    self.target_dir = None
    self.from_bundle = from_bundle
    self.pycompile = pycompile
    # True if pre-releases are acceptable: either requested explicitly, or
    # implied because a specifier pins a pre-release version (except for
    # '!=' exclusions, which do not imply acceptance).
    if prereleases:
        self.prereleases = True
    elif self.req is not None:
        self.prereleases = any([is_prerelease(x[1]) and x[0] != "!=" for x in self.req.specs])
    else:
        self.prereleases = False
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None):
    """
    Create an InstallRequirement from an editable ('-e') requirement
    string (e.g. a VCS or file: URL with an #egg= fragment).
    """
    name, url, extras_override = parse_editable(editable_req, default_vcs)
    # Local file: URLs double as the source directory for the build.
    if url.startswith('file:'):
        source_dir = url_to_path(url)
    else:
        source_dir = None

    res = cls(name, comes_from, source_dir=source_dir,
              editable=True,
              url=url,
              editable_options=extras_override,
              prereleases=True)

    if extras_override is not None:
        res.extras = extras_override

    return res
@classmethod
def from_line(cls, name, comes_from=None, prereleases=None):
    """Creates an InstallRequirement from a name, which might be a
    requirement, directory containing 'setup.py', filename, or URL.
    """
    url = None
    name = name.strip()
    req = None
    path = os.path.normpath(os.path.abspath(name))
    link = None

    # Classify the line: URL, local installable directory, or archive file.
    if is_url(name):
        link = Link(name)
    elif os.path.isdir(path) and (os.path.sep in name or name.startswith('.')):
        if not is_installable_dir(path):
            raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % name)
        link = Link(path_to_url(name))
    elif is_archive_file(path):
        if not os.path.isfile(path):
            logger.warn('Requirement %r looks like a filename, but the file does not exist', name)
        link = Link(path_to_url(name))

    # If the line has an egg= definition, but isn't editable, pull the requirement out.
    # Otherwise, assume the name is the req for the non URL/path/archive case.
    if link and req is None:
        url = link.url_without_fragment
        req = link.egg_fragment  # when fragment is None, this will become an 'unnamed' requirement
        # Handle relative file URLs
        if link.scheme == 'file' and re.search(r'\.\./', url):
            url = path_to_url(os.path.normpath(os.path.abspath(link.path)))
        # fail early for invalid or unsupported wheels
        if link.ext == wheel_ext:
            wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
            if not wheel.supported():
                raise UnsupportedWheel("%s is not a supported wheel on this platform." % wheel.filename)
    else:
        req = name

    return cls(req, comes_from, url=url, prereleases=prereleases)
def __str__(self):
    """Human-readable description: requirement, source URL and provenance."""
    if self.req:
        text = str(self.req)
        if self.url:
            text = text + ' from %s' % self.url
    else:
        text = self.url
    if self.satisfied_by is not None:
        text = text + ' in %s' % display_path(self.satisfied_by.location)
    if self.comes_from:
        if isinstance(self.comes_from, string_types):
            origin = self.comes_from
        else:
            origin = self.comes_from.from_path()
        if origin:
            text = text + ' (from %s)' % origin
    return text
def from_path(self):
    """Return a '->'-separated provenance chain ending at this requirement."""
    if self.req is None:
        return None
    chain = str(self.req)
    if self.comes_from:
        parent = (self.comes_from
                  if isinstance(self.comes_from, string_types)
                  else self.comes_from.from_path())
        if parent:
            chain += '->' + parent
    return chain
def build_location(self, build_dir, unpack=True):
    """Return the directory this requirement should be unpacked/built in.

    Unnamed requirements get a fresh temporary directory (remembered in
    ``_temp_build_dir``) that is later relocated by
    correct_build_location().  NOTE(review): the ``unpack`` argument is
    accepted but never used in this method.
    """
    if self._temp_build_dir is not None:
        return self._temp_build_dir
    if self.req is None:
        # No name known yet (e.g. a bare URL): build in a temp dir and
        # remember where the "real" build dir would have been.
        self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
        self._ideal_build_dir = build_dir
        return self._temp_build_dir
    if self.editable:
        name = self.name.lower()
    else:
        name = self.name
    # FIXME: Is there a better place to create the build_dir? (hg and bzr need this)
    if not os.path.exists(build_dir):
        _make_build_dir(build_dir)
    return os.path.join(build_dir, name)
def correct_build_location(self):
    """If the build location was a temporary directory, this will move it
    to a new more permanent location"""
    if self.source_dir is not None:
        # Already built somewhere real; nothing to relocate.
        return
    assert self.req is not None
    assert self._temp_build_dir
    old_location = self._temp_build_dir
    new_build_dir = self._ideal_build_dir
    del self._ideal_build_dir
    if self.editable:
        name = self.name.lower()
    else:
        name = self.name
    new_location = os.path.join(new_build_dir, name)
    if not os.path.exists(new_build_dir):
        logger.debug('Creating directory %s' % new_build_dir)
        _make_build_dir(new_build_dir)
    if os.path.exists(new_location):
        raise InstallationError(
            'A package already exists in %s; please remove it to continue'
            % display_path(new_location))
    logger.debug('Moving package %s from %s to new location %s'
                 % (self, display_path(old_location), display_path(new_location)))
    shutil.move(old_location, new_location)
    self._temp_build_dir = new_location
    self.source_dir = new_location
    # Cached egg-info path is stale after the move.
    self._egg_info_path = None
@property
def name(self):
    """Project name from the parsed requirement, or None when unnamed."""
    return None if self.req is None else self.req.project_name
@property
def url_name(self):
    # URL-quoted form of the unnormalized project name, for building index
    # URLs.  NOTE(review): ``urllib.quote`` is the Python 2 location; this
    # relies on whatever compat import this module performs -- confirm.
    if self.req is None:
        return None
    return urllib.quote(self.req.unsafe_name)
@property
def setup_py(self):
    """Absolute path of this requirement's setup.py.

    Raises InstallationError when setuptools is not importable, since
    the script could not be run meaningfully without it.
    """
    try:
        import setuptools
    except ImportError:
        # Setuptools is not available
        raise InstallationError(
            "setuptools must be installed to install from a source "
            "distribution"
        )
    setup_file = 'setup.py'
    if self.editable_options and 'subdirectory' in self.editable_options:
        # Honor the 'subdirectory=' option of editable requirements.
        setup_py = os.path.join(self.source_dir,
                                self.editable_options['subdirectory'],
                                setup_file)
    else:
        setup_py = os.path.join(self.source_dir, setup_file)
    # Python2 __file__ should not be unicode
    if six.PY2 and isinstance(setup_py, six.text_type):
        setup_py = setup_py.encode(sys.getfilesystemencoding())
    return setup_py
def run_egg_info(self, force_root_egg_info=False):
    """Run ``setup.py egg_info`` for this requirement's source tree.

    Metadata goes into a 'pip-egg-info' subdirectory unless the
    requirement is editable or *force_root_egg_info* is set, in which
    case it is written at the source root.  Afterwards, an unnamed
    requirement gets its ``req`` parsed from PKG-INFO and its temporary
    build directory relocated via correct_build_location().
    """
    assert self.source_dir
    if self.name:
        logger.notify('Running setup.py (path:%s) egg_info for package %s' % (self.setup_py, self.name))
    else:
        logger.notify('Running setup.py (path:%s) egg_info for package from %s' % (self.setup_py, self.url))
    logger.indent += 2
    try:
        # if it's distribute>=0.7, it won't contain an importable
        # setuptools, and having an egg-info dir blocks the ability of
        # setup.py to find setuptools plugins, so delete the egg-info dir if
        # no setuptools. it will get recreated by the run of egg_info
        # NOTE: this self.name check only works when installing from a specifier
        # (not archive path/urls)
        # TODO: take this out later
        if self.name == 'distribute' and not os.path.isdir(os.path.join(self.source_dir, 'setuptools')):
            rmtree(os.path.join(self.source_dir, 'distribute.egg-info'))
        # Substitute the placeholders in the _run_setup_py template.
        script = self._run_setup_py
        script = script.replace('__SETUP_PY__', repr(self.setup_py))
        script = script.replace('__PKG_NAME__', repr(self.name))
        egg_info_cmd = [sys.executable, '-c', script, 'egg_info']
        # We can't put the .egg-info files at the root, because then the source code will be mistaken
        # for an installed egg, causing problems
        if self.editable or force_root_egg_info:
            egg_base_option = []
        else:
            egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
            if not os.path.exists(egg_info_dir):
                os.makedirs(egg_info_dir)
            egg_base_option = ['--egg-base', 'pip-egg-info']
        call_subprocess(
            egg_info_cmd + egg_base_option,
            cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False,
            command_level=logger.VERBOSE_DEBUG,
            command_desc='python setup.py egg_info')
    finally:
        logger.indent -= 2
    if not self.req:
        # Metadata now exists, so the name/version can finally be learned.
        self.req = pkg_resources.Requirement.parse(
            "%(Name)s==%(Version)s" % self.pkg_info())
        self.correct_build_location()
## FIXME: This is a lame hack, entirely for PasteScript which has
## a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
"""
def egg_info_data(self, filename):
    """Read *filename* from this requirement's egg-info metadata.

    Prefers the metadata of an already-installed distribution when one
    satisfies the requirement; otherwise reads from the unpacked source
    tree.  Returns None when the file does not exist.
    """
    if self.satisfied_by is not None:
        if not self.satisfied_by.has_metadata(filename):
            return None
        return self.satisfied_by.get_metadata(filename)
    assert self.source_dir
    metadata_file = self.egg_info_path(filename)
    if not os.path.exists(metadata_file):
        return None
    return read_text_file(metadata_file)
def egg_info_path(self, filename):
    """Return the absolute path of *filename* inside the .egg-info dir.

    The containing .egg-info directory is located once and cached in
    ``self._egg_info_path``: editable requirements are searched
    recursively under the source tree (skipping VCS, virtualenv-looking
    and test directories), while regular builds look only inside the
    'pip-egg-info' directory created by run_egg_info().

    Raises InstallationError when no .egg-info directory can be found.
    """
    if self._egg_info_path is None:
        if self.editable:
            base = self.source_dir
        else:
            base = os.path.join(self.source_dir, 'pip-egg-info')
        filenames = os.listdir(base)
        if self.editable:
            filenames = []
            for root, dirs, files in os.walk(base):
                for dir in vcs.dirnames:
                    if dir in dirs:
                        dirs.remove(dir)
                # Iterate over a copy of ``dirs``, since mutating
                # a list while iterating over it can cause trouble.
                # (See https://github.com/pypa/pip/pull/462.)
                for dir in list(dirs):
                    # Don't search in anything that looks like a virtualenv environment
                    if (os.path.exists(os.path.join(root, dir, 'bin', 'python'))
                            or os.path.exists(os.path.join(root, dir, 'Scripts', 'Python.exe'))):
                        dirs.remove(dir)
                    # Also don't search through tests.  ``elif`` (was a
                    # second ``if``) so a directory already removed by the
                    # virtualenv check above cannot be removed twice, which
                    # would raise ValueError.
                    elif dir == 'test' or dir == 'tests':
                        dirs.remove(dir)
                filenames.extend([os.path.join(root, dir)
                                  for dir in dirs])
            filenames = [f for f in filenames if f.endswith('.egg-info')]
        if not filenames:
            raise InstallationError('No files/directories in %s (from %s)' % (base, filename))
        # if we have more than one match, we pick the toplevel one. This can
        # easily be the case if there is a dist folder which contains an
        # extracted tarball for testing purposes.
        if len(filenames) > 1:
            filenames.sort(key=lambda x: x.count(os.path.sep) +
                           (os.path.altsep and
                            x.count(os.path.altsep) or 0))
        self._egg_info_path = os.path.join(base, filenames[0])
    return os.path.join(self._egg_info_path, filename)
def egg_info_lines(self, filename):
    """Return non-blank, non-comment lines of an egg-info metadata file."""
    data = self.egg_info_data(filename)
    if not data:
        return []
    stripped_lines = (line.strip() for line in data.splitlines())
    return [line for line in stripped_lines
            if line and not line.startswith('#')]
def pkg_info(self):
    """Parse the egg-info PKG-INFO file into a message object.

    Always returns a (possibly empty) parsed object; when no PKG-INFO is
    found a warning is logged and an empty string is fed to the parser.
    """
    p = FeedParser()
    data = self.egg_info_data('PKG-INFO')
    if not data:
        logger.warn('No PKG-INFO file found in %s' % display_path(self.egg_info_path('PKG-INFO')))
    p.feed(data or '')
    return p.close()
@property
def dependency_links(self):
    # Extra URLs declared via setup(dependency_links=...), one per line.
    return self.egg_info_lines('dependency_links.txt')
# Matches extras section headers like '[extra_name]' in requires.txt.
_requirements_section_re = re.compile(r'\[(.*?)\]')

def requirements(self, extras=()):
    """Yield dependency lines from the egg-info requires.txt.

    Lines under an '[extra]' section header are yielded only when that
    extra is listed in *extras*; section names are matched lowercased.
    """
    in_extra = None
    for line in self.egg_info_lines('requires.txt'):
        match = self._requirements_section_re.match(line.lower())
        if match:
            in_extra = match.group(1)
            continue
        if in_extra and in_extra not in extras:
            logger.debug('skipping extra %s' % in_extra)
            # Skip requirement for an extra we aren't requiring
            continue
        yield line
@property
def absolute_versions(self):
    """Yield every version pinned with '==' in the requirement specifier."""
    for qualifier, version in self.req.specs:
        if qualifier != '==':
            continue
        yield version
@property
def installed_version(self):
    # Version string as recorded in the source tree's PKG-INFO metadata.
    return self.pkg_info()['version']
def assert_source_matches_version(self):
    """Log whether the unpacked source's version satisfies the specifier.

    Despite the name this only *warns* on mismatch; it does not raise.
    """
    assert self.source_dir
    version = self.installed_version
    if version not in self.req:
        logger.warn('Requested %s, but installing version %s' % (self, self.installed_version))
    else:
        logger.debug('Source in %s has version %s, which satisfies requirement %s'
                     % (display_path(self.source_dir), version, self))
def update_editable(self, obtain=True):
    """Check out or export the editable's VCS working copy in source_dir.

    'file:' URLs and requirements with ``self.update`` false are left
    untouched.  *obtain* selects backend.obtain() (checkout/update)
    versus backend.export().
    """
    if not self.url:
        logger.info("Cannot update repository at %s; repository location is unknown" % self.source_dir)
        return
    assert self.editable
    assert self.source_dir
    if self.url.startswith('file:'):
        # Static paths don't get updated
        return
    assert '+' in self.url, "bad url: %r" % self.url
    if not self.update:
        return
    # Editable VCS URLs have the form 'vcstype+real_url'.
    vc_type, url = self.url.split('+', 1)
    backend = vcs.get_backend(vc_type)
    if backend:
        vcs_backend = backend(self.url)
        if obtain:
            vcs_backend.obtain(self.source_dir)
        else:
            vcs_backend.export(self.source_dir)
    else:
        assert 0, (
            'Unexpected version control type (in %s): %s'
            % (self.url, vc_type))
def uninstall(self, auto_confirm=False):
    """
    Uninstall the distribution currently satisfying this requirement.
    Prompts before removing or modifying files unless
    ``auto_confirm`` is True.
    Refuses to delete or modify files outside of ``sys.prefix`` -
    thus uninstallation within a virtual environment can only
    modify that virtual environment, even if the virtualenv is
    linked to global site-packages.
    """
    if not self.check_if_exists():
        raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,))
    dist = self.satisfied_by or self.conflicts_with
    # Candidate paths are collected first; nothing is touched until
    # paths_to_remove.remove() at the very end.
    paths_to_remove = UninstallPathSet(dist)
    pip_egg_info_path = os.path.join(dist.location,
                                     dist.egg_name()) + '.egg-info'
    dist_info_path = os.path.join(dist.location,
                                  '-'.join(dist.egg_name().split('-')[:2])
                                  ) + '.dist-info'
    # workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
    debian_egg_info_path = pip_egg_info_path.replace(
        '-py%s' % pkg_resources.PY_MAJOR, '')
    easy_install_egg = dist.egg_name() + '.egg'
    develop_egg_link = egg_link_path(dist)
    pip_egg_info_exists = os.path.exists(pip_egg_info_path)
    debian_egg_info_exists = os.path.exists(debian_egg_info_path)
    dist_info_exists = os.path.exists(dist_info_path)
    if pip_egg_info_exists or debian_egg_info_exists:
        # package installed by pip
        if pip_egg_info_exists:
            egg_info_path = pip_egg_info_path
        else:
            egg_info_path = debian_egg_info_path
        paths_to_remove.add(egg_info_path)
        if dist.has_metadata('installed-files.txt'):
            # Paths in installed-files.txt are relative to the egg-info dir.
            for installed_file in dist.get_metadata('installed-files.txt').splitlines():
                path = os.path.normpath(os.path.join(egg_info_path, installed_file))
                paths_to_remove.add(path)
        #FIXME: need a test for this elif block
        #occurs with --single-version-externally-managed/--record outside of pip
        elif dist.has_metadata('top_level.txt'):
            if dist.has_metadata('namespace_packages.txt'):
                namespaces = dist.get_metadata('namespace_packages.txt')
            else:
                namespaces = []
            for top_level_pkg in [p for p
                                  in dist.get_metadata('top_level.txt').splitlines()
                                  if p and p not in namespaces]:
                path = os.path.join(dist.location, top_level_pkg)
                paths_to_remove.add(path)
                # Cover both the package-directory and module-file forms.
                paths_to_remove.add(path + '.py')
                paths_to_remove.add(path + '.pyc')
    elif dist.location.endswith(easy_install_egg):
        # package installed by easy_install
        paths_to_remove.add(dist.location)
        easy_install_pth = os.path.join(os.path.dirname(dist.location),
                                        'easy-install.pth')
        paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
    elif develop_egg_link:
        # develop egg
        fh = open(develop_egg_link, 'r')
        link_pointer = os.path.normcase(fh.readline().strip())
        fh.close()
        assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location)
        paths_to_remove.add(develop_egg_link)
        easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
                                        'easy-install.pth')
        paths_to_remove.add_pth(easy_install_pth, dist.location)
    elif dist_info_exists:
        # Wheel-installed package: use the RECORD-based path list.
        for path in pip.wheel.uninstallation_paths(dist):
            paths_to_remove.add(path)
    # find distutils scripts= scripts
    if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
        for script in dist.metadata_listdir('scripts'):
            if dist_in_usersite(dist):
                bin_dir = bin_user
            else:
                bin_dir = bin_py
            paths_to_remove.add(os.path.join(bin_dir, script))
            if sys.platform == 'win32':
                paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
    # find console_scripts
    if dist.has_metadata('entry_points.txt'):
        config = ConfigParser.SafeConfigParser()
        config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt')))
        if config.has_section('console_scripts'):
            for name, value in config.items('console_scripts'):
                if dist_in_usersite(dist):
                    bin_dir = bin_user
                else:
                    bin_dir = bin_py
                paths_to_remove.add(os.path.join(bin_dir, name))
                if sys.platform == 'win32':
                    # Wrappers generated for console scripts on Windows.
                    paths_to_remove.add(os.path.join(bin_dir, name) + '.exe')
                    paths_to_remove.add(os.path.join(bin_dir, name) + '.exe.manifest')
                    paths_to_remove.add(os.path.join(bin_dir, name) + '-script.py')
    # Actually remove (prompting unless auto_confirm), and keep the set
    # so commit_uninstall()/rollback_uninstall() can finish the job.
    paths_to_remove.remove(auto_confirm)
    self.uninstalled = paths_to_remove
def rollback_uninstall(self):
    """Restore the files removed by a previous uninstall() call.

    Logs an error (and does nothing else) when nothing was uninstalled.
    """
    if self.uninstalled:
        self.uninstalled.rollback()
    else:
        # Fix: this class has no ``project_name`` attribute (that lives on
        # pkg_resources distributions); referencing it here raised
        # AttributeError on the error path.  Use the ``name`` property.
        logger.error("Can't rollback %s, nothing uninstalled."
                     % (self.name,))
def commit_uninstall(self):
    """Finalize a previous uninstall(), discarding the saved backups.

    Logs an error (and does nothing else) when nothing was uninstalled.
    """
    if self.uninstalled:
        self.uninstalled.commit()
    else:
        # Fix: ``self.project_name`` does not exist on this class and
        # raised AttributeError on the error path; use ``self.name``.
        logger.error("Can't commit %s, nothing uninstalled."
                     % (self.name,))
def archive(self, build_dir):
    """Create a ``<name>-<version>.zip`` of the source tree in *build_dir*.

    When the target archive already exists the user chooses to (i)gnore,
    (w)ipe or (b)ackup it.  The 'pip-egg-info' directory and the delete
    marker file are excluded from the archive.
    """
    assert self.source_dir
    create_archive = True
    archive_name = '%s-%s.zip' % (self.name, self.installed_version)
    archive_path = os.path.join(build_dir, archive_name)
    if os.path.exists(archive_path):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
            display_path(archive_path), ('i', 'w', 'b'))
        if response == 'i':
            create_archive = False
        elif response == 'w':
            logger.warn('Deleting %s' % display_path(archive_path))
            os.remove(archive_path)
        elif response == 'b':
            dest_file = backup_dir(archive_path)
            logger.warn('Backing up %s to %s'
                        % (display_path(archive_path), display_path(dest_file)))
            shutil.move(archive_path, dest_file)
    if create_archive:
        # Locals renamed from ``zip``/``dir`` so the builtins of the same
        # names are no longer shadowed.
        zip_file = zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED)
        source_root = os.path.normcase(os.path.abspath(self.source_dir))
        for dirpath, dirnames, filenames in os.walk(source_root):
            if 'pip-egg-info' in dirnames:
                dirnames.remove('pip-egg-info')
            for dirname in dirnames:
                # Write an explicit (empty) entry for each directory so the
                # archive preserves the tree structure.
                dirname = os.path.join(dirpath, dirname)
                name = self._clean_zip_name(dirname, source_root)
                zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
                zipdir.external_attr = 0x1ED << 16  # 0o755
                zip_file.writestr(zipdir, '')
            for filename in filenames:
                if filename == PIP_DELETE_MARKER_FILENAME:
                    continue
                filename = os.path.join(dirpath, filename)
                name = self._clean_zip_name(filename, source_root)
                zip_file.write(filename, self.name + '/' + name)
        zip_file.close()
        # NOTE(review): this decrement has no matching ``indent += 2`` in
        # this method; it appears to assume the caller raised the indent
        # level beforehand -- confirm before changing.
        logger.indent -= 2
        logger.notify('Saved %s' % display_path(archive_path))
def _clean_zip_name(self, name, prefix):
    """Strip ``prefix + os.sep`` from *name* and normalize separators to '/'."""
    expected_head = prefix + os.path.sep
    assert name.startswith(expected_head), (
        "name %r doesn't start with prefix %r" % (name, prefix))
    relative = name[len(expected_head):]
    return relative.replace(os.path.sep, '/')
def install(self, install_options, global_options=(), root=None):
    """Install the requirement.

    Editable requirements are delegated to install_editable(); wheels
    are moved into place; sdists go through ``setup.py install`` with
    ``--record`` so the set of created files is known and saved as
    installed-files.txt for later uninstallation.
    """
    if self.editable:
        self.install_editable(install_options, global_options)
        return
    if self.is_wheel:
        version = pip.wheel.wheel_version(self.source_dir)
        pip.wheel.check_compatibility(version, self.name)
        self.move_wheel_files(self.source_dir, root=root)
        self.install_succeeded = True
        return
    # sdist path: record the installed files in a temp file.
    temp_location = tempfile.mkdtemp('-record', 'pip-')
    record_filename = os.path.join(temp_location, 'install-record.txt')
    try:
        install_args = [sys.executable]
        install_args.append('-c')
        install_args.append(
            "import setuptools, tokenize;__file__=%r;"\
            "exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py)
        install_args += list(global_options) + ['install','--record', record_filename]
        if not self.as_egg:
            install_args += ['--single-version-externally-managed']
        if root is not None:
            install_args += ['--root', root]
        if self.pycompile:
            install_args += ["--compile"]
        else:
            install_args += ["--no-compile"]
        if running_under_virtualenv():
            ## FIXME: I'm not sure if this is a reasonable location; probably not
            ## but we can't put it in the default location, as that is a virtualenv symlink that isn't writable
            install_args += ['--install-headers',
                             os.path.join(sys.prefix, 'include', 'site',
                                          'python' + get_python_version())]
        logger.notify('Running setup.py install for %s' % self.name)
        logger.indent += 2
        try:
            call_subprocess(install_args + install_options,
                            cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False)
        finally:
            logger.indent -= 2
        if not os.path.exists(record_filename):
            logger.notify('Record file %s not found' % record_filename)
            return
        self.install_succeeded = True
        if self.as_egg:
            # there's no --always-unzip option we can pass to install command
            # so we unable to save the installed-files.txt
            return

        def prepend_root(path):
            # Resolve recorded absolute paths against --root, if given.
            if root is None or not os.path.isabs(path):
                return path
            else:
                return change_root(root, path)

        # First pass over the record: locate the .egg-info directory
        # (for-else: warn and bail out when none is recorded).
        f = open(record_filename)
        for line in f:
            line = line.strip()
            if line.endswith('.egg-info'):
                egg_info_dir = prepend_root(line)
                break
        else:
            logger.warn('Could not find .egg-info directory in install record for %s' % self)
            ## FIXME: put the record somewhere
            ## FIXME: should this be an error?
            return
        f.close()
        # Second pass: rewrite each recorded path relative to the
        # egg-info dir and save as installed-files.txt (read by
        # uninstall() later).
        new_lines = []
        f = open(record_filename)
        for line in f:
            filename = line.strip()
            if os.path.isdir(filename):
                filename += os.path.sep
            new_lines.append(make_path_relative(prepend_root(filename), egg_info_dir))
        f.close()
        f = open(os.path.join(egg_info_dir, 'installed-files.txt'), 'w')
        f.write('\n'.join(new_lines)+'\n')
        f.close()
    finally:
        if os.path.exists(record_filename):
            os.remove(record_filename)
        os.rmdir(temp_location)
def remove_temporary_source(self):
    """Remove the source files from this requirement, if they are marked
    for deletion"""
    if self.is_bundle or os.path.exists(self.delete_marker_filename):
        logger.info('Removing source in %s' % self.source_dir)
        if self.source_dir:
            rmtree(self.source_dir)
        self.source_dir = None
    # Always drop any leftover temporary build directory as well.
    if self._temp_build_dir and os.path.exists(self._temp_build_dir):
        rmtree(self._temp_build_dir)
    self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
    """Run ``setup.py develop --no-deps`` to install this requirement in-place."""
    logger.notify('Running setup.py develop for %s' % self.name)
    logger.indent += 2
    try:
        ## FIXME: should we do --install-headers here too?
        call_subprocess(
            [sys.executable, '-c',
             "import setuptools, tokenize; __file__=%r; exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py]
            + list(global_options) + ['develop', '--no-deps'] + list(install_options),
            cwd=self.source_dir, filter_stdout=self._filter_install,
            show_stdout=False)
    finally:
        logger.indent -= 2
    self.install_succeeded = True
def _filter_install(self, line):
    """Classify a line of setup.py output for logging.

    Routine progress lines are demoted to INFO; anything else stays at
    NOTIFY.  Returns a (level, line) tuple for call_subprocess.
    """
    routine_patterns = (
        r'^running .*', r'^writing .*', '^creating .*', '^[Cc]opying .*',
        r'^reading .*', r"^removing .*\.egg-info' \(and everything under it\)$",
        r'^byte-compiling ',
        # Not sure what this warning is, but it seems harmless:
        r"^warning: manifest_maker: standard file '-c' not found$",
    )
    stripped = line.strip()
    for pattern in routine_patterns:
        if re.search(pattern, stripped):
            return (logger.INFO, line)
    return (logger.NOTIFY, line)
def check_if_exists(self):
    """Find an installed distribution that satisfies or conflicts
    with this requirement, and set self.satisfied_by or
    self.conflicts_with appropriately."""
    if self.req is None:
        # Unnamed requirement: nothing to look up yet.
        return False
    try:
        # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
        # if we've already set distribute as a conflict to setuptools
        # then this check has already run before. we don't want it to
        # run again, and return False, since it would block the uninstall
        # TODO: remove this later
        if (self.req.project_name == 'setuptools'
                and self.conflicts_with
                and self.conflicts_with.project_name == 'distribute'):
            return True
        else:
            self.satisfied_by = pkg_resources.get_distribution(self.req)
    except pkg_resources.DistributionNotFound:
        return False
    except pkg_resources.VersionConflict:
        # Something is installed, but at a version outside the specifier.
        existing_dist = pkg_resources.get_distribution(self.req.project_name)
        if self.use_user_site:
            if dist_in_usersite(existing_dist):
                self.conflicts_with = existing_dist
            elif running_under_virtualenv() and dist_in_site_packages(existing_dist):
                raise InstallationError("Will not install to the user site because it will lack sys.path precedence to %s in %s"
                                        % (existing_dist.project_name, existing_dist.location))
        else:
            self.conflicts_with = existing_dist
    return True
@property
def is_wheel(self):
    # NOTE(review): substring test, not an extension check -- any URL
    # containing '.whl' anywhere matches; confirm before tightening.
    return self.url and '.whl' in self.url
@property
def is_bundle(self):
    # A pip bundle is detected by a manifest file in the temporary build
    # directory; the result is cached in self._is_bundle.
    if self._is_bundle is not None:
        return self._is_bundle
    base = self._temp_build_dir
    if not base:
        ## FIXME: this doesn't seem right:
        return False
    self._is_bundle = (os.path.exists(os.path.join(base, 'pip-manifest.txt'))
                       or os.path.exists(os.path.join(base, 'pyinstall-manifest.txt')))
    return self._is_bundle
def bundle_requirements(self):
    """Yield an InstallRequirement for every package shipped in this bundle.

    Editable packages (collected in _bundle_editable_dirs) get their VCS
    URL reconstructed from the backend's bundle file; plain build
    packages (_bundle_build_dirs) are yielded without a URL.
    """
    for dest_dir in self._bundle_editable_dirs:
        package = os.path.basename(dest_dir)
        ## FIXME: svnism:
        for vcs_backend in vcs.backends:
            url = rev = None
            vcs_bundle_file = os.path.join(
                dest_dir, vcs_backend.bundle_file)
            if os.path.exists(vcs_bundle_file):
                vc_type = vcs_backend.name
                fp = open(vcs_bundle_file)
                content = fp.read()
                fp.close()
                url, rev = vcs_backend().parse_vcs_bundle_file(content)
                break
        if url:
            # Rebuild the 'vcstype+url@rev' editable URL form.
            url = '%s+%s@%s' % (vc_type, url, rev)
        else:
            url = None
        yield InstallRequirement(
            package, self, editable=True, url=url,
            update=False, source_dir=dest_dir, from_bundle=True)
    for dest_dir in self._bundle_build_dirs:
        package = os.path.basename(dest_dir)
        yield InstallRequirement(package, self, source_dir=dest_dir, from_bundle=True)
def move_bundle_files(self, dest_build_dir, dest_src_dir):
    """Move a bundle's src/ and build/ subtrees out of the temp build dir.

    Populates _bundle_editable_dirs / _bundle_build_dirs with the final
    destination paths and clears _temp_build_dir.  Existing destination
    directories are left in place with a warning.
    """
    base = self._temp_build_dir
    assert base
    src_dir = os.path.join(base, 'src')
    build_dir = os.path.join(base, 'build')
    bundle_build_dirs = []
    bundle_editable_dirs = []
    for source_dir, dest_dir, dir_collection in [
            (src_dir, dest_src_dir, bundle_editable_dirs),
            (build_dir, dest_build_dir, bundle_build_dirs)]:
        if os.path.exists(source_dir):
            for dirname in os.listdir(source_dir):
                dest = os.path.join(dest_dir, dirname)
                dir_collection.append(dest)
                if os.path.exists(dest):
                    logger.warn('The directory %s (containing package %s) already exists; cannot move source from bundle %s'
                                % (dest, dirname, self))
                    continue
                if not os.path.exists(dest_dir):
                    logger.info('Creating directory %s' % dest_dir)
                    os.makedirs(dest_dir)
                shutil.move(os.path.join(source_dir, dirname), dest)
            if not os.listdir(source_dir):
                # Tidy up the now-empty src/ or build/ directory.
                os.rmdir(source_dir)
    self._temp_build_dir = None
    self._bundle_build_dirs = bundle_build_dirs
    self._bundle_editable_dirs = bundle_editable_dirs
def move_wheel_files(self, wheeldir, root=None):
    """Install this requirement's unpacked wheel contents from *wheeldir*.

    Delegates to the module-level move_wheel_files() helper, forwarding
    the per-requirement install options (user site, target dir, root,
    pycompile).
    """
    move_wheel_files(
        self.name, self.req, wheeldir,
        user=self.use_user_site,
        home=self.target_dir,
        root=root,
        pycompile=self.pycompile,
    )
@property
def delete_marker_filename(self):
    # Marker file whose presence flags source_dir for deletion after
    # install (see remove_temporary_source()).
    assert self.source_dir
    return os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)
class Requirements(object):
    """A minimal dict-like container that preserves key insertion order."""

    def __init__(self):
        self._keys = []
        self._dict = {}

    def keys(self):
        """Keys in the order they were first inserted."""
        return self._keys

    def values(self):
        """Values in key-insertion order."""
        return [self._dict[k] for k in self._keys]

    def __contains__(self, item):
        return item in self._keys

    def __setitem__(self, key, value):
        # Re-assigning an existing key updates the value but keeps the
        # key's original position.
        if key not in self._keys:
            self._keys.append(key)
        self._dict[key] = value

    def __getitem__(self, key):
        return self._dict[key]

    def __repr__(self):
        rendered = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
        return 'Requirements({%s})' % ', '.join(rendered)
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, download_cache=None,
             upgrade=False, ignore_installed=False, as_egg=False,
             target_dir=None, ignore_dependencies=False,
             force_reinstall=False, use_user_site=False, session=None,
             pycompile=True, wheel_download_dir=None):
    """Store configuration for preparing/installing a set of requirements.

    Only bookkeeping happens here; requirements are registered later via
    add_requirement().
    """
    self.build_dir = build_dir
    self.src_dir = src_dir
    self.download_dir = download_dir
    if download_cache:
        # Normalize '~' early so later path checks work.
        download_cache = os.path.expanduser(download_cache)
    self.download_cache = download_cache
    self.upgrade = upgrade
    self.ignore_installed = ignore_installed
    self.force_reinstall = force_reinstall
    self.requirements = Requirements()
    # Mapping of alias: real_name
    self.requirement_aliases = {}
    # Requirements with no name yet (bare URLs/paths without #egg=).
    self.unnamed_requirements = []
    self.ignore_dependencies = ignore_dependencies
    self.successfully_downloaded = []
    self.successfully_installed = []
    self.reqs_to_cleanup = []
    self.as_egg = as_egg
    self.use_user_site = use_user_site
    self.target_dir = target_dir  #set from --target option
    self.session = session or PipSession()
    self.pycompile = pycompile
    self.wheel_download_dir = wheel_download_dir
def __str__(self):
    """Space-separated top-level requirements, sorted by lowercase name."""
    top_level = [r for r in self.requirements.values() if not r.comes_from]
    top_level.sort(key=lambda r: r.name.lower())
    return ' '.join([str(r.req) for r in top_level])
def add_requirement(self, install_req):
    """Register *install_req*, propagating set-wide install options onto it.

    Named requirements are stored by name (with a lowercase alias when
    the name contains uppercase letters); unnamed ones go to
    unnamed_requirements.  Raises InstallationError on duplicate names.
    """
    name = install_req.name
    install_req.as_egg = self.as_egg
    install_req.use_user_site = self.use_user_site
    install_req.target_dir = self.target_dir
    install_req.pycompile = self.pycompile
    if not name:
        #url or path requirement w/o an egg fragment
        self.unnamed_requirements.append(install_req)
    else:
        if self.has_requirement(name):
            raise InstallationError(
                'Double requirement given: %s (already in %s, name=%r)'
                % (install_req, self.get_requirement(name), name))
        self.requirements[name] = install_req
        ## FIXME: what about other normalizations? E.g., _ vs. -?
        if name.lower() != name:
            self.requirement_aliases[name.lower()] = name
def has_requirement(self, project_name):
    """True when *project_name* (checked case-insensitively) is tracked."""
    for candidate in (project_name, project_name.lower()):
        if (candidate in self.requirements
                or candidate in self.requirement_aliases):
            return True
    return False
@property
def has_requirements(self):
    # Truthy when any named or unnamed requirement is tracked.  Note the
    # result is a list (not a bool): callers use it only for truthiness.
    return list(self.requirements.values()) or self.unnamed_requirements
@property
def has_editables(self):
    """True when any tracked requirement, named or unnamed, is editable."""
    for group in (self.requirements.values(), self.unnamed_requirements):
        if any(req.editable for req in group):
            return True
    return False
@property
def is_download(self):
    """True when this run is a download-only operation (a download dir
    was configured and exists).

    Side effect: expands '~' in self.download_dir on each truthy access.
    Raises InstallationError when the directory does not exist.
    """
    if self.download_dir:
        self.download_dir = os.path.expanduser(self.download_dir)
        if os.path.exists(self.download_dir):
            return True
        else:
            logger.fatal('Could not find download directory')
            raise InstallationError(
                "Could not find or access download directory '%s'"
                % display_path(self.download_dir))
    return False
def get_requirement(self, project_name):
    """Look up a tracked requirement by exact or lowercased name.

    Follows the lowercase alias table when needed; raises KeyError when
    the project is unknown.
    """
    for candidate in (project_name, project_name.lower()):
        if candidate in self.requirements:
            return self.requirements[candidate]
        if candidate in self.requirement_aliases:
            return self.requirements[self.requirement_aliases[candidate]]
    raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
    """Uninstall every named requirement in the set, committing removals
    immediately (no rollback once committed)."""
    for req in self.requirements.values():
        req.uninstall(auto_confirm=auto_confirm)
        req.commit_uninstall()
def locate_files(self):
    """Resolve source_dir for every requirement without downloading.

    Mirrors prepare_files()'s satisfied/upgrade bookkeeping but only
    verifies that the expected build/source directories already exist
    (the --no-download / --no-install workflow).
    """
    ## FIXME: duplicates code from prepare_files; relevant code should
    ## probably be factored out into a separate method
    unnamed = list(self.unnamed_requirements)
    reqs = list(self.requirements.values())
    while reqs or unnamed:
        # Unnamed requirements are processed first, then named ones.
        if unnamed:
            req_to_install = unnamed.pop(0)
        else:
            req_to_install = reqs.pop(0)
        install_needed = True
        if not self.ignore_installed and not req_to_install.editable:
            req_to_install.check_if_exists()
            if req_to_install.satisfied_by:
                if self.upgrade:
                    #don't uninstall conflict if user install and and conflict is not user install
                    if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
                        req_to_install.conflicts_with = req_to_install.satisfied_by
                    req_to_install.satisfied_by = None
                else:
                    install_needed = False
            if req_to_install.satisfied_by:
                logger.notify('Requirement already satisfied '
                              '(use --upgrade to upgrade): %s'
                              % req_to_install)
        if req_to_install.editable:
            if req_to_install.source_dir is None:
                req_to_install.source_dir = req_to_install.build_location(self.src_dir)
        elif install_needed:
            req_to_install.source_dir = req_to_install.build_location(self.build_dir, not self.is_download)
        if req_to_install.source_dir is not None and not os.path.isdir(req_to_install.source_dir):
            raise InstallationError('Could not install requirement %s '
                                    'because source folder %s does not exist '
                                    '(perhaps --no-download was used without first running '
                                    'an equivalent install with --no-install?)'
                                    % (req_to_install, req_to_install.source_dir))
def prepare_files(self, finder, force_root_egg_info=False, bundle=False):
"""Prepare process. Create temp directories, download and/or unpack files."""
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
best_installed = False
not_found = None
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
if not self.force_reinstall and not req_to_install.url:
try:
url = finder.find_requirement(
req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
best_installed = True
install = False
except DistributionNotFound:
not_found = sys.exc_info()[1]
else:
# Avoid the need to call find_requirement again
req_to_install.url = url.url
if not best_installed:
#don't uninstall conflict if user install and conflict is not user install
if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if req_to_install.satisfied_by:
if best_installed:
logger.notify('Requirement already up-to-date: %s'
% req_to_install)
else:
logger.notify('Requirement already satisfied '
'(use --upgrade to upgrade): %s'
% req_to_install)
if req_to_install.editable:
logger.notify('Obtaining %s' % req_to_install)
elif install:
if req_to_install.url and req_to_install.url.lower().startswith('file:'):
logger.notify('Unpacking %s' % display_path(url_to_path(req_to_install.url)))
else:
logger.notify('Downloading/unpacking %s' % req_to_install)
logger.indent += 2
try:
is_bundle = False
is_wheel = False
if req_to_install.editable:
if req_to_install.source_dir is None:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
else:
location = req_to_install.source_dir
if not os.path.exists(self.build_dir):
_make_build_dir(self.build_dir)
req_to_install.update_editable(not self.is_download)
if self.is_download:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.run_egg_info()
elif install:
##@@ if filesystem packages are not marked
##editable in a req, a non deterministic error
##occurs when the script attempts to unpack the
##build directory
# NB: This call can result in the creation of a temporary build directory
location = req_to_install.build_location(self.build_dir, not self.is_download)
unpack = True
url = None
# In the case where the req comes from a bundle, we should
# assume a build dir exists and move on
if req_to_install.from_bundle:
pass
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
elif os.path.exists(os.path.join(location, 'setup.py')):
raise PreviousBuildDirError(textwrap.dedent("""
pip can't proceed with requirement '%s' due to a pre-existing build directory.
location: %s
This is likely due to a previous installation that failed.
pip is being responsible and not assuming it can delete this.
Please delete it and try again.
""" % (req_to_install, location)))
else:
## FIXME: this won't upgrade when there's an existing package unpacked in `location`
if req_to_install.url is None:
if not_found:
raise not_found
url = finder.find_requirement(req_to_install, upgrade=self.upgrade)
else:
## FIXME: should req_to_install.url already be a link?
url = Link(req_to_install.url)
assert url
if url:
try:
if (
url.filename.endswith(wheel_ext)
and self.wheel_download_dir
):
# when doing 'pip wheel`
download_dir = self.wheel_download_dir
do_download = True
else:
download_dir = self.download_dir
do_download = self.is_download
self.unpack_url(
url, location, download_dir,
do_download,
)
except HTTPError as exc:
logger.fatal(
'Could not install requirement %s because '
'of error %s' % (req_to_install, exc)
)
raise InstallationError(
'Could not install requirement %s because of HTTP error %s for URL %s'
% (req_to_install, e, url))
else:
unpack = False
if unpack:
is_bundle = req_to_install.is_bundle
is_wheel = url and url.filename.endswith(wheel_ext)
if is_bundle:
req_to_install.move_bundle_files(self.build_dir, self.src_dir)
for subreq in req_to_install.bundle_requirements():
reqs.append(subreq)
self.add_requirement(subreq)
elif self.is_download:
req_to_install.source_dir = location
if not is_wheel:
# FIXME: see https://github.com/pypa/pip/issues/1112
req_to_install.run_egg_info()
if url and url.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
elif is_wheel:
req_to_install.source_dir = location
req_to_install.url = url.url
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
if force_root_egg_info:
# We need to run this to make sure that the .egg-info/
# directory is created for packing in the bundle
req_to_install.run_egg_info(force_root_egg_info=True)
req_to_install.assert_source_matches_version()
#@@ sketchy way of identifying packages not grabbed from an index
if bundle and req_to_install.url:
self.copy_to_build_dir(req_to_install)
install = False
# req_to_install.req is only avail after unpack for URL pkgs
# repeat check_if_exists to uninstall-on-upgrade (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
#don't uninstall conflict if user install and and conflict is not user install
if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.notify(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s' %
req_to_install
)
install = False
if is_wheel:
dist = list(
pkg_resources.find_distributions(location)
)[0]
if not req_to_install.req:
req_to_install.req = dist.as_requirement()
self.add_requirement(req_to_install)
if not self.ignore_dependencies:
for subreq in dist.requires(
req_to_install.extras):
if self.has_requirement(
subreq.project_name):
continue
subreq = InstallRequirement(str(subreq),
req_to_install)
reqs.append(subreq)
self.add_requirement(subreq)
# sdists
elif not is_bundle:
## FIXME: shouldn't be globally added:
finder.add_dependency_links(req_to_install.dependency_links)
if (req_to_install.extras):
logger.notify("Installing extra requirements: %r" % ','.join(req_to_install.extras))
if not self.ignore_dependencies:
for req in req_to_install.requirements(req_to_install.extras):
try:
name = pkg_resources.Requirement.parse(req).project_name
except ValueError:
e = sys.exc_info()[1]
## FIXME: proper warning
logger.error('Invalid requirement: %r (%s) in requirement %s' % (req, e, req_to_install))
continue
if self.has_requirement(name):
## FIXME: check for conflict
continue
subreq = InstallRequirement(req, req_to_install)
reqs.append(subreq)
self.add_requirement(subreq)
if not self.has_requirement(req_to_install.name):
#'unnamed' requirements will get added here
self.add_requirement(req_to_install)
# cleanup tmp src
if not is_bundle:
if (
self.is_download or
req_to_install._temp_build_dir is not None
):
self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install)
if bundle and (req_to_install.url and req_to_install.url.startswith('file:///')):
self.copy_to_build_dir(req_to_install)
finally:
logger.indent -= 2
def cleanup_files(self, bundle=False):
    """Clean up files, remove builds.

    Removes the temporary sources collected in ``self.reqs_to_cleanup``,
    then deletes the build dir (only if pip itself created it — see
    ``_pip_has_created_build_dir``) and, for bundles, the src dir.
    """
    logger.notify('Cleaning up...')
    logger.indent += 2
    for req in self.reqs_to_cleanup:
        req.remove_temporary_source()
    remove_dir = []
    # Never delete a user-supplied build directory; only the one pip
    # created (identified by the delete-marker file).
    if self._pip_has_created_build_dir():
        remove_dir.append(self.build_dir)
    # The source dir of a bundle can always be removed.
    # FIXME: not if it pre-existed the bundle!
    if bundle:
        remove_dir.append(self.src_dir)
    for dir in remove_dir:
        if os.path.exists(dir):
            logger.info('Removing temporary dir %s...' % dir)
            rmtree(dir)
    logger.indent -= 2
def _pip_has_created_build_dir(self):
    # True only when the build dir is the default location AND contains
    # the delete-marker file pip writes at creation time (see
    # _make_build_dir).  Guards cleanup_files() against deleting a
    # directory pip did not create.
    return (self.build_dir == build_prefix and
            os.path.exists(os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME)))
def copy_to_build_dir(self, req_to_install):
    """Copy an already-unpacked requirement into the bundle tree.

    Editable requirements are copied into ``self.src_dir``, all others
    into ``self.build_dir``; afterwards ``setup.py clean`` is run inside
    the copy to drop build artifacts.
    """
    import sys  # local import so this fix needs no change to the file header
    # Conditional expression instead of the fragile `and/or` idiom (which
    # silently picks build_dir whenever src_dir is falsy).
    target_dir = self.src_dir if req_to_install.editable else self.build_dir
    logger.info("Copying %s to %s" % (req_to_install.name, target_dir))
    dest = os.path.join(target_dir, req_to_install.name)
    shutil.copytree(req_to_install.source_dir, dest)
    # Use the running interpreter, not whatever `python` happens to be on
    # PATH — they may differ (or `python` may not exist at all).
    call_subprocess([sys.executable, "%s/setup.py" % dest, "clean"], cwd=dest,
                    command_desc='python setup.py clean')
def unpack_url(self, link, location, download_dir=None,
               only_download=False):
    """Unpack the archive/checkout behind *link* into *location*.

    Dispatches on the link type: VCS URL, local file URL, or HTTP(S).
    When ``only_download`` is true, a delete-marker file is written so
    the unpacked tree is treated as temporary.
    """
    if download_dir is None:
        download_dir = self.download_dir
    # non-editable vcs urls
    if is_vcs_url(link):
        if only_download:
            loc = download_dir
        else:
            loc = location
        unpack_vcs_link(link, loc, only_download)
    # file urls
    elif is_file_url(link):
        unpack_file_url(link, location, download_dir)
        if only_download:
            write_delete_marker_file(location)
    # http urls
    else:
        unpack_http_url(
            link,
            location,
            self.download_cache,
            download_dir,
            self.session,
        )
        if only_download:
            write_delete_marker_file(location)
def install(self, install_options, global_options=(), *args, **kwargs):
    """Install everything in this set (after having downloaded and unpacked the packages)

    Any conflicting existing installation is uninstalled first; if the
    new install then fails, that uninstall is rolled back, otherwise it
    is committed.  Extra ``install_options``/``global_options`` and any
    ``*args``/``**kwargs`` are forwarded to each requirement's install.
    """
    to_install = [r for r in self.requirements.values()
                  if not r.satisfied_by]
    # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
    # move the distribute-0.7.X wrapper to the end because it does not
    # install a setuptools package. by moving it to the end, we ensure it's
    # setuptools dependency is handled first, which will provide the
    # setuptools package
    # TODO: take this out later
    distribute_req = pkg_resources.Requirement.parse("distribute>=0.7")
    for req in to_install:
        if req.name == 'distribute' and req.installed_version in distribute_req:
            to_install.remove(req)
            to_install.append(req)
    if to_install:
        logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install]))
    logger.indent += 2
    try:
        for requirement in to_install:
            # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
            # when upgrading from distribute-0.6.X to the new merged
            # setuptools in py2, we need to force setuptools to uninstall
            # distribute. In py3, which is always using distribute, this
            # conversion is already happening in distribute's pkg_resources.
            # It's ok *not* to check if setuptools>=0.7 because if someone
            # were actually trying to ugrade from distribute to setuptools
            # 0.6.X, then all this could do is actually help, although that
            # upgade path was certainly never "supported"
            # TODO: remove this later
            if requirement.name == 'setuptools':
                try:
                    # only uninstall distribute<0.7. For >=0.7, setuptools
                    # will also be present, and that's what we need to
                    # uninstall
                    distribute_requirement = pkg_resources.Requirement.parse("distribute<0.7")
                    existing_distribute = pkg_resources.get_distribution("distribute")
                    if existing_distribute in distribute_requirement:
                        requirement.conflicts_with = existing_distribute
                except pkg_resources.DistributionNotFound:
                    # distribute wasn't installed, so nothing to do
                    pass
            if requirement.conflicts_with:
                logger.notify('Found existing installation: %s'
                              % requirement.conflicts_with)
                logger.indent += 2
                try:
                    requirement.uninstall(auto_confirm=True)
                finally:
                    logger.indent -= 2
            try:
                requirement.install(install_options, global_options, *args, **kwargs)
            # bare except is deliberate: roll back the uninstall on ANY
            # failure (including KeyboardInterrupt), then re-raise.
            except:
                # if install did not succeed, rollback previous uninstall
                if requirement.conflicts_with and not requirement.install_succeeded:
                    requirement.rollback_uninstall()
                raise
            else:
                if requirement.conflicts_with and requirement.install_succeeded:
                    requirement.commit_uninstall()
            requirement.remove_temporary_source()
    finally:
        logger.indent -= 2
    self.successfully_installed = to_install
def create_bundle(self, bundle_filename):
    """Write a pip bundle zip containing the build and src trees.

    VCS checkouts are not embedded; instead a small "guide" file with the
    URL/revision is written in their place.  The manifest produced by
    ``bundle_requirements`` is added as pip-manifest.txt.
    """
    ## FIXME: can't decide which is better; zip is easier to read
    ## random files from, but tar.bz2 is smaller and not as lame a
    ## format.
    ## FIXME: this file should really include a manifest of the
    ## packages, maybe some other metadata files. It would make
    ## it easier to detect as well.
    zip = zipfile.ZipFile(bundle_filename, 'w', zipfile.ZIP_DEFLATED)
    vcs_dirs = []
    for dir, basename in (self.build_dir, 'build'), (self.src_dir, 'src'):
        dir = os.path.normcase(os.path.abspath(dir))
        for dirpath, dirnames, filenames in os.walk(dir):
            for backend in vcs.backends:
                vcs_backend = backend()
                vcs_url = vcs_rev = None
                if vcs_backend.dirname in dirnames:
                    # for/else: only record info when no enclosing vcs dir
                    # was already handled (loop completed without break).
                    for vcs_dir in vcs_dirs:
                        if dirpath.startswith(vcs_dir):
                            # vcs bundle file already in parent directory
                            break
                    else:
                        vcs_url, vcs_rev = vcs_backend.get_info(
                            os.path.join(dir, dirpath))
                        vcs_dirs.append(dirpath)
                    vcs_bundle_file = vcs_backend.bundle_file
                    vcs_guide = vcs_backend.guide % {'url': vcs_url,
                                                     'rev': vcs_rev}
                    # prune the vcs metadata dir from the walk
                    dirnames.remove(vcs_backend.dirname)
                    break
            if 'pip-egg-info' in dirnames:
                dirnames.remove('pip-egg-info')
            for dirname in dirnames:
                dirname = os.path.join(dirpath, dirname)
                name = self._clean_zip_name(dirname, dir)
                zip.writestr(basename + '/' + name + '/', '')
            for filename in filenames:
                if filename == PIP_DELETE_MARKER_FILENAME:
                    continue
                filename = os.path.join(dirpath, filename)
                name = self._clean_zip_name(filename, dir)
                zip.write(filename, basename + '/' + name)
            if vcs_url:
                name = os.path.join(dirpath, vcs_bundle_file)
                name = self._clean_zip_name(name, dir)
                zip.writestr(basename + '/' + name, vcs_guide)
    zip.writestr('pip-manifest.txt', self.bundle_requirements())
    zip.close()
# Header text prepended to every pip-manifest.txt (see bundle_requirements).
BUNDLE_HEADER = '''\
# This is a pip bundle file, that contains many source packages
# that can be installed as a group. You can install this like:
# pip this_file.zip
# The rest of the file contains a list of all the packages included:
'''

def bundle_requirements(self):
    """Return the text of the bundle manifest (pip-manifest.txt).

    Top-level requirements (no ``comes_from``) are listed first, then the
    packages pulled in to satisfy them, all pinned as name==version.
    """
    parts = [self.BUNDLE_HEADER]
    for req in [req for req in self.requirements.values()
                if not req.comes_from]:
        parts.append('%s==%s\n' % (req.name, req.installed_version))
    parts.append('# These packages were installed to satisfy the above requirements:\n')
    for req in [req for req in self.requirements.values()
                if req.comes_from]:
        parts.append('%s==%s\n' % (req.name, req.installed_version))
    ## FIXME: should we do something with self.unnamed_requirements?
    return ''.join(parts)
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix+os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix))
name = name[len(prefix)+1:]
name = name.replace(os.path.sep, '/')
return name
def _make_build_dir(build_dir):
    # Create the build directory and drop pip's delete-marker file into it,
    # so later cleanup can tell this directory was created by pip itself.
    os.makedirs(build_dir)
    write_delete_marker_file(build_dir)
# Matches URLs/paths that start with an http/https/file scheme (case-insensitive);
# used to decide whether nested requirement files are resolved as URLs or paths.
_scheme_re = re.compile(r'^(http|https|file):', re.I)
def parse_requirements(filename, finder=None, comes_from=None, options=None,
                       session=None):
    """Yield an InstallRequirement for each requirement line in *filename*.

    Option lines (``-i``, ``-f``, ``--allow-external`` …) mutate *finder*
    as a side effect and yield nothing; ``-r``/``--requirement`` lines
    recurse into the referenced file.  *options* is the parsed pip options
    object (only ``skip_requirements_regex``, ``default_vcs`` and ``pre``
    are read here).
    """
    if session is None:
        session = PipSession()
    skip_match = None
    skip_regex = options.skip_requirements_regex if options else None
    if skip_regex:
        skip_match = re.compile(skip_regex)
    reqs_file_dir = os.path.dirname(os.path.abspath(filename))
    filename, content = get_file_content(filename,
                                         comes_from=comes_from,
                                         session=session,
                                         )
    for line_number, line in enumerate(content.splitlines()):
        line_number += 1  # report 1-based line numbers
        line = line.strip()
        # Remove comments from file
        line = re.sub(r"(^|\s)#.*$", "", line)
        if not line or line.startswith('#'):
            continue
        if skip_match and skip_match.search(line):
            continue
        if line.startswith('-r') or line.startswith('--requirement'):
            if line.startswith('-r'):
                req_url = line[2:].strip()
            else:
                # NOTE(review): strip('=') here vs lstrip('=') elsewhere —
                # presumably equivalent for real input; confirm before unifying.
                req_url = line[len('--requirement'):].strip().strip('=')
            if _scheme_re.search(filename):
                # Relative to a URL
                req_url = urlparse.urljoin(filename, req_url)
            elif not _scheme_re.search(req_url):
                req_url = os.path.join(os.path.dirname(filename), req_url)
            # Recurse into the referenced requirements file.
            for item in parse_requirements(req_url, finder, comes_from=filename, options=options, session=session):
                yield item
        elif line.startswith('-Z') or line.startswith('--always-unzip'):
            # No longer used, but previously these were used in
            # requirement files, so we'll ignore.
            pass
        elif line.startswith('-f') or line.startswith('--find-links'):
            if line.startswith('-f'):
                line = line[2:].strip()
            else:
                line = line[len('--find-links'):].strip().lstrip('=')
            ## FIXME: it would be nice to keep track of the source of
            ## the find_links:
            # support a find-links local path relative to a requirements file
            relative_to_reqs_file = os.path.join(reqs_file_dir, line)
            if os.path.exists(relative_to_reqs_file):
                line = relative_to_reqs_file
            if finder:
                finder.find_links.append(line)
        elif line.startswith('-i') or line.startswith('--index-url'):
            if line.startswith('-i'):
                line = line[2:].strip()
            else:
                line = line[len('--index-url'):].strip().lstrip('=')
            if finder:
                finder.index_urls = [line]
        elif line.startswith('--extra-index-url'):
            line = line[len('--extra-index-url'):].strip().lstrip('=')
            if finder:
                finder.index_urls.append(line)
        elif line.startswith('--use-wheel'):
            # NOTE(review): unlike the branches above, no `if finder` guard —
            # raises AttributeError when finder is None; confirm intent.
            finder.use_wheel = True
        elif line.startswith('--no-index'):
            finder.index_urls = []
        elif line.startswith("--allow-external"):
            line = line[len("--allow-external"):].strip().lstrip("=")
            finder.allow_external |= set([normalize_name(line).lower()])
        elif line.startswith("--allow-all-external"):
            finder.allow_all_external = True
        # Remove in 1.7
        elif line.startswith("--no-allow-external"):
            pass
        # Remove in 1.7
        elif line.startswith("--no-allow-insecure"):
            pass
        # Remove after 1.7
        elif line.startswith("--allow-insecure"):
            line = line[len("--allow-insecure"):].strip().lstrip("=")
            finder.allow_unverified |= set([normalize_name(line).lower()])
        elif line.startswith("--allow-unverified"):
            line = line[len("--allow-unverified"):].strip().lstrip("=")
            finder.allow_unverified |= set([normalize_name(line).lower()])
        else:
            # An actual requirement line (possibly editable).
            comes_from = '-r %s (line %s)' % (filename, line_number)
            if line.startswith('-e') or line.startswith('--editable'):
                if line.startswith('-e'):
                    line = line[2:].strip()
                else:
                    line = line[len('--editable'):].strip().lstrip('=')
                req = InstallRequirement.from_editable(
                    line, comes_from=comes_from, default_vcs=options.default_vcs if options else None)
            else:
                req = InstallRequirement.from_line(line, comes_from, prereleases=getattr(options, "pre", None))
            yield req
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
## FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
def parse_editable(editable_req, default_vcs=None):
    """Parses svn+http://blahblah@rev#egg=Foobar into a requirement
    (Foobar) and a URL

    Returns a ``(package_name, url, options)`` tuple.  For local
    file: URLs the first element is None and the third is either None or
    the parsed extras list.  Raises InstallationError for paths without a
    setup.py, URLs without a recognizable VCS prefix, or URLs from which
    no package name can be derived.
    """
    url = editable_req
    extras = None
    # If a file path is specified with extras, strip off the extras.
    m = re.match(r'^(.+)(\[[^\]]+\])$', url)
    if m:
        url_no_extras = m.group(1)
        extras = m.group(2)
    else:
        url_no_extras = url
    if os.path.isdir(url_no_extras):
        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
            raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % url_no_extras)
        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)
    if url_no_extras.lower().startswith('file:'):
        if extras:
            # Parse the extras via a throwaway requirement string.
            return None, url_no_extras, pkg_resources.Requirement.parse('__placeholder__' + extras).extras
        else:
            return None, url_no_extras, None
    # Prepend the vcs name if the URL starts with a bare scheme the vcs
    # module knows (e.g. 'svn:...' -> 'svn+svn:...').
    for version_control in vcs:
        if url.lower().startswith('%s:' % version_control):
            url = '%s+%s' % (version_control, url)
            break
    if '+' not in url:
        if default_vcs:
            url = default_vcs + '+' + url
        else:
            raise InstallationError(
                '%s should either be a path to a local project or a VCS url beginning with svn+, git+, hg+, or bzr+' % editable_req)
    vc_type = url.split('+', 1)[0].lower()
    if not vcs.get_backend(vc_type):
        error_message = 'For --editable=%s only ' % editable_req + \
            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
            ' is currently supported'
        raise InstallationError(error_message)
    try:
        options = _build_editable_options(editable_req)
    except Exception:
        message = sys.exc_info()[1]
        raise InstallationError(
            '--editable=%s error in editable options:%s' % (editable_req, message))
    # Prefer the explicit #egg= option; otherwise fall back to guessing the
    # name from an svn-style tags/branches/trunk URL.
    if not options or 'egg' not in options:
        req = _build_req_from_url(editable_req)
        if not req:
            raise InstallationError('--editable=%s is not the right format; it must have #egg=Package' % editable_req)
    else:
        req = options['egg']
    package = _strip_postfix(req)
    return package, url, options
class UninstallPathSet(object):
    """A set of file paths to be removed in the uninstallation of a
    requirement.

    Removal is two-phase: ``remove()`` stashes every path into a
    temporary save dir so ``rollback()`` can restore it; ``commit()``
    deletes the stash, making the uninstall permanent.
    """

    def __init__(self, dist):
        self.paths = set()          # files/dirs scheduled for removal
        self._refuse = set()        # paths outside our prefix (not touched)
        self.pth = {}               # .pth file path -> UninstallPthEntries
        self.dist = dist
        self.save_dir = None        # stash dir; created lazily in remove()
        self._moved_paths = []

    def _permitted(self, path):
        """
        Return True if the given path is one we are permitted to
        remove/modify, False otherwise.
        """
        return is_local(path)

    def _can_uninstall(self):
        # Refuse to uninstall distributions living outside the current
        # environment (e.g. system packages from inside a virtualenv).
        if not dist_is_local(self.dist):
            logger.notify("Not uninstalling %s at %s, outside environment %s"
                          % (self.dist.project_name, normalize_path(self.dist.location), sys.prefix))
            return False
        return True

    def add(self, path):
        """Schedule *path* for removal if it exists and is permitted."""
        path = normalize_path(path)
        if not os.path.exists(path):
            return
        if self._permitted(path):
            self.paths.add(path)
        else:
            self._refuse.add(path)
        # __pycache__ files can show up after 'installed-files.txt' is created, due to imports
        if os.path.splitext(path)[1] == '.py' and uses_pycache:
            self.add(imp.cache_from_source(path))

    def add_pth(self, pth_file, entry):
        """Schedule *entry* for removal from the .pth file *pth_file*."""
        pth_file = normalize_path(pth_file)
        if self._permitted(pth_file):
            if pth_file not in self.pth:
                self.pth[pth_file] = UninstallPthEntries(pth_file)
            self.pth[pth_file].add(entry)
        else:
            self._refuse.add(pth_file)

    def compact(self, paths):
        """Compact a path set to contain the minimal number of paths
        necessary to contain all paths in the set. If /a/path/ and
        /a/path/to/a/file.txt are both in the set, leave only the
        shorter path."""
        short_paths = set()
        # Shortest first, so parents are accepted before their children.
        for path in sorted(paths, key=len):
            if not any([(path.startswith(shortpath) and
                         path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
                        for shortpath in short_paths]):
                short_paths.add(path)
        return short_paths

    def _stash(self, path):
        # Mirror *path* (minus its drive letter) under the stash dir.
        return os.path.join(
            self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))

    def remove(self, auto_confirm=False):
        """Remove paths in ``self.paths`` with confirmation (unless
        ``auto_confirm`` is True)."""
        if not self._can_uninstall():
            return
        if not self.paths:
            logger.notify("Can't uninstall '%s'. No files were found to uninstall." % self.dist.project_name)
            return
        logger.notify('Uninstalling %s:' % self.dist.project_name)
        logger.indent += 2
        paths = sorted(self.compact(self.paths))
        try:
            if auto_confirm:
                response = 'y'
            else:
                for path in paths:
                    logger.notify(path)
                response = ask('Proceed (y/n)? ', ('y', 'n'))
            if self._refuse:
                logger.notify('Not removing or modifying (outside of prefix):')
                for path in self.compact(self._refuse):
                    logger.notify(path)
            if response == 'y':
                # Move files into a stash instead of deleting outright so
                # a failed reinstall can call rollback().
                self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
                                                 prefix='pip-')
                for path in paths:
                    new_path = self._stash(path)
                    logger.info('Removing file or directory %s' % path)
                    self._moved_paths.append(path)
                    renames(path, new_path)
                for pth in self.pth.values():
                    pth.remove()
                logger.notify('Successfully uninstalled %s' % self.dist.project_name)
        finally:
            logger.indent -= 2

    def rollback(self):
        """Rollback the changes previously made by remove()."""
        if self.save_dir is None:
            logger.error("Can't roll back %s; was not uninstalled" % self.dist.project_name)
            return False
        logger.notify('Rolling back uninstall of %s' % self.dist.project_name)
        for path in self._moved_paths:
            tmp_path = self._stash(path)
            logger.info('Replacing %s' % path)
            renames(tmp_path, path)
        # BUG FIX: iterate the UninstallPthEntries objects, not the dict
        # itself — iterating self.pth yields the key strings, which have
        # no .rollback() and raised AttributeError here (remove() already
        # iterated .values() correctly).
        for pth in self.pth.values():
            pth.rollback()

    def commit(self):
        """Remove temporary save dir: rollback will no longer be possible."""
        if self.save_dir is not None:
            rmtree(self.save_dir)
            self.save_dir = None
            self._moved_paths = []
class UninstallPthEntries(object):
    """Tracks and removes the entries a distribution added to a .pth
    file, keeping the original lines so the edit can be rolled back."""

    def __init__(self, pth_file):
        if not os.path.isfile(pth_file):
            raise UninstallationError("Cannot remove entries from nonexistent file %s" % pth_file)
        self.file = pth_file
        self.entries = set()
        self._saved_lines = None  # original file content, for rollback()

    def add(self, entry):
        """Register *entry* (a path line) for removal from the file."""
        entry = os.path.normcase(entry)
        # On Windows, os.path.normcase converts the entry to use
        # backslashes. This is correct for entries that describe absolute
        # paths outside of site-packages, but all the others use forward
        # slashes.
        if sys.platform == 'win32' and not os.path.splitdrive(entry)[0]:
            entry = entry.replace('\\', '/')
        self.entries.add(entry)

    def remove(self):
        """Strip the registered entries from the .pth file."""
        logger.info('Removing pth entries from %s:' % self.file)
        # Binary mode so line endings survive the round-trip
        # (windows uses '\r\n' with py3k, but uses '\n' with py2.x).
        # 'with' closes the handle even when an exception is raised —
        # previously the handles leaked on error.
        with open(self.file, 'rb') as fh:
            lines = fh.readlines()
        self._saved_lines = lines
        if any(b('\r\n') in line for line in lines):
            endline = '\r\n'
        else:
            endline = '\n'
        for entry in self.entries:
            try:
                logger.info('Removing entry: %s' % entry)
                lines.remove(b(entry + endline))
            except ValueError:
                # entry not present in the file; nothing to remove
                pass
        with open(self.file, 'wb') as fh:
            fh.writelines(lines)

    def rollback(self):
        """Restore the file content saved by remove(); returns success."""
        if self._saved_lines is None:
            logger.error('Cannot roll back changes to %s, none were made' % self.file)
            return False
        logger.info('Rolling %s back to previous state' % self.file)
        with open(self.file, 'wb') as fh:
            fh.writelines(self._saved_lines)
        return True
class FakeFile(object):
    """Wrap a list of lines in an object with readline() to make
    ConfigParser happy."""

    def __init__(self, lines):
        # Lazily hand out the supplied lines one at a time.
        self._gen = iter(lines)

    def readline(self):
        try:
            try:
                return next(self._gen)
            except NameError:
                # ancient interpreters without the next() builtin
                return self._gen.next()
        except StopIteration:
            # file-like contract: EOF is signalled by an empty string
            return ''

    def __iter__(self):
        return self._gen
|
unknown
|
codeparrot/codeparrot-clean
| ||
class FileProxyMixin:
    """
    A mixin class used to forward file methods to an underlaying file
    object. The internal file object has to be called "file"::

        class FileProxy(FileProxyMixin):
            def __init__(self, file):
                self.file = file
    """

    # Each forwarded attribute is a property so the lookup is performed
    # against the *current* self.file on every access (the underlying
    # file may be replaced after construction).
    encoding = property(lambda self: self.file.encoding)
    fileno = property(lambda self: self.file.fileno)
    flush = property(lambda self: self.file.flush)
    isatty = property(lambda self: self.file.isatty)
    newlines = property(lambda self: self.file.newlines)
    read = property(lambda self: self.file.read)
    readinto = property(lambda self: self.file.readinto)
    readline = property(lambda self: self.file.readline)
    readlines = property(lambda self: self.file.readlines)
    seek = property(lambda self: self.file.seek)
    tell = property(lambda self: self.file.tell)
    truncate = property(lambda self: self.file.truncate)
    write = property(lambda self: self.file.write)
    writelines = property(lambda self: self.file.writelines)

    @property
    def closed(self):
        # A proxy with no underlying file counts as closed.
        return not self.file or self.file.closed

    def readable(self):
        if self.closed:
            return False
        if hasattr(self.file, 'readable'):
            return self.file.readable()
        # Fallback for file-likes without the io capability methods.
        return True

    def writable(self):
        if self.closed:
            return False
        if hasattr(self.file, 'writable'):
            return self.file.writable()
        # Fall back to inspecting the open mode, when available.
        return 'w' in getattr(self.file, 'mode', '')

    def seekable(self):
        if self.closed:
            return False
        if hasattr(self.file, 'seekable'):
            return self.file.seekable()
        return True

    def __iter__(self):
        return iter(self.file)
|
unknown
|
codeparrot/codeparrot-clean
| ||
## @file
# process GUIDed section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
import subprocess
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from .GenFdsGlobalVariable import FindExtendTool
from CommonDataClass.FdfClass import GuidSectionClassObject
import sys
from Common import EdkLogger
from Common.BuildToolError import *
from .FvImageSection import FvImageSection
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.DataType import *
## generate GUIDed section
#
#
class GuidSection(GuidSectionClassObject) :
    """Generates a GUID-defined FFS section, optionally post-processing the
    raw section data with an external GUIDed tool (e.g. compression or
    signing); without a NameGuid a plain CRC32 section is produced."""

    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        GuidSectionClassObject.__init__(self)

    ## GenSection() method
    #
    #   Generate GUIDed section
    #
    #   @param  self        The object pointer
    #   @param  OutputPath  Where to place output file
    #   @param  ModuleName  Which module this section belongs to
    #   @param  SecNum      Index of section
    #   @param  KeyStringList  Filter for inputs of section generation
    #   @param  FfsInf      FfsInfStatement object that contains this section data
    #   @param  Dict        dictionary contains macro and its value
    #   @retval tuple       (Generated file name, section alignment)
    #
    # NOTE(review): Dict={} is a shared mutable default argument — it appears
    # unused for mutation here, but confirm no caller relies on it.
    def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf=None, Dict={}, IsMakefile=False):
        #
        # Generate all section
        #
        self.KeyStringList = KeyStringList
        self.CurrentArchList = GenFdsGlobalVariable.ArchList
        if FfsInf is not None:
            self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
            self.NameGuid = FfsInf.__ExtendMacro__(self.NameGuid)
            self.SectionType = FfsInf.__ExtendMacro__(self.SectionType)
            self.CurrentArchList = [FfsInf.CurrentArch]
        SectFile = tuple()
        SectAlign = []
        Index = 0
        MaxAlign = None
        if self.FvAddr != []:
            FvAddrIsSet = True
        else:
            FvAddrIsSet = False
        if self.ProcessRequired in ("TRUE", "1"):
            if self.FvAddr != []:
                #no use FvAddr when the image is processed.
                self.FvAddr = []
            if self.FvParentAddr is not None:
                #no use Parent Addr when the image is processed.
                self.FvParentAddr = None
        # Generate every child section first, collecting the produced files
        # and tracking the maximum alignment seen.
        for Sect in self.SectionList:
            Index = Index + 1
            SecIndex = '%s.%d' % (SecNum, Index)
            # set base address for inside FvImage
            if isinstance(Sect, FvImageSection):
                if self.FvAddr != []:
                    Sect.FvAddr = self.FvAddr.pop(0)
                self.IncludeFvSection = True
            elif isinstance(Sect, GuidSection):
                Sect.FvAddr = self.FvAddr
                Sect.FvParentAddr = self.FvParentAddr
            ReturnSectList, align = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile)
            if isinstance(Sect, GuidSection):
                if Sect.IncludeFvSection:
                    self.IncludeFvSection = Sect.IncludeFvSection
            if align is not None:
                if MaxAlign is None:
                    MaxAlign = align
                if GenFdsGlobalVariable.GetAlignment (align) > GenFdsGlobalVariable.GetAlignment (MaxAlign):
                    MaxAlign = align
            if ReturnSectList != []:
                if align is None:
                    align = "1"
                for file in ReturnSectList:
                    SectFile += (file,)
                    SectAlign.append(align)
        # Propagate the largest child alignment to this section.
        if MaxAlign is not None:
            if self.Alignment is None:
                self.Alignment = MaxAlign
            else:
                if GenFdsGlobalVariable.GetAlignment (MaxAlign) > GenFdsGlobalVariable.GetAlignment (self.Alignment):
                    self.Alignment = MaxAlign
        OutputFile = OutputPath + \
                     os.sep + \
                     ModuleName + \
                     SUP_MODULE_SEC + \
                     SecNum + \
                     SectionSuffix['GUIDED']
        OutputFile = os.path.normpath(OutputFile)
        ExternalTool = None
        ExternalOption = None
        if self.NameGuid is not None:
            ExternalTool, ExternalOption = FindExtendTool(self.KeyStringList, self.CurrentArchList, self.NameGuid)
        #
        # If not have GUID , call default
        # GENCRC32 section
        #
        if self.NameGuid is None :
            GenFdsGlobalVariable.VerboseLogger("Use GenSection function Generate CRC32 Section")
            GenFdsGlobalVariable.GenerateSection(OutputFile, SectFile, Section.Section.SectionType[self.SectionType], InputAlign=SectAlign, IsMakefile=IsMakefile)
            OutputFileList = []
            OutputFileList.append(OutputFile)
            return OutputFileList, self.Alignment
        #or GUID not in External Tool List
        elif ExternalTool is None:
            EdkLogger.error("GenFds", GENFDS_ERROR, "No tool found with GUID %s" % self.NameGuid)
        else:
            DummyFile = OutputFile + ".dummy"
            #
            # Call GenSection with DUMMY section type.
            #
            GenFdsGlobalVariable.GenerateSection(DummyFile, SectFile, InputAlign=SectAlign, IsMakefile=IsMakefile)
            #
            # Use external tool process the Output
            #
            TempFile = OutputPath + \
                       os.sep + \
                       ModuleName + \
                       SUP_MODULE_SEC + \
                       SecNum + \
                       '.tmp'
            TempFile = os.path.normpath(TempFile)
            #
            # Remove temp file if its time stamp is older than dummy file
            # Just in case the external tool fails at this time but succeeded before
            # Error should be reported if the external tool does not generate a new output based on new input
            #
            if os.path.exists(TempFile) and os.path.exists(DummyFile) and os.path.getmtime(TempFile) < os.path.getmtime(DummyFile):
                os.remove(TempFile)
            FirstCall = False
            CmdOption = '-e'
            if ExternalOption is not None:
                CmdOption = CmdOption + ' ' + ExternalOption
            if not GenFdsGlobalVariable.EnableGenfdsMultiThread:
                if self.ProcessRequired not in ("TRUE", "1") and self.IncludeFvSection and not FvAddrIsSet and self.FvParentAddr is not None:
                    #FirstCall is only set for the encapsulated flash FV image without process required attribute.
                    FirstCall = True
                #
                # Call external tool
                #
                ReturnValue = [1]
                if FirstCall:
                    #first try to call the guided tool with -z option and CmdOption for the no process required guided tool.
                    GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, '-z' + ' ' + CmdOption, ReturnValue)
                #
                # when no call or first call failed, ReturnValue are not 1.
                # Call the guided tool with CmdOption
                #
                if ReturnValue[0] != 0:
                    FirstCall = False
                    ReturnValue[0] = 0
                    GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption)
                #
                # There is external tool which does not follow standard rule which return nonzero if tool fails
                # The output file has to be checked
                #
                if not os.path.exists(TempFile) :
                    EdkLogger.error("GenFds", COMMAND_FAILURE, 'Fail to call %s, no output file was generated' % ExternalTool)
                FileHandleIn = open(DummyFile, 'rb')
                FileHandleIn.seek(0, 2)
                InputFileSize = FileHandleIn.tell()
                FileHandleOut = open(TempFile, 'rb')
                FileHandleOut.seek(0, 2)
                TempFileSize = FileHandleOut.tell()
                Attribute = []
                HeaderLength = None
                if self.ExtraHeaderSize != -1:
                    HeaderLength = str(self.ExtraHeaderSize)
                # Detect a pure prepended header: tool output ends with the
                # unmodified input, so the difference in size is the header.
                if self.ProcessRequired == "NONE" and HeaderLength is None:
                    if TempFileSize > InputFileSize:
                        FileHandleIn.seek(0)
                        BufferIn = FileHandleIn.read()
                        FileHandleOut.seek(0)
                        BufferOut = FileHandleOut.read()
                        if BufferIn == BufferOut[TempFileSize - InputFileSize:]:
                            HeaderLength = str(TempFileSize - InputFileSize)
                    #auto sec guided attribute with process required
                    if HeaderLength is None:
                        Attribute.append('PROCESSING_REQUIRED')
                FileHandleIn.close()
                FileHandleOut.close()
                if FirstCall and 'PROCESSING_REQUIRED' in Attribute:
                    # Guided data by -z option on first call is the process required data. Call the guided tool with the real option.
                    GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption)
                #
                # Call Gensection Add Section Header
                #
                if self.ProcessRequired in ("TRUE", "1"):
                    if 'PROCESSING_REQUIRED' not in Attribute:
                        Attribute.append('PROCESSING_REQUIRED')
                if self.AuthStatusValid in ("TRUE", "1"):
                    Attribute.append('AUTH_STATUS_VALID')
                GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
                                                     Guid=self.NameGuid, GuidAttr=Attribute, GuidHdrLen=HeaderLength)
            else:
                #add input file for GenSec get PROCESSING_REQUIRED
                GenFdsGlobalVariable.GuidTool(TempFile, [DummyFile], ExternalTool, CmdOption, IsMakefile=IsMakefile)
                Attribute = []
                HeaderLength = None
                if self.ExtraHeaderSize != -1:
                    HeaderLength = str(self.ExtraHeaderSize)
                if self.AuthStatusValid in ("TRUE", "1"):
                    Attribute.append('AUTH_STATUS_VALID')
                if self.ProcessRequired == "NONE" and HeaderLength is None:
                    GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
                                                         Guid=self.NameGuid, GuidAttr=Attribute,
                                                         GuidHdrLen=HeaderLength, DummyFile=DummyFile, IsMakefile=IsMakefile)
                else:
                    if self.ProcessRequired in ("TRUE", "1"):
                        if 'PROCESSING_REQUIRED' not in Attribute:
                            Attribute.append('PROCESSING_REQUIRED')
                    GenFdsGlobalVariable.GenerateSection(OutputFile, [TempFile], Section.Section.SectionType['GUIDED'],
                                                         Guid=self.NameGuid, GuidAttr=Attribute,
                                                         GuidHdrLen=HeaderLength, IsMakefile=IsMakefile)
            OutputFileList = []
            OutputFileList.append(OutputFile)
            if 'PROCESSING_REQUIRED' in Attribute:
                # reset guided section alignment to none for the processed required guided data
                self.Alignment = None
                self.IncludeFvSection = False
                self.ProcessRequired = "TRUE"
            if IsMakefile and self.Alignment is not None and self.Alignment.strip() == '0':
                self.Alignment = '1'
            return OutputFileList, self.Alignment
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Benchmark: addition of two Complex numbers with random float parts.
prelude: |
  max, min = 1000.0, -1000.0
  a = Complex(rand(max)+min, rand(max)+min)
  b = Complex(rand(max)+min, rand(max)+min)
benchmark:
  complex_float_add: c = a + b
loop_count: 1000000
|
unknown
|
github
|
https://github.com/ruby/ruby
|
benchmark/complex_float_add.yml
|
from PySide import QtCore, QtGui
from androguard.session import Session
from androguard.core import androconf
from androguard.gui.fileloading import FileLoadingThread
from androguard.gui.treewindow import TreeWindow
from androguard.gui.sourcewindow import SourceWindow
from androguard.gui.stringswindow import StringsWindow
from androguard.gui.helpers import class2func
import os
class MainWindow(QtGui.QMainWindow):
    '''Main window:
       self.central: QTabWidget in center area
       self.dock: QDockWidget in left area
       self.tree: TreeWindow(QTreeWidget) in self.dock
    '''

    def __init__(self, parent=None, session=None, input_file=None):
        '''Build menus, dock and central tab area, then optionally open a file.

        Fixed: ``session`` used to default to ``Session()``, which is
        evaluated once at definition time, so every MainWindow created
        without an explicit session silently shared the same Session
        object (mutable default argument pitfall). Default to None and
        create a fresh Session per window instead.
        '''
        super(MainWindow, self).__init__(parent)
        self.session = session if session is not None else Session()
        self.setupSession()
        self.setupFileMenu()
        self.setupViewMenu()
        self.setupHelpMenu()
        self.setupCentral()
        self.setupEmptyTree()
        self.setupDock()
        self.setWindowTitle("Androguard GUI")
        self.showStatus("Androguard GUI")
        # Fixed: compare against None with ``is not`` rather than ``!=``.
        if input_file is not None:
            self.openFile(input_file)

    def showStatus(self, msg):
        '''Helper function called by any window to display a message
           in status bar.
        '''
        androconf.debug(msg)
        self.statusBar().showMessage(msg)

    def about(self):
        '''User clicked About menu. Display a Message box.'''
        QtGui.QMessageBox.about(self, "About Androguard GUI",
                                "<p><b>Androguard GUI</b> is basically a GUI for Androguard :)." \
                                "<br>Have fun !</p>")

    def setupSession(self):
        '''Connect the background file-loading thread to loadedFile().'''
        self.fileLoadingThread = FileLoadingThread(self.session)
        self.connect(self.fileLoadingThread, QtCore.SIGNAL("loadedFile(bool)"),
                     self.loadedFile)

    def loadedFile(self, success):
        '''Slot invoked when the loading thread finishes (success: bool).'''
        if not success:
            self.showStatus("Analysis of %s failed :(" %
                            str(self.fileLoadingThread.file_path))
            return
        self.updateDockWithTree()
        self.cleanCentral()
        self.showStatus("Analysis of %s done!" %
                        str(self.fileLoadingThread.file_path))

    def openFile(self, path=None):
        '''User clicked Open menu. Display a Dialog to ask which file to open.'''
        self.session.reset()
        if not path:
            path = QtGui.QFileDialog.getOpenFileName(
                self, "Open File", '',
                "Android Files (*.apk *.jar *.dex *.odex *.dey);;Androguard Session (*.ag)")
            path = str(path[0])
        if path:
            self.setupTree()
            self.showStatus("Analyzing %s..." % str(path))
            self.fileLoadingThread.load(path)

    def addFile(self, path=None):
        '''User clicked Open menu. Display a Dialog to ask which APK to open.'''
        # Adding a file only makes sense once a session is already open.
        if not self.session.isOpen():
            return
        if not path:
            path = QtGui.QFileDialog.getOpenFileName(
                self, "Add File", '',
                "Android Files (*.apk *.jar *.dex *.odex *.dey)")
            path = str(path[0])
        if path:
            self.showStatus("Analyzing %s..." % str(path))
            self.fileLoadingThread.load(path)

    def saveFile(self, path=None):
        '''User clicked Save menu. Display a Dialog to ask whwre to save.'''
        if not path:
            path = QtGui.QFileDialog.getSaveFileName(
                self, "Save File", '', "Androguard Session (*.ag)")
            path = str(path[0])
        if path:
            self.showStatus("Saving %s..." % str(path))
            self.saveSession(path)

    def saveSession(self, path):
        '''Save androguard session.'''
        try:
            self.session.save(path)
        # Fixed: ``except RuntimeError, e`` is Python-2-only syntax; the
        # ``as`` form works on Python 2.6+ as well as Python 3.
        except RuntimeError as e:
            androconf.error(str(e))
            # http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
            androconf.error("Try increasing sys.recursionlimit")
            # Remove the partially written session file.
            os.remove(path)
            androconf.warning("Session not saved")

    def quit(self):
        '''Clicked in File menu to exit or CTRL+Q to close main window'''
        QtGui.qApp.quit()

    def closeEvent(self, event):
        '''Clicked [x] to close main window'''
        event.accept()

    def setupEmptyTree(self):
        '''Setup empty Tree at startup. '''
        if hasattr(self, "tree"):
            del self.tree
        self.tree = QtGui.QTreeWidget(self)
        self.tree.header().close()

    def setupDock(self):
        '''Setup empty Dock at startup. '''
        self.dock = QtGui.QDockWidget("Classes", self)
        self.dock.setWidget(self.tree)
        self.dock.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock)

    def setupTree(self):
        '''Replace the dock contents with a fresh class tree.'''
        androconf.debug("Setup Tree")
        self.tree = TreeWindow(win=self, session=self.session)
        self.tree.setWindowTitle("Tree model")
        self.dock.setWidget(self.tree)

    def setupCentral(self):
        '''Setup empty window supporting tabs at startup. '''
        self.central = QtGui.QTabWidget()
        self.central.setTabsClosable(True)
        self.central.tabCloseRequested.connect(self.tabCloseRequestedHandler)
        self.central.currentChanged.connect(self.currentTabChanged)
        self.setCentralWidget(self.central)

    def tabCloseRequestedHandler(self, index):
        '''Close the tab at ``index`` when its close button is clicked.'''
        self.central.removeTab(index)

    def currentTabChanged(self, index):
        '''Slot for QTabWidget.currentChanged; index is -1 when no tab left.'''
        androconf.debug("curentTabChanged -> %d" % index)
        if index == -1:
            return # all tab closed

    def cleanCentral(self):
        #TOFIX: Removes all the pages, but does not delete them.
        self.central.clear()

    def setupFileMenu(self):
        fileMenu = QtGui.QMenu("&File", self)
        self.menuBar().addMenu(fileMenu)
        fileMenu.addAction("&Open...", self.openFile, "Ctrl+O")
        fileMenu.addAction("&Add...", self.addFile, "Ctrl+A")
        fileMenu.addAction("&Save...", self.saveFile, "Ctrl+S")
        fileMenu.addAction("E&xit", self.quit, "Ctrl+Q")

    def setupViewMenu(self):
        viewMenu = QtGui.QMenu("&View", self)
        self.menuBar().addMenu(viewMenu)
        viewMenu.addAction("&Strings...", self.openStringsWindow)

    def setupHelpMenu(self):
        helpMenu = QtGui.QMenu("&Help", self)
        self.menuBar().addMenu(helpMenu)
        helpMenu.addAction("&About", self.about)
        helpMenu.addAction("About &Qt", QtGui.qApp.aboutQt)

    def updateDockWithTree(self, empty=False):
        '''Update the classes tree. Called when
           - a new APK has been imported
           - a classe has been renamed (displayed in the tree)
        '''
        self.setupTree()
        self.tree.fill()

    def openStringsWindow(self):
        '''Open (and focus) a tab listing the session's strings.'''
        stringswin = StringsWindow(win=self, session=self.session)
        self.central.addTab(stringswin, stringswin.title)
        self.central.setTabToolTip(self.central.indexOf(stringswin),
                                   stringswin.title)
        self.central.setCurrentWidget(stringswin)

    def openBytecodeWindow(self, current_class, method=None):
        pass #self.central.setCurrentWidget(sourcewin)

    def openSourceWindow(self, current_class, method=None):
        '''Main function to open a .java source window
           It checks if it already opened and open that tab,
           otherwise, initialize a new window.
        '''
        androconf.debug("openSourceWindow for %s" % current_class)
        sourcewin = self.getMeSourceWindowIfExists(current_class)
        if not sourcewin:
            current_filename = self.session.get_filename_by_class(current_class)
            current_digest = self.session.get_digest_by_class(current_class)
            sourcewin = SourceWindow(win=self,
                                     current_class=current_class,
                                     current_title=current_class.current_title,
                                     current_filename=current_filename,
                                     current_digest=current_digest,
                                     session=self.session)
            sourcewin.reload_java_sources()
            self.central.addTab(sourcewin, sourcewin.title)
            self.central.setTabToolTip(self.central.indexOf(sourcewin),
                                       current_class.get_name())
        if method:
            sourcewin.browse_to_method(method)
        self.central.setCurrentWidget(sourcewin)

    def getMeSourceWindowIfExists(self, current_class):
        '''Helper for openSourceWindow'''
        # Tabs are identified by their tooltip, which stores the class name.
        for idx in range(self.central.count()):
            if current_class.get_name() == self.central.tabToolTip(idx):
                androconf.debug("Tab %s already opened at: %d" %
                                (current_class.get_name(), idx))
                return self.central.widget(idx)
        return None

    def doesClassExist(self, path):
        # NOTE(review): ``self.d`` is never assigned in this class —
        # presumably set elsewhere on the instance; confirm before relying
        # on this method.
        arg = class2func(path)
        try:
            getattr(self.d, arg)
        except AttributeError:
            return False
        return True
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""SearchApi.io API toolkit (deprecated re-exports).

``SearchAPIResults`` and ``SearchAPIRun`` now live in
``langchain_community.tools``; importing them from here goes through
``__getattr__`` and emits a deprecation warning via ``create_importer``.

Fixed: the two descriptive strings used to sit as dead bare-string
statements after the TYPE_CHECKING block; they are consolidated into
this proper module docstring.
"""

from typing import TYPE_CHECKING, Any

from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import SearchAPIResults, SearchAPIRun

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "SearchAPIResults": "langchain_community.tools",
    "SearchAPIRun": "langchain_community.tools",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "SearchAPIResults",
    "SearchAPIRun",
]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/tools/searchapi/__init__.py
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for Cells4."""
import sys
import unittest2 as unittest
from nupic.math import lgamma
class LGammaTest(unittest.TestCase):
  """Check nupic.math.lgamma against reference log-Gamma values."""

  @unittest.skipIf(sys.platform.startswith("win32"),
                   "Skipping failed test on Windows.")
  def testLgamma(self):
    # (input, expected log Gamma(input)) reference pairs on [0.1, 3.0].
    items = (
        (0.1, 2.25271265),
        (0.2, 1.52406382),
        (0.3, 1.09579799),
        (0.4, 0.79667782),
        (0.5, 0.57236494),
        (0.6, 0.39823386),
        (0.7, 0.26086725),
        (0.8, 0.15205968),
        (0.9, 0.06637624),
        (1.0, 0.00000000),
        (1.1, -0.04987244),
        (1.2, -0.08537409),
        (1.3, -0.10817481),
        (1.4, -0.11961291),
        (1.5, -0.12078224),
        (1.6, -0.11259177),
        (1.7, -0.09580770),
        (1.8, -0.07108387),
        (1.9, -0.03898428),
        (2.0, 0.00000000),
        (2.1, 0.04543774),
        (2.2, 0.09694747),
        (2.3, 0.15418945),
        (2.4, 0.21685932),
        (2.5, 0.28468287),
        (2.6, 0.35741186),
        (2.7, 0.43482055),
        (2.8, 0.51670279),
        (2.9, 0.60286961),
        (3.0, 0.69314718),
    )
    # Fixed: removed the Python-2-only ``print v, lg, lgamma(v)`` statement,
    # which spammed test output and is a SyntaxError under Python 3.
    for v, lg in items:
      self.assertLessEqual(abs(lgamma(v) - lg), 1.0e-8,
                           "log Gamma(%f) = %f; lgamma(%f) -> %f" % (
                               v, lg, v, lgamma(v)))
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from typing import TYPE_CHECKING, Any

from langchain_classic._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import AzureCogsText2SpeechTool

# Names that moved to langchain_community. Accessing them on this module
# is routed through __getattr__ below, which raises the appropriate
# deprecation warning and handles the optional import.
DEPRECATED_LOOKUP = {
    "AzureCogsText2SpeechTool": "langchain_community.tools",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve deprecated attribute lookups lazily."""
    return _import_attribute(name)


__all__ = ["AzureCogsText2SpeechTool"]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/tools/azure_cognitive_services/text2speech.py
|
import sys
import argparse
def evaluateIdentifier(gold, pred):
    """
    Performs an intrinsic evaluation of a Complex Word Identification approach.

    @param gold: A vector containing gold-standard labels (0 or 1).
    @param pred: A vector containing predicted labels (0 or 1), at least as
    long as gold (extra predictions are ignored).
    @return: Precision, Recall and F-1.
    @raise IndexError: if pred has fewer labels than gold.
    @raise ZeroDivisionError: if gold is empty, or contains no positive
    (complex) labels — same behaviour as the original implementation.
    """
    # Preserve the original contract: too few predictions is an error.
    if len(pred) < len(gold):
        raise IndexError("pred must contain at least as many labels as gold")

    # Every gold instance counts toward the precision denominator, so it is
    # simply the number of gold labels (hoisted out of the loop).
    precisiont = len(gold)
    precisionc = 0  # labels predicted correctly (gold == pred)
    recallc = 0     # complex words (gold == 1) identified correctly
    recallt = 0     # total complex words in the gold standard

    # Idiomatic pairwise iteration instead of indexing with range(len(...)).
    for gold_label, predicted_label in zip(gold, pred):
        if gold_label == predicted_label:
            precisionc += 1
            if gold_label == 1:
                recallc += 1
        if gold_label == 1:
            recallt += 1

    precision = float(precisionc) / float(precisiont)
    recall = float(recallc) / float(recallt)
    if precision == 0.0 and recall == 0.0:
        fmean = 0.0
    else:
        fmean = 2 * (precision * recall) / (precision + recall)
    return precision, recall, fmean
if __name__ == '__main__':
    # Parse arguments:
    description = 'Evaluation script for Task 11: Complex Word Identification.'
    description += ' The gold-standard file is a dataset with labels in the format provided by the task organizers.'
    description += ' The predicted labels file must contain one label 0 or 1 per line, and must have the same number of lines as the gold-standard.'
    epilog = 'Returns: Precision, Recall and F1.'
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument('--gold', required=True, help='File containing dataset with gold-standard labels.')
    parser.add_argument('--pred', required=True, help='File containing predicted labels.')
    args = vars(parser.parse_args())

    # Retrieve labels. Fixed: the files used to be opened inline and never
    # closed (leaked handles); use context managers instead.
    # Gold labels sit in the 4th tab-separated column of the dataset file.
    with open(args['gold']) as gold_file:
        gold = [int(line.strip().split('\t')[3]) for line in gold_file]
    with open(args['pred']) as pred_file:
        pred = [int(line.strip()) for line in pred_file]

    # Calculate scores:
    p, r, f = evaluateIdentifier(gold, pred)

    # Present scores:
    print('Precision: ' + str(p))
    print('Recall: ' + str(r))
    print('F1: ' + str(f))
|
unknown
|
codeparrot/codeparrot-clean
| ||
import json
from datetime import datetime
from django.test import TestCase
from pytz import UTC
from track.utils import DateTimeJSONEncoder
class TestDateTimeJSONEncoder(TestCase):
    """Tests for track.utils.DateTimeJSONEncoder datetime/date handling."""

    def test_datetime_encoding(self):
        # Fixed: the original used zero-padded literals (05, 01, 07), which
        # are octal literals in Python 2 (same value here only by luck) and
        # an outright SyntaxError in Python 3.
        a_naive_datetime = datetime(2012, 5, 1, 7, 27, 10, 20000)
        a_tz_datetime = datetime(2012, 5, 1, 7, 27, 10, 20000, tzinfo=UTC)
        a_date = a_naive_datetime.date()
        an_iso_datetime = '2012-05-01T07:27:10.020000+00:00'
        an_iso_date = '2012-05-01'
        obj = {
            'number': 100,
            'string': 'hello',
            'object': {'a': 1},
            'a_datetime': a_naive_datetime,
            'a_tz_datetime': a_tz_datetime,
            'a_date': a_date,
        }
        to_json = json.dumps(obj, cls=DateTimeJSONEncoder)
        from_json = json.loads(to_json)
        # Plain JSON types must round-trip unchanged...
        self.assertEqual(from_json['number'], 100)
        self.assertEqual(from_json['string'], 'hello')
        self.assertEqual(from_json['object'], {'a': 1})
        # ...while datetimes/dates come back as ISO-8601 strings (the naive
        # datetime is expected to serialise with a +00:00 offset too).
        self.assertEqual(from_json['a_datetime'], an_iso_datetime)
        self.assertEqual(from_json['a_tz_datetime'], an_iso_datetime)
        self.assertEqual(from_json['a_date'], an_iso_date)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2012 Sascha Peilicke <saschpe@gmx.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import socket
import subprocess
import urllib2
from django.core.exceptions import ValidationError
from django.db import models
from lxml import etree
class DomainManager(models.Manager):
    """Custom Domain model manager.
    """

    def create_from_vir_domain(self, vir_domain):
        """Create (and save) a Domain from a libvirt virDomain handle."""
        domain = Domain()
        domain.update(vir_domain)
        # Fixed: compare the state with ``==`` rather than ``is`` —
        # identity on ints only holds via CPython's small-int cache and is
        # not a language guarantee.
        if domain.state == Domain.RUNNING:
            Interface.objects.update_all_for_domain_from_vir_domain(domain, vir_domain)
            Allocation.objects.update_all_from_domain(domain)
        return domain

    def update_or_create_from_vir_domain(self, vir_domain):
        """Refresh the Domain row matching *vir_domain*, creating it if missing."""
        try:
            domain = Domain.objects.get(name=vir_domain.name())
            domain.update(vir_domain)
            # Fixed: ``==`` instead of ``is`` (see create_from_vir_domain).
            if domain.state == Domain.RUNNING:
                Interface.objects.update_all_for_domain_from_vir_domain(domain, vir_domain)
                Allocation.objects.update_all_from_domain(domain)
        except Domain.DoesNotExist:
            domain = self.create_from_vir_domain(vir_domain)
        return domain

    def update_or_create_all_from_libvirt(self, libvirt_connection):
        """Synchronise Domain rows with every domain known to libvirt."""
        # Iterate over active domains:
        for domain_id in libvirt_connection.listDomainsID():
            vir_domain = libvirt_connection.lookupByID(domain_id)
            self.update_or_create_from_vir_domain(vir_domain)
        # Iterate over defined domains:
        for domain_name in libvirt_connection.listDefinedDomains():
            vir_domain = libvirt_connection.lookupByName(domain_name)
            self.update_or_create_from_vir_domain(vir_domain)
class Domain(models.Model):
    """Virtual machine (libvirt domain).
    """
    # Domain states; the numeric values match what vir_domain.info()[0]
    # reports (see update() below).
    NOSTATE = 0
    RUNNING = 1
    BLOCKED = 2
    PAUSED = 3
    SHUTDOWN = 4
    SHUTOFF = 5
    CRASHED = 6
    PMSUSPENDED = 7
    STATE_CHOICES = (
        (NOSTATE, "No State"),
        (RUNNING, "Running"),
        (BLOCKED, "Blocked"),
        (PAUSED, "Paused"),
        (SHUTDOWN, "Shut Down"),
        (SHUTOFF, "Shut Off"),
        (CRASHED, "Crashed"),  # fixed typo: was "Crahed"
        (PMSUSPENDED, "PM Suspended"),
    )
    name = models.CharField(max_length=64, primary_key=True)
    # libvirt numeric id; set to None while the domain is not running.
    id = models.PositiveIntegerField(null=True, unique=True)
    state = models.PositiveIntegerField(choices=STATE_CHOICES, default=NOSTATE)
    max_memory = models.PositiveIntegerField(default=2097152)
    memory = models.PositiveIntegerField(default=2097152)
    vcpus = models.PositiveIntegerField(default=1, verbose_name="virtual CPUs", help_text="Should not exceed physical CPUs")
    cpu_time = models.BigIntegerField(default=0, verbose_name="CPU time")
    network_set = models.ManyToManyField("Network", through="Interface")
    service_set = models.ManyToManyField("Service", through="Allocation")

    objects = DomainManager()

    class Meta:
        ordering = ("name",)

    def __unicode__(self):
        return self.name

    def clean(self):
        """Model-level validation run by full_clean()."""
        if self.memory > self.max_memory:
            raise ValidationError("Domain memory must be not be greater than max_memory.")
        if self.cpu_time < 0:
            raise ValidationError("Negative CPU time is not possible!")

    def update(self, vir_domain, save=True):
        """Refresh this record from a libvirt virDomain handle.

        @param vir_domain: libvirt virDomain object to copy state from.
        @param save: persist the model afterwards when True.
        """
        self.name = vir_domain.name()
        # vir_domain.ID() returns -1 for inactive domains. Fixed: use
        # ``!=`` instead of ``is not`` — identity comparison on ints relies
        # on CPython's small-int cache and is not guaranteed behaviour.
        if vir_domain.ID() != -1:
            self.id = vir_domain.ID()
        else:
            self.id = None
        domain_info = vir_domain.info()
        # info() yields [state, maxMem, memory, nrVirtCpu, cpuTime].
        self.state = domain_info[0]
        self.max_memory = domain_info[1]
        self.memory = domain_info[2]
        self.vcpus = domain_info[3]
        self.cpu_time = domain_info[4]
        if save:
            self.save()
class InterfaceManager(models.Manager):
    """Custom Interface model manager.
    """

    def update_all_for_domain_from_vir_domain(self, domain, vir_domain):
        """Rebuild all Interface rows for *domain* from its libvirt XML.

        Deletes the existing interfaces and re-creates one row per
        <interface type='network'> device. Since libvirt does not expose
        guest IP addresses here, each MAC is resolved best-effort via ping
        output or, failing that, the host's ARP cache.
        """
        # Parse network interfaces from XML description
        domain.interface_set.all().delete()
        xml = etree.fromstring(vir_domain.XMLDesc(0))
        for xml_if in xml.findall("devices/interface[@type='network']"):
            mac_address = xml_if.find("mac").attrib["address"].lower().strip()
            network_name = xml_if.find("source").attrib["network"].lower().strip()
            # Make sure there's a suitable Network instance available:
            network = Network.objects.get_or_create(name=network_name)[0]
            # libvirt provides no means to lookup domain ip addresses, thus:
            ip_address = None
            try:
                # Ping by host name and pull the "(a.b.c.d)" part of the
                # output (raw strings avoid invalid-escape warnings).
                ping_output = subprocess.check_output(["ping", "-c1", domain.name])
                try:
                    ip_address = re.search(r"\(((\d+\.){3}\d+)\)", ping_output).groups()[0]
                except AttributeError:
                    pass
            except subprocess.CalledProcessError:
                # Ping failed; try the ARP cache instead:
                arp_output = subprocess.check_output(["arp", "-n"])
                for line in arp_output.split("\n")[1:]:
                    try:
                        # Fixed two bugs: the original pattern ``([\d:]+)``
                        # could never match a MAC containing hex letters, and
                        # the match was compared with ``is`` (string identity),
                        # which is never true for freshly parsed strings.
                        ip, mac = re.match(r"([\d\.]+)\s+\w+\s+([0-9a-fA-F:]+)", line).groups()
                        if mac.lower() == mac_address:
                            ip_address = ip  # Found it!
                            break
                    except AttributeError:
                        pass
            Interface.objects.create(domain=domain, network=network,
                                     mac_address=mac_address, ip_address=ip_address)
class Interface(models.Model):
    """Network interface, also a many-to-many relation between domains and networks.
    """
    domain = models.ForeignKey("Domain")
    network = models.ForeignKey("Network")
    # Stored lower-cased by InterfaceManager; 17 chars fits "aa:bb:cc:dd:ee:ff".
    mac_address = models.CharField(max_length=17, verbose_name="MAC Address")
    # NULL when the guest's address could not be resolved (see InterfaceManager).
    ip_address = models.IPAddressField(blank=True, null=True, verbose_name="IP Address")
    objects = InterfaceManager()
    class Meta:
        ordering = ("domain", "mac_address")
    def __unicode__(self):
        return self.mac_address
class NetworkManager(models.Manager):
    """Custom Network model manager."""

    def create_from_vir_network(self, vir_network):
        """Build and persist a Network row from a libvirt virNetwork."""
        net = Network()
        net.update(vir_network)
        return net

    def update_or_create_from_vir_network(self, vir_network):
        """Refresh the matching Network row, creating it when missing."""
        try:
            net = Network.objects.get(name=vir_network.name())
            net.update(vir_network)
        except Network.DoesNotExist:
            net = self.create_from_vir_network(vir_network)
        return net

    def update_or_create_all_from_libvirt(self, libvirt_connection):
        """Synchronise Network rows with all networks known to libvirt."""
        # Active networks first, then the merely defined ones.
        names = list(libvirt_connection.listNetworks())
        names.extend(libvirt_connection.listDefinedNetworks())
        for net_name in names:
            vir_net = libvirt_connection.networkLookupByName(net_name)
            self.update_or_create_from_vir_network(vir_net)
class Network(models.Model):
    """Virtual network.
    """
    name = models.CharField(max_length=32, primary_key=True)
    bridge_name = models.CharField(default="virbr0", max_length=16)
    forward_mode = models.CharField(default="nat", max_length=32)
    domain_name = models.CharField(max_length=256)
    active = models.BooleanField(default=False)
    persistent = models.BooleanField(default=False)
    objects = NetworkManager()
    class Meta:
        ordering = ("name",)
    def __unicode__(self):
        return self.name
    def update(self, vir_network, save=True):
        """Refresh this record from a libvirt virNetwork handle.
        @param vir_network: libvirt virNetwork object to copy state from.
        @param save: persist the model afterwards when True.
        """
        self.name = vir_network.name()
        self.bridge_name = vir_network.bridgeName()
        xml = etree.fromstring(vir_network.XMLDesc(0))
        # The <forward> element is absent in the network XML for isolated
        # networks; store an empty mode in that case.
        if xml.find("forward") is not None:
            self.forward_mode = xml.find("forward").attrib["mode"]
        else:
            self.forward_mode = ""
        if xml.find("domain") is not None: # <domain name=""> is optional
            self.domain_name = xml.find("domain").attrib["name"]
        else:
            self.domain_name = ""
        self.active = vir_network.isActive()
        self.persistent = vir_network.isPersistent()
        if save:
            self.save()
class Service(models.Model):
    """Web service provided by virtual machine (libvirt domain).
    """
    # Application protocols used when building the probe URL (Allocation.url).
    HTTP = 0
    HTTPS = 1
    PROTOCOL_CHOICES = (
        (HTTP, "HTTP"),
        (HTTPS, "HTTPS"),
    )
    name = models.CharField(max_length=128)
    description = models.TextField()
    port = models.PositiveIntegerField(verbose_name="Default Port")
    protocol = models.PositiveIntegerField(choices=PROTOCOL_CHOICES, default=HTTP)
    class Meta:
        ordering = ("port", "name")
    def __unicode__(self):
        return self.name
class AllocationManager(models.Manager):
    """Custom Allocation model manager."""

    def update_all_from_domain(self, domain):
        """Re-probe every service allocation attached to *domain*."""
        for alloc in domain.allocation_set.all():
            alloc.update()
class Allocation(models.Model):
    """Many-to-many relation between domains (virtual machines) and services.
    """
    domain = models.ForeignKey(Domain)
    service = models.ForeignKey(Service)
    # Result of the last reachability probe (see update()).
    running = models.BooleanField(default=False)

    objects = AllocationManager()

    class Meta:
        ordering = ("domain", "service")

    def __unicode__(self):
        return u"{0} {1}".format(self.domain, self.service)

    def url(self):
        """URL of the service on the domain's first network interface."""
        protocol = self.service.get_protocol_display().lower()
        ip_address = self.domain.interface_set.all()[0].ip_address
        return u"{0}://{1}:{2}/".format(protocol,
                                        ip_address,
                                        self.service.port)

    def external_url(self):
        """Externally reachable URL via the host's fully-qualified name."""
        protocol = self.service.get_protocol_display().lower()
        #TODO: Parse iptables and find out host port forwarding
        forwarding_port = 8080
        return u"{0}://{1}:{2}/".format(protocol, socket.getfqdn(), forwarding_port)

    def update(self, save=True):
        """Probe url() and record whether the service answered with 200.

        @param save: persist the model afterwards when True.
        """
        try:
            response = urllib2.urlopen(self.url())
            # Fixed: compare the status code with ``==`` rather than ``is``
            # — int identity only works via CPython's small-int cache.
            self.running = response.getcode() == 200
        except (urllib2.HTTPError, urllib2.URLError):
            self.running = False
        if save:
            self.save()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# NOTE(review): these names are methods of the InvTestFunctions class below,
# not module-level functions, so __all__ does not actually export them for
# ``from ... import *`` — presumably kept as an index of the test helpers;
# confirm the intent. The list also omits ``confirm_received_shipment``.
__all__ = ["send",
           "track_send_item",
           "send_shipment",
           "receive",
           "track_recv_item",
           "recv_shipment",
           "recv_sent_shipment",
           "send_rec",
           "send_get_id",
           "send_get_ref",
           "recv_rec",
           "recv_get_id",
           "dbcallback_getStockLevels",
           ]
from gluon import current
from s3 import s3_debug
from tests.web2unittest import SeleniumUnitTest
class InvTestFunctions(SeleniumUnitTest):
def send(self, user, data):
"""
@case: INV
@description: Functions which runs specific workflows for Inventory tes
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
"""
Helper method to add a inv_send record by the given user
"""
self.login(account=user, nexturl="inv/send/create")
table = "inv_send"
result = self.create(table, data)
s3_debug("WB reference: %s" % self.send_get_ref(result))
return result
# -------------------------------------------------------------------------
    def track_send_item(self, user, send_id, data, removed=True):
        """
        Helper method to add a track item to the inv_send with the
        given send_id by the given user
        """
        # Best-effort: if an inline "add" form on the current page is hidden
        # behind a button, reveal it first. The bare except deliberately
        # swallows the "button not present" case.
        try:
            add_btn = self.browser.find_element_by_id("show-add-btn")
            if add_btn.is_displayed():
                add_btn.click()
        except:
            pass
        self.login(account=user, nexturl="inv/send/%s/track_item" % send_id)
        table = "inv_track_item"
        # The dbcallback appends the current stock rows to the before/after
        # snapshots returned by create().
        result = self.create(table, data, dbcallback = self.dbcallback_getStockLevels)
        # Get the last record in the before & after
        # this will give the stock record which has been added to the end by
        # the getStockLevels callback
        if removed:
            # The shipped quantity is read from the submitted form data.
            qnty = 0
            for line in data:
                if line[0] == "quantity":
                    qnty = float(line[1])
                    break
            stock_before = result["before"].records[len(result["before"])-1].quantity
            stock_after = result["after"].records[len(result["after"])-1].quantity
            stock_shipped = qnty
            # Sending the item must have decremented the warehouse stock by
            # exactly the shipped quantity.
            self.assertTrue( stock_before - stock_after == stock_shipped, "Warehouse stock not properly adjusted, was %s should be %s but is recorded as %s" % (stock_before, stock_after, stock_before - stock_shipped))
            s3_debug ("Stock level before %s, stock level after %s" % (stock_before, stock_after))
        return result
# -------------------------------------------------------------------------
    def send_shipment(self, user, send_id):
        """
        Helper method to send a shipment with id of send_id
        """
        db = current.db
        s3db = current.s3db
        stable = s3db.inv_send
        ititable = s3db.inv_track_item
        # Get the current status
        query = (stable.id == send_id)
        record = db(query).select(stable.status,
                                  limitby=(0, 1)).first()
        send_status = record.status
        query = (ititable.send_id == send_id)
        item_records = db(query).select(ititable.status)
        # check that the status is correct
        # (per the assertions: inv_send status 0 = preparing, and track-item
        # status 1 = preparing)
        self.assertTrue(send_status == 0, "Shipment is not status preparing")
        s3_debug("Shipment status is: preparing")
        for rec in item_records:
            self.assertTrue(rec.status == 1, "Shipment item is not status preparing")
        s3_debug("Shipment items are all of status: preparing")
        # Now send the shipment on its way
        self.login(account=user, nexturl="inv/send_process/%s" % send_id)
        # Get the current status
        query = (stable.id == send_id)
        record = db(query).select(stable.status,
                                  limitby=(0, 1)).first()
        send_status = record.status
        query = (ititable.send_id == send_id)
        item_records = db(query).select(ititable.status)
        # check that the status is correct
        # (status 2 = sent, for the shipment and each of its track items)
        self.assertTrue(send_status == 2, "Shipment is not status sent")
        s3_debug("Shipment status is: sent")
        for rec in item_records:
            self.assertTrue(rec.status == 2, "Shipment item is not status sent")
        s3_debug("Shipment items are all of status: sent")
# -------------------------------------------------------------------------
def confirm_received_shipment(self, user, send_id):
"""
Helper method to confirm that a shipment has been received
outside of the system. This means that the items in the
shipment will not be recorded as being at a site but
the status of the shipment will be modified.
"""
db = current.db
s3db = current.s3db
stable = s3db.inv_send
ititable = s3db.inv_track_item
# Get the current status
query = (stable.id == send_id)
record = db(query).select(stable.status,
limitby=(0, 1)).first()
send_status = record.status
query = (ititable.send_id == send_id)
item_records = db(query).select(ititable.status)
# check that the status is correct
self.assertTrue(send_status == 2, "Shipment is not status sent")
s3_debug("Shipment status is: preparing")
for rec in item_records:
self.assertTrue(rec.status == 2, "Shipment item is not status sent")
s3_debug("Shipment items are all of status: sent")
# Now send the shipment on its way
self.login(account=user, nexturl="inv/send/%s?received=True" % send_id)
# Get the current status
query = (stable.id == send_id)
record = db(query).select(stable.status,
limitby=(0, 1)).first()
send_status = record.status
query = (ititable.send_id == send_id)
item_records = db(query).select(ititable.status)
# check that the status is correct
self.assertTrue(send_status == 1, "Shipment is not status received")
s3_debug("Shipment status is: sent")
for rec in item_records:
self.assertTrue(rec.status == 4, "Shipment item is not status arrived")
s3_debug("Shipment items are all of status: arrived")
# -------------------------------------------------------------------------
def receive(self, user, data):
"""
Helper method to add a inv_send record by the given user
"""
self.login(account=user, nexturl="inv/recv/create")
table = "inv_recv"
result = self.create(table, data)
return result
# -------------------------------------------------------------------------
    def track_recv_item(self, user, recv_id, data, removed=True):
        """
        Helper method to add a track item to the inv_recv with the
        given recv_id
        """
        # NOTE(review): the ``removed`` parameter is unused here (compare
        # with track_send_item, which uses it) — confirm intent.
        # Best-effort: reveal the inline "add" form if it is hidden; the
        # bare except deliberately swallows the "button not present" case.
        try:
            add_btn = self.browser.find_element_by_id("show-add-btn")
            if add_btn.is_displayed():
                add_btn.click()
        except:
            pass
        self.login(account=user, nexturl="inv/recv/%s/track_item" % recv_id)
        table = "inv_track_item"
        result = self.create(table, data)
        return result
# -------------------------------------------------------------------------
    def recv_shipment(self, user, recv_id, data):
        """
        Helper method that will receive the shipment, adding the
        totals that arrived
        It will get the stock in the warehouse before and then after
        and check that the stock levels have been properly increased
        """
        db = current.db
        s3db = current.s3db
        rvtable = s3db.inv_recv
        iitable = s3db.inv_inv_item
        # First get the site_id
        query = (rvtable.id == recv_id)
        record = db(query).select(rvtable.site_id,
                                  limitby=(0, 1)).first()
        site_id = record.site_id
        # Now get all the inventory items for the site
        query = (iitable.site_id == site_id)
        before = db(query).select(orderby=iitable.id)
        # Process the receive, then snapshot the inventory again.
        self.login(account=user, nexturl="inv/recv_process/%s" % recv_id)
        query = (iitable.site_id == site_id)
        after = db(query).select(orderby=iitable.id)
        # Find the differences between the before and the after
        # Each change is a (item_id, item_pack_id, quantity_delta) tuple;
        # rows absent from "before" are treated as wholly new stock.
        changes = []
        for a_rec in after:
            found = False
            for b_rec in before:
                if a_rec.id == b_rec.id:
                    if a_rec.quantity != b_rec.quantity:
                        changes.append(
                            (a_rec.item_id,
                             a_rec.item_pack_id,
                             a_rec.quantity - b_rec.quantity)
                        )
                    found = True
                    break
            if not found:
                changes.append(
                    (a_rec.item_id,
                     a_rec.item_pack_id,
                     a_rec.quantity)
                )
        # changes now contains the list of changed or new records
        # these should match the records received
        # first check are the lengths the same?
        self.assertTrue(len(data) == len(changes),
                        "The number of changed inventory items (%s) doesn't match the number of items received (%s)." %
                        (len(changes), len(data))
                        )
        # Then check every received item accounts for one stock change
        # (only logged, not asserted).
        for line in data:
            rec = line["record"]
            found = False
            for change in changes:
                if rec.inv_track_item.item_id == change[0] and \
                   rec.inv_track_item.item_pack_id == change[1] and \
                   rec.inv_track_item.quantity == change[2]:
                    found = True
                    break
            if found:
                s3_debug("%s accounted for." % line["text"])
            else:
                s3_debug("%s not accounted for." % line["text"])
# -------------------------------------------------------------------------
def recv_sent_shipment(self, method, user, WB_ref, item_list):
"""
Helper method that will receive the sent shipment.
This supports two methods:
method = "warehouse"
====================
This requires going to the receiving warehouse
Selecting the shipment (using the WB reference)
Opening each item and selecting the received totals
Then receive the shipment
method = "search"
====================
Search for all received shipments
Select the matching WB reference
Opening each item and selecting the received totals
Then receive the shipment
Finally:
It will get the stock in the warehouse before and then after
and check that the stock levels have been properly increased
"""
browser = self.browser
if method == "search":
self.login(account=user, nexturl="inv/recv/search")
# Find the WB reference in the dataTable (filter so only one is displayed)
el = browser.find_element_by_id("recv_search_simple")
el.send_keys(WB_ref)
# Submit the search
browser.find_element_by_css_selector("input[type='submit']").submit()
# Select the only row in the dataTable
if not self.dt_action():
fail("Unable to select the incoming shipment with reference %s" % WB_ref)
elif method == "warehouse":
return # not yet implemented
else:
fail("Unknown method of %s" % method)
return # invalid method
#####################################################
# We are now viewing the details of the receive item
#####################################################
# Now get the recv id from the url
url = browser.current_url
url_parts = url.split("/")
try:
recv_id = int(url_parts[-1])
except:
recv_id = int(url_parts[-2])
# Click on the items tab
self.login(account=user, nexturl="inv/recv/%s/track_item" % recv_id)
data = []
for item in item_list:
# Find the item in the dataTable
self.dt_filter(item[0])
self.dt_action()
el = browser.find_element_by_id("inv_track_item_recv_quantity")
el.send_keys(item[1])
text = "%s %s" % (item[1], item[0])
data.append({"text" : text,
"record" : item[2]})
# Save the form
browser.find_element_by_css_selector("input[type='submit']").submit()
# Now receive the shipment and check the totals
self.recv_shipment(user, recv_id, data)
# -------------------------------------------------------------------------
# Functions which extract data from the create results
#
def send_rec(self, result):
    """Return the newly created inv_send row from a create *result*,
    or None when no row was created.
    """
    after = result["after"]
    # The freshly created inv_send is always the first record in "after".
    if not len(after):
        return None
    return after.records[0].inv_send
def send_get_id(self, result):
    """Return the record id of the newly created inv_send row so the
    record can be opened later; None when nothing was created.
    """
    after = result["after"]
    # The freshly created inv_send is always the first record in "after".
    if len(after) == 0:
        return None
    return after.records[0].inv_send.id
def send_get_ref(self, result):
    """Return the waybill reference of the newly created inv_send row so
    it can be used to filter dataTables; None when nothing was created.
    """
    after = result["after"]
    # The freshly created inv_send is always the first record in "after".
    if len(after) == 0:
        return None
    return after.records[0].inv_send.send_ref
# -------------------------------------------------------------------------
def recv_rec(self, result):
    """Return the newly created inv_recv row from a create *result*,
    or None when no row was created.
    """
    after = result["after"]
    # The freshly created inv_recv is always the first record in "after".
    if not len(after):
        return None
    return after.records[0].inv_recv
# -------------------------------------------------------------------------
def recv_get_id(self, result):
    """Return the record id of the newly created inv_recv row so the
    record can be opened later; None when nothing was created.
    """
    after = result["after"]
    # The freshly created inv_recv is always the first record in "after".
    if len(after) == 0:
        return None
    return after.records[0].inv_recv.id
# -------------------------------------------------------------------------
# Callback used to retrieve additional data to the create results
#
def dbcallback_getStockLevels(self, table, data, rows):
    """
    Callback to append the total in stock for the selected item.
    This can then be used to look at the value before and after
    to ensure that the totals have been removed from the warehouse.
    The stock row is appended to the *end* of the list of rows.

    Bug fix: ``inv_item_id`` was previously left unbound (raising a
    confusing ``NameError``/``UnboundLocalError``) when no
    ``send_inv_item_id`` entry was present in *data*; now fails fast
    with a clear error.
    """
    # NOTE(review): the ``table`` parameter is deliberately overridden with
    # the inv_inv_item table (matches the original behaviour) — confirm the
    # callers never rely on the passed-in table.
    table = current.s3db["inv_inv_item"]
    inv_item_id = None
    for details in data:
        if details[0] == "send_inv_item_id":
            inv_item_id = details[1]
            break
    if inv_item_id is None:
        raise ValueError(
            "dbcallback_getStockLevels: no send_inv_item_id found in data")
    stock_row = table[inv_item_id]
    rows.records.append(stock_row)
    return rows
# END =========================================================================
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Application-specific exceptions raised by the block structure framework.
"""
class BlockStructureException(Exception):
    """Root of the Block Structure framework's exception hierarchy."""
class TransformerException(BlockStructureException):
    """Raised for Transformer-related errors."""
class UsageKeyNotInBlockStructure(BlockStructureException):
    """Raised when a usage key is not found within a block structure."""
class TransformerDataIncompatible(BlockStructureException):
    """Raised when the version of a Transformer's stored data is not
    compatible with the current version of the Transformer.
    """
class BlockStructureNotFound(BlockStructureException):
    """Raised when no Block Structure exists for the requested root usage key."""

    def __init__(self, root_block_usage_key):
        message = 'Block structure not found; data_usage_key: {}'.format(
            root_block_usage_key)
        super(BlockStructureNotFound, self).__init__(message)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use std::{
fs::create_dir_all,
path::{Path, PathBuf},
};
use ureq::ResponseExt;
use crate::utils::http_utils::{base_ureq_agent, download};
pub const WEBVIEW2_BOOTSTRAPPER_URL: &str = "https://go.microsoft.com/fwlink/p/?LinkId=2124703";
pub const WEBVIEW2_OFFLINE_INSTALLER_X86_URL: &str =
"https://go.microsoft.com/fwlink/?linkid=2099617";
pub const WEBVIEW2_OFFLINE_INSTALLER_X64_URL: &str =
"https://go.microsoft.com/fwlink/?linkid=2124701";
pub const WEBVIEW2_URL_PREFIX: &str =
"https://msedge.sf.dl.delivery.mp.microsoft.com/filestreamingservice/files/";
pub const NSIS_OUTPUT_FOLDER_NAME: &str = "nsis";
pub const NSIS_UPDATER_OUTPUT_FOLDER_NAME: &str = "nsis-updater";
pub const WIX_OUTPUT_FOLDER_NAME: &str = "msi";
pub const WIX_UPDATER_OUTPUT_FOLDER_NAME: &str = "msi-updater";
/// Resolve the `<GUID>` and `<FILENAME>` components of a WebView2 download URL.
///
/// Issues a HEAD request to *url* and inspects the response's URI, which is
/// presumably the final post-redirect CDN URL of the form
/// `{WEBVIEW2_URL_PREFIX}<GUID>/<FILENAME>` — TODO confirm the agent follows
/// redirects. Returns `(guid, filename)` on success.
///
/// Errors when the resulting URL does not start with [`WEBVIEW2_URL_PREFIX`]
/// or does not contain a `/` separating the GUID from the file name.
pub fn webview2_guid_path(url: &str) -> crate::Result<(String, String)> {
    let agent = base_ureq_agent();
    // A HEAD request is enough: only the response URI is needed, not the body.
    let response = agent.head(url).call().map_err(Box::new)?;
    let final_url = response.get_uri().to_string();
    let remaining_url = final_url.strip_prefix(WEBVIEW2_URL_PREFIX).ok_or_else(|| {
        crate::Error::GenericError(format!(
            "WebView2 URL prefix mismatch. Expected `{WEBVIEW2_URL_PREFIX}`, found `{final_url}`."
        ))
    })?;
    let (guid, filename) = remaining_url.split_once('/').ok_or_else(|| {
        crate::Error::GenericError(format!(
            "WebView2 URL format mismatch. Expected `<GUID>/<FILENAME>`, found `{remaining_url}`."
        ))
    })?;
    Ok((guid.into(), filename.into()))
}
/// Download the WebView2 evergreen bootstrapper into *base_path* (cached:
/// skipped when the file already exists) and return its path.
pub fn download_webview2_bootstrapper(base_path: &Path) -> crate::Result<PathBuf> {
    let file_path = base_path.join("MicrosoftEdgeWebview2Setup.exe");
    if file_path.exists() {
        return Ok(file_path);
    }
    let payload = download(WEBVIEW2_BOOTSTRAPPER_URL)?;
    std::fs::write(&file_path, payload)?;
    Ok(file_path)
}
/// Download the WebView2 offline installer for *arch* (`"x64"` or anything
/// else → x86) into `<base_path>/<GUID>/<FILENAME>` and return that path.
/// The download is skipped when the file already exists.
pub fn download_webview2_offline_installer(base_path: &Path, arch: &str) -> crate::Result<PathBuf> {
    let url = match arch {
        "x64" => WEBVIEW2_OFFLINE_INSTALLER_X64_URL,
        _ => WEBVIEW2_OFFLINE_INSTALLER_X86_URL,
    };
    let (guid, filename) = webview2_guid_path(url)?;
    let dir_path = base_path.join(guid);
    let file_path = dir_path.join(filename);
    if file_path.exists() {
        return Ok(file_path);
    }
    create_dir_all(dir_path)?;
    std::fs::write(&file_path, download(url)?)?;
    Ok(file_path)
}
#[cfg(target_os = "windows")]
pub fn os_bitness<'a>() -> Option<&'a str> {
    use windows_sys::Win32::System::SystemInformation::{
        GetNativeSystemInfo, PROCESSOR_ARCHITECTURE_AMD64, PROCESSOR_ARCHITECTURE_INTEL, SYSTEM_INFO,
    };

    // Query the *native* system info so a 32-bit process on 64-bit Windows
    // still reports the machine's real architecture.
    let mut info: SYSTEM_INFO = unsafe { std::mem::zeroed() };
    unsafe { GetNativeSystemInfo(&mut info) };
    let arch = unsafe { info.Anonymous.Anonymous.wProcessorArchitecture };
    if arch == PROCESSOR_ARCHITECTURE_INTEL {
        Some("x86")
    } else if arch == PROCESSOR_ARCHITECTURE_AMD64 {
        Some("x64")
    } else {
        // ARM and other architectures are not handled here.
        None
    }
}
|
rust
|
github
|
https://github.com/tauri-apps/tauri
|
crates/tauri-bundler/src/bundle/windows/util.rs
|
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DialoGPT
## Overview
DialoGPT は、Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan によって [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://huggingface.co/papers/1911.00536) で提案されました。これは、Reddit から抽出された 1 億 4,700 万件の会話のようなやりとりでトレーニングされた GPT2 モデルです。
論文の要約は次のとおりです。
*私たちは、大規模で調整可能なニューラル会話応答生成モデル DialoGPT (対話生成事前トレーニング済み) を紹介します。
変成器)。 Reddit のコメント チェーンから抽出された 1 億 4,700 万件の会話のようなやり取りを対象にトレーニングされました。
2005 年から 2017 年にかけて、DialoGPT は人間に近いパフォーマンスを達成するために Hugging Face PyTorch トランスフォーマーを拡張しました。
シングルターンダイアログ設定における自動評価と人間による評価の両方。会話システムが
DialoGPT を活用すると、強力なベースラインよりも関連性が高く、内容が充実し、コンテキストに一貫性のある応答が生成されます。
システム。神経反応の研究を促進するために、事前トレーニングされたモデルとトレーニング パイプラインが公開されています。
よりインテリジェントなオープンドメイン対話システムの生成と開発。*
元のコードは [ここ](https://github.com/microsoft/DialoGPT) にあります。
## Usage tips
- DialoGPT は絶対位置埋め込みを備えたモデルであるため、通常は入力を右側にパディングすることをお勧めします。
左よりも。
- DialoGPT は、会話データの因果言語モデリング (CLM) 目標に基づいてトレーニングされているため、強力です
オープンドメイン対話システムにおける応答生成時。
- DialoGPT を使用すると、[DialoGPT's model card](https://huggingface.co/microsoft/DialoGPT-medium) に示されているように、ユーザーはわずか 10 行のコードでチャット ボットを作成できます。
トレーニング:
DialoGPT をトレーニングまたは微調整するには、因果言語モデリング トレーニングを使用できます。公式論文を引用すると: *私たちは
OpenAI GPT-2に従って、マルチターン対話セッションを長いテキストとしてモデル化し、生成タスクを言語としてフレーム化します
モデリング。まず、ダイアログ セッション内のすべてのダイアログ ターンを長いテキスト x_1,..., x_N に連結します (N はシーケンス長です)。* 詳細については、元の論文を参照してください。
<Tip>
DialoGPT のアーキテクチャは GPT2 モデルに基づいています。API リファレンスと例については、[GPT2 のドキュメント ページ](openai-community/gpt2) を参照してください。
</Tip>
|
unknown
|
github
|
https://github.com/huggingface/transformers
|
docs/source/ja/model_doc/dialogpt.md
|
//===--- GenDistributed.h - IRGen for distributed features ------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2020 - 2021 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines interfaces for emitting code for various distributed
// features.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_IRGEN_GENDISTRIBUTED_H
#define SWIFT_IRGEN_GENDISTRIBUTED_H
#include "swift/AST/Types.h"
#include "swift/Basic/LLVM.h"
#include "swift/SIL/ApplySite.h"
#include "llvm/IR/CallingConv.h"
#include "Callee.h"
#include "GenHeap.h"
#include "IRGenModule.h"
namespace llvm {
class Value;
}
namespace swift {
class CanType;
class ProtocolConformanceRef;
class SILType;
namespace irgen {
class Explosion;
class IRGenFunction;
/// Emit the '_distributedActorRemoteInitialize' call.
///
/// \param IGF the IR generation context for the current function.
/// \param selfType the SIL type of the distributed actor.
/// \param actorMetatype the metatype value of the actor being initialized.
/// \param out the explosion receiving the result values of the call.
llvm::Value *emitDistributedActorInitializeRemote(
    IRGenFunction &IGF,
    SILType selfType,
    llvm::Value *actorMetatype,
    Explosion &out);
} // end namespace irgen
} // end namespace swift
#endif
|
c
|
github
|
https://github.com/apple/swift
|
lib/IRGen/GenDistributed.h
|
"""A user profile shows the publically available information about a user."""
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
import datetime
class UserProfile(models.Model):
    """Public user profile: the publically visible information about a user."""

    # NOTE(review): no on_delete argument — relies on the pre-Django-2.0
    # implicit CASCADE default; confirm before any Django upgrade.
    user = models.ForeignKey(User, related_name='profile')
    # Optional display name.
    name = models.CharField(max_length=1000, blank=True, null=True)
    # Optional profile picture.
    image = models.ImageField(blank=True, null=True)
    # Callable default: evaluated at save time, not at class-definition time.
    date_created = models.DateField(default=datetime.datetime.now)

    def image_url(self):
        """Return the profile image URL, or '' when no image is set."""
        if self.image and self.image.url:
            return self.image.url
        else:
            return ''
def check_profile_exists(sender, instance, signal, *args, **kwargs):
    """Ensure every saved ``User`` has a ``UserProfile``.

    Connected to ``post_save`` below; the first time a user is saved without
    a profile, one is created seeded with the user's first name.
    """
    if sender is User:
        # exists() issues a cheap ``SELECT ... LIMIT 1`` instead of COUNT(*).
        if not UserProfile.objects.filter(user=instance).exists():
            user_profile = UserProfile()
            user_profile.user = instance
            user_profile.name = instance.first_name
            user_profile.save()

post_save.connect(check_profile_exists, sender=User)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Diese Klasse dient zum Laden von Assets, die nicht direkt mit den Planeten in Verbindung stehen.
Dazu gehoert das Setzen einer Hintergrundfarbe, aber auch das Laden von Musik, welche im Hintergrund spielt.
"""
__author__ = 'Thomas Taschner, Michael Weinberger'
__date__ = 20151209
__version__ = 1.0
class Ambiance:
    """Loads ambience assets that are independent of the planets: sets the
    background colour and starts the looping background audio streams.
    """

    def __init__(self, base, loader, volumeimpmarch, volumeambience, balanceimpmarch, balanceambience):
        """Store the show base, the asset loader, and the volume/stereo-balance
        settings for the two background audio streams.

        :param base: the base object everything builds on
        :param loader: the loader object used to load assets
        :param volumeimpmarch: volume for the Imperial March theme
        :param volumeambience: volume for the ambience sound
        :param balanceimpmarch: stereo position of the theme stream
        :param balanceambience: stereo position of the ambience stream
        """
        self.base = base
        self.loader = loader
        self.volumeimpmarch = volumeimpmarch
        self.volumeambience = volumeambience
        self.balanceimpmarch = balanceimpmarch
        self.balanceambience = balanceambience

    def initbg(self):
        """Set the background colour (here: 0/0/0, i.e. black)."""
        self.base.setBackgroundColor(0, 0, 0)

    def startsound(self):
        """Start the Imperial March theme and the ambience bed (fighting
        X-Wings), both looping forever.
        """
        self._play_looping("models/starwars.mp4", self.volumeimpmarch, self.balanceimpmarch)
        self._play_looping("models/ambience.mp4", self.volumeambience, self.balanceambience)

    def _play_looping(self, path, volume, balance):
        """Load *path*, apply volume and stereo balance, loop endlessly and play."""
        sound = self.loader.loadSfx(path)
        sound.setVolume(volume)
        sound.setBalance(balance)
        sound.setLoop(True)  # otherwise the clip would play only once
        sound.play()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 OpenStack Foundation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_service import service
from neutron.agent.common import config
from neutron.agent.dhcp import config as dhcp_config
from neutron.agent.linux import interface
from neutron.agent.metadata import config as metadata_config
from neutron.common import config as common_config
from neutron.common import topics
from neutron import service as neutron_service
def register_options():
    """Register every config option group the DHCP agent consumes on the
    global ``cfg.CONF`` object (helpers first, then the option lists).
    """
    config.register_interface_driver_opts_helper(cfg.CONF)
    config.register_use_namespaces_opts_helper(cfg.CONF)
    config.register_agent_state_opts_helper(cfg.CONF)
    for opts in (dhcp_config.DHCP_AGENT_OPTS,
                 dhcp_config.DHCP_OPTS,
                 dhcp_config.DNSMASQ_OPTS,
                 metadata_config.DRIVER_OPTS,
                 metadata_config.SHARED_OPTS,
                 interface.OPTS):
        cfg.CONF.register_opts(opts)
def main():
    """Entry point for the neutron-dhcp-agent binary.

    Registers config options, parses CLI arguments and config files, sets
    up logging, then creates and runs the DHCP agent RPC service until it
    exits. The ordering of these steps matters: options must be registered
    before config parsing, and logging before the service starts.
    """
    register_options()
    # Parse config files and CLI arguments (argv[0] is the program name).
    common_config.init(sys.argv[1:])
    config.setup_logging()
    server = neutron_service.Service.create(
        binary='neutron-dhcp-agent',
        topic=topics.DHCP_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='neutron.agent.dhcp.agent.DhcpAgentWithStateReport')
    # Blocks until the service stops.
    service.launch(cfg.CONF, server).wait()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer;
import org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.CollectionUtils;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
* <p>The sticky assignor serves two purposes. First, it guarantees an assignment that is as balanced as possible, meaning either:
* <ul>
* <li>the numbers of topic partitions assigned to consumers differ by at most one; or</li>
* <li>each consumer that has 2+ fewer topic partitions than some other consumer cannot get any of those topic partitions transferred to it.</li>
* </ul>
* Second, it preserved as many existing assignment as possible when a reassignment occurs. This helps in saving some of the
* overhead processing when topic partitions move from one consumer to another.</p>
*
* <p>Starting fresh it would work by distributing the partitions over consumers as evenly as possible. Even though this may sound similar to
* how round robin assignor works, the second example below shows that it is not.
* During a reassignment it would perform the reassignment in such a way that in the new assignment
* <ol>
* <li>topic partitions are still distributed as evenly as possible, and</li>
* <li>topic partitions stay with their previously assigned consumers as much as possible.</li>
* </ol>
* Of course, the first goal above takes precedence over the second one.</p>
*
* <p><b>Example 1.</b> Suppose there are three consumers <code>C0</code>, <code>C1</code>, <code>C2</code>,
* four topics <code>t0,</code> <code>t1</code>, <code>t2</code>, <code>t3</code>, and each topic has 2 partitions,
* resulting in partitions <code>t0p0</code>, <code>t0p1</code>, <code>t1p0</code>, <code>t1p1</code>, <code>t2p0</code>,
* <code>t2p1</code>, <code>t3p0</code>, <code>t3p1</code>. Each consumer is subscribed to all three topics.
*
* The assignment with both sticky and round robin assignors will be:
* <ul>
* <li><code>C0: [t0p0, t1p1, t3p0]</code></li>
* <li><code>C1: [t0p1, t2p0, t3p1]</code></li>
* <li><code>C2: [t1p0, t2p1]</code></li>
* </ul>
*
* Now, let's assume <code>C1</code> is removed and a reassignment is about to happen. The round robin assignor would produce:
* <ul>
* <li><code>C0: [t0p0, t1p0, t2p0, t3p0]</code></li>
* <li><code>C2: [t0p1, t1p1, t2p1, t3p1]</code></li>
* </ul>
*
* while the sticky assignor would result in:
* <ul>
* <li><code>C0 [t0p0, t1p1, t3p0, t2p0]</code></li>
* <li><code>C2 [t1p0, t2p1, t0p1, t3p1]</code></li>
* </ul>
* preserving all the previous assignments (unlike the round robin assignor).
*</p>
* <p><b>Example 2.</b> There are three consumers <code>C0</code>, <code>C1</code>, <code>C2</code>,
* and three topics <code>t0</code>, <code>t1</code>, <code>t2</code>, with 1, 2, and 3 partitions respectively.
* Therefore, the partitions are <code>t0p0</code>, <code>t1p0</code>, <code>t1p1</code>, <code>t2p0</code>,
* <code>t2p1</code>, <code>t2p2</code>. <code>C0</code> is subscribed to <code>t0</code>; <code>C1</code> is subscribed to
* <code>t0</code>, <code>t1</code>; and <code>C2</code> is subscribed to <code>t0</code>, <code>t1</code>, <code>t2</code>.
*
* The round robin assignor would come up with the following assignment:
* <ul>
* <li><code>C0 [t0p0]</code></li>
* <li><code>C1 [t1p0]</code></li>
* <li><code>C2 [t1p1, t2p0, t2p1, t2p2]</code></li>
* </ul>
*
* which is not as balanced as the assignment suggested by sticky assignor:
* <ul>
* <li><code>C0 [t0p0]</code></li>
* <li><code>C1 [t1p0, t1p1]</code></li>
* <li><code>C2 [t2p0, t2p1, t2p2]</code></li>
* </ul>
*
* Now, if consumer <code>C0</code> is removed, these two assignors would produce the following assignments.
* Round Robin (preserves 3 partition assignments):
* <ul>
* <li><code>C1 [t0p0, t1p1]</code></li>
* <li><code>C2 [t1p0, t2p0, t2p1, t2p2]</code></li>
* </ul>
*
* Sticky (preserves 5 partition assignments):
* <ul>
* <li><code>C1 [t1p0, t1p1, t0p0]</code></li>
* <li><code>C2 [t2p0, t2p1, t2p2]</code></li>
* </ul>
*</p>
* <h3>Impact on <code>ConsumerRebalanceListener</code></h3>
* The sticky assignment strategy can provide some optimization to those consumers that have some partition cleanup code
* in their <code>onPartitionsRevoked()</code> callback listeners. The cleanup code is placed in that callback listener
* because the consumer has no assumption or hope of preserving any of its assigned partitions after a rebalance when it
* is using range or round robin assignor. The listener code would look like this:
* <pre>
* {@code
* class TheOldRebalanceListener implements ConsumerRebalanceListener {
*
* void onPartitionsRevoked(Collection<TopicPartition> partitions) {
* for (TopicPartition partition: partitions) {
* commitOffsets(partition);
* cleanupState(partition);
* }
* }
*
* void onPartitionsAssigned(Collection<TopicPartition> partitions) {
* for (TopicPartition partition: partitions) {
* initializeState(partition);
* initializeOffset(partition);
* }
* }
* }
* }
* </pre>
*
* As mentioned above, one advantage of the sticky assignor is that, in general, it reduces the number of partitions that
* actually move from one consumer to another during a reassignment. Therefore, it allows consumers to do their cleanup
* more efficiently. Of course, they still can perform the partition cleanup in the <code>onPartitionsRevoked()</code>
* listener, but they can be more efficient and make a note of their partitions before and after the rebalance, and do the
* cleanup after the rebalance only on the partitions they have lost (which is normally not a lot). The code snippet below
* clarifies this point:
* <pre>
* {@code
* class TheNewRebalanceListener implements ConsumerRebalanceListener {
* Collection<TopicPartition> lastAssignment = Collections.emptyList();
*
* void onPartitionsRevoked(Collection<TopicPartition> partitions) {
* for (TopicPartition partition: partitions)
* commitOffsets(partition);
* }
*
* void onPartitionsAssigned(Collection<TopicPartition> assignment) {
* for (TopicPartition partition: difference(lastAssignment, assignment))
* cleanupState(partition);
*
* for (TopicPartition partition: difference(assignment, lastAssignment))
* initializeState(partition);
*
* for (TopicPartition partition: assignment)
* initializeOffset(partition);
*
* this.lastAssignment = assignment;
* }
* }
* }
* </pre>
*
* Any consumer that uses sticky assignment can leverage this listener like this:
* <code>consumer.subscribe(topics, new TheNewRebalanceListener());</code>
*
* Note that you can leverage the {@link CooperativeStickyAssignor} so that only partitions which are being
* reassigned to another consumer will be revoked. That is the preferred assignor for newer cluster. See
* {@link ConsumerPartitionAssignor.RebalanceProtocol} for a detailed explanation of cooperative rebalancing.
*/
public class StickyAssignor extends AbstractStickyAssignor {

    public static final String STICKY_ASSIGNOR_NAME = "sticky";

    // these schemas are used for preserving consumer's previously assigned partitions
    // list and sending it as user data to the leader during a rebalance
    static final String TOPIC_PARTITIONS_KEY_NAME = "previous_assignment";
    static final String TOPIC_KEY_NAME = "topic";
    static final String PARTITIONS_KEY_NAME = "partitions";
    private static final String GENERATION_KEY_NAME = "generation";
    // One struct per topic: the topic name plus the partition ids owned within it.
    static final Schema TOPIC_ASSIGNMENT = new Schema(
        new Field(TOPIC_KEY_NAME, Type.STRING),
        new Field(PARTITIONS_KEY_NAME, new ArrayOf(Type.INT32)));
    static final Schema STICKY_ASSIGNOR_USER_DATA_V0 = new Schema(
        new Field(TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(TOPIC_ASSIGNMENT)));
    // V1 additionally carries the consumer group generation id.
    private static final Schema STICKY_ASSIGNOR_USER_DATA_V1 = new Schema(
        new Field(TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(TOPIC_ASSIGNMENT)),
        new Field(GENERATION_KEY_NAME, Type.INT32));

    // Partitions assigned to this member in the last rebalance; null until
    // onAssignment() has been invoked at least once.
    private List<TopicPartition> memberAssignment = null;
    private int generation = DEFAULT_GENERATION; // consumer group generation

    @Override
    public String name() {
        return STICKY_ASSIGNOR_NAME;
    }

    /** Remember the new assignment and generation so they can be sent as user data next rebalance. */
    @Override
    public void onAssignment(Assignment assignment, ConsumerGroupMetadata metadata) {
        memberAssignment = assignment.partitions();
        this.generation = metadata.generationId();
    }

    /** Serialize the previously owned partitions plus generation as V1 user data; null before any assignment. */
    @Override
    public ByteBuffer subscriptionUserData(Set<String> topics) {
        if (memberAssignment == null)
            return null;
        return serializeTopicPartitionAssignment(new MemberData(memberAssignment, Optional.of(generation)));
    }

    @Override
    protected MemberData memberData(Subscription subscription) {
        // Always deserialize ownedPartitions and generation id from user data
        // since StickyAssignor is an eager rebalance protocol that will revoke all existing partitions before joining group
        ByteBuffer userData = subscription.userData();
        if (userData == null || !userData.hasRemaining()) {
            return new MemberData(Collections.emptyList(), Optional.empty(), subscription.rackId());
        }
        return deserializeTopicPartitionAssignment(userData);
    }

    // visible for testing
    static ByteBuffer serializeTopicPartitionAssignment(MemberData memberData) {
        Struct struct = new Struct(STICKY_ASSIGNOR_USER_DATA_V1);
        List<Struct> topicAssignments = new ArrayList<>();
        // Group the flat partition list by topic so each topic serializes as one struct.
        for (Map.Entry<String, List<Integer>> topicEntry : CollectionUtils.groupPartitionsByTopic(memberData.partitions).entrySet()) {
            Struct topicAssignment = new Struct(TOPIC_ASSIGNMENT);
            topicAssignment.set(TOPIC_KEY_NAME, topicEntry.getKey());
            topicAssignment.set(PARTITIONS_KEY_NAME, topicEntry.getValue().toArray());
            topicAssignments.add(topicAssignment);
        }
        struct.set(TOPIC_PARTITIONS_KEY_NAME, topicAssignments.toArray());
        // The generation field is optional for compatibility with V0 data.
        memberData.generation.ifPresent(integer -> struct.set(GENERATION_KEY_NAME, integer));
        ByteBuffer buffer = ByteBuffer.allocate(STICKY_ASSIGNOR_USER_DATA_V1.sizeOf(struct));
        STICKY_ASSIGNOR_USER_DATA_V1.write(buffer, struct);
        buffer.flip();
        return buffer;
    }

    private static MemberData deserializeTopicPartitionAssignment(ByteBuffer buffer) {
        Struct struct;
        // Keep an untouched duplicate: a failed V1 read consumes the buffer's position.
        ByteBuffer copy = buffer.duplicate();
        try {
            struct = STICKY_ASSIGNOR_USER_DATA_V1.read(buffer);
        } catch (Exception e1) {
            try {
                // fall back to older schema
                struct = STICKY_ASSIGNOR_USER_DATA_V0.read(copy);
            } catch (Exception e2) {
                // ignore the consumer's previous assignment if it cannot be parsed
                return new MemberData(Collections.emptyList(), Optional.of(DEFAULT_GENERATION));
            }
        }
        List<TopicPartition> partitions = new ArrayList<>();
        for (Object structObj : struct.getArray(TOPIC_PARTITIONS_KEY_NAME)) {
            Struct assignment = (Struct) structObj;
            String topic = assignment.getString(TOPIC_KEY_NAME);
            for (Object partitionObj : assignment.getArray(PARTITIONS_KEY_NAME)) {
                Integer partition = (Integer) partitionObj;
                partitions.add(new TopicPartition(topic, partition));
            }
        }
        // make sure this is backward compatible
        Optional<Integer> generation = struct.hasField(GENERATION_KEY_NAME) ? Optional.of(struct.getInt(GENERATION_KEY_NAME)) : Optional.empty();
        return new MemberData(partitions, generation);
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/StickyAssignor.java
|
"""
web.py
Contains functions for interacting with web services.
Created by:
- Bjorn Neergaard <https://github.com/neersighted>
Maintainer:
- Luke Rogers <https://github.com/lukeroge>
License:
GPL v3
"""
import json
import logging
import requests
from requests import RequestException
# Constants
DEFAULT_SHORTENER = 'is.gd'
DEFAULT_PASTEBIN = 'hastebin'
HASTEBIN_SERVER = 'https://hastebin.com'
logger = logging.getLogger('cloudbot')
# Shortening / pasting
# Public API
def shorten(url, custom=None, key=None, service=DEFAULT_SHORTENER):
    """Shorten *url* with the named *service*; raises ServiceError on failure."""
    return shorteners[service].shorten(url, custom, key)
def try_shorten(url, custom=None, key=None, service=DEFAULT_SHORTENER):
    """Shorten *url* with the named *service*, falling back to the original
    URL if the service reports an error."""
    return shorteners[service].try_shorten(url, custom, key)
def expand(url, service=None):
    """Expand a shortened *url*.

    When *service* is given, that backend is used directly; otherwise the
    backend is guessed from the URL (first registered service whose name
    appears in the URL), defaulting to the generic redirect-following
    Shortener.
    """
    if service:
        return shorteners[service].expand(url)
    impl = next((shorteners[name] for name in shorteners if name in url), None)
    if impl is None:
        impl = Shortener()
    return impl.expand(url)
def paste(data, ext='txt', service=DEFAULT_PASTEBIN):
    """Upload *data* to a pastebin, preferring *service* but falling back to
    any other registered pastebin when it fails. Returns the paste URL, or
    an error message when every backend failed.
    """
    remaining = pastebins.copy()
    impl = remaining.pop(service, None)
    while impl is not None:
        try:
            return impl.paste(data, ext)
        except ServiceError:
            logger.exception("Paste failed")
        # Try the next remaining backend, if any.
        if remaining:
            _, impl = remaining.popitem()
        else:
            impl = None
    return "Unable to paste data"
class ServiceError(Exception):
    """Raised when a shortener/pastebin web service reports a failure."""

    def __init__(self, message, request):
        super().__init__()
        self.message = message    # human-readable failure description
        self.request = request    # the failed HTTP response object

    def __str__(self):
        return '[HTTP {}] {}'.format(self.request.status_code, self.message)
class Shortener:
    """Base shortener backend: shorten() is the identity and expand()
    follows a single redirect hop."""

    def shorten(self, url, custom=None, key=None):
        """Default implementation: return *url* unchanged."""
        return url

    def try_shorten(self, url, custom=None, key=None):
        """Like shorten(), but falls back to *url* on ServiceError."""
        try:
            return self.shorten(url, custom, key)
        except ServiceError:
            return url

    def expand(self, url):
        """Resolve *url* by one redirect and return the Location header."""
        response = requests.get(url, allow_redirects=False)
        try:
            response.raise_for_status()
        except RequestException as e:
            response = e.response
            raise ServiceError(response.reason, response)
        location = response.headers.get('location')
        if location is not None:
            return location
        raise ServiceError('That URL does not exist', response)
class Pastebin:
    """Abstract pastebin backend; subclasses must implement paste()."""

    def paste(self, data, ext):
        """Upload *data* (file extension *ext*) and return the paste URL."""
        raise NotImplementedError
# Internal Implementations
shorteners = {}
pastebins = {}
def _shortener(name):
def _decorate(impl):
shorteners[name] = impl()
return _decorate
def _pastebin(name):
def _decorate(impl):
pastebins[name] = impl()
return _decorate
@_shortener('is.gd')
class Isgd(Shortener):
    """is.gd URL shortener/expander backend."""

    def shorten(self, url, custom=None, key=None):
        """Shorten *url*; *custom* optionally requests a vanity short URL."""
        params = {'url': url, 'shorturl': custom, 'format': 'json'}
        response = requests.get('http://is.gd/create.php', params=params)
        try:
            response.raise_for_status()
        except RequestException as e:
            response = e.response
            raise ServiceError(response.reason, response)
        payload = response.json()
        if 'shorturl' in payload:
            return payload['shorturl']
        raise ServiceError(payload['errormessage'], response)

    def expand(self, url):
        """Expand an is.gd short *url* back to its destination."""
        params = {'shorturl': url, 'format': 'json'}
        response = requests.get('http://is.gd/forward.php', params=params)
        try:
            response.raise_for_status()
        except RequestException as e:
            response = e.response
            raise ServiceError(response.reason, response)
        payload = response.json()
        if 'url' in payload:
            return payload['url']
        raise ServiceError(payload['errormessage'], response)
@_shortener('goo.gl')
class Googl(Shortener):
    """goo.gl URL shortener backend (shortening requires an API *key*)."""

    def shorten(self, url, custom=None, key=None):
        """Shorten *url* via the goo.gl API; *custom* is ignored by this service."""
        headers = {'content-type': 'application/json'}
        params = {'key': key}
        body = {'longUrl': url}
        response = requests.post('https://www.googleapis.com/urlshortener/v1/url',
                                 params=params, data=json.dumps(body), headers=headers)
        try:
            response.raise_for_status()
        except RequestException as e:
            response = e.response
            raise ServiceError(response.reason, response)
        payload = response.json()
        if 'error' not in payload:
            return payload['id']
        raise ServiceError(payload['error']['message'], response)

    def expand(self, url):
        """Expand a goo.gl short *url*."""
        params = {'shortUrl': url}
        response = requests.get('https://www.googleapis.com/urlshortener/v1/url', params=params)
        try:
            response.raise_for_status()
        except RequestException as e:
            response = e.response
            raise ServiceError(response.reason, response)
        payload = response.json()
        if 'error' not in payload:
            return payload['longUrl']
        raise ServiceError(payload['error']['message'], response)
@_shortener('git.io')
class Gitio(Shortener):
    """git.io GitHub URL shortener backend (shorten only)."""

    def shorten(self, url, custom=None, key=None):
        """Shorten a GitHub *url*; *custom* requests a specific short code."""
        form = {'url': url, 'code': custom}
        response = requests.post('http://git.io', data=form)
        try:
            response.raise_for_status()
        except RequestException as e:
            response = e.response
            raise ServiceError(response.reason, response)
        if response.status_code == requests.codes.created:
            short = response.headers['location']
            # git.io keeps the existing mapping when the code is taken.
            if custom and custom not in short:
                raise ServiceError('That URL is already in use', response)
            return short
        raise ServiceError(response.text, response)
@_pastebin('hastebin')
class Hastebin(Pastebin):
    """hastebin.com pastebin backend."""

    def paste(self, data, ext):
        """Upload *data* to hastebin and return the paste URL with *ext* suffix."""
        r = requests.post(HASTEBIN_SERVER + '/documents', data=data)
        try:
            r.raise_for_status()
        except RequestException as e:
            r = e.response
            raise ServiceError(r.reason, r)
        else:
            j = r.json()
            # Bug fix: the status code was compared with ``is``, which only
            # worked via CPython's small-int caching; use ``==`` instead.
            if r.status_code == requests.codes.ok:
                return '{}/{}.{}'.format(HASTEBIN_SERVER, j['key'], ext)
            raise ServiceError(j['message'], r)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Classes to represent the definitions of aggregate functions.
"""
from django.core.exceptions import FieldError
from django.db.models.expressions import Func, Value
from django.db.models.fields import FloatField, IntegerField
__all__ = [
'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance',
]
class Aggregate(Func):
    """Base class for SQL aggregate functions (AVG, COUNT, MAX, ...)."""
    # Marks this expression-tree node as containing an aggregate so the
    # query compiler knows grouping may be required.
    contains_aggregate = True
    # Human readable name (e.g. 'Avg'); set by concrete subclasses.
    name = None
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """Resolve the wrapped expression against *query*.

        Raises FieldError when the aggregate is computed over another
        aggregate (only allowed when summarizing).
        """
        assert len(self.source_expressions) == 1
        # Aggregates are not allowed in UPDATE queries, so ignore for_save
        c = super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize)
        if c.source_expressions[0].contains_aggregate and not summarize:
            name = self.source_expressions[0].name
            raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
                c.name, name, name))
        c._patch_aggregate(query) # backward-compatibility support
        return c
    @property
    def input_field(self):
        # The single expression this aggregate is computed over.
        return self.source_expressions[0]
    @property
    def default_alias(self):
        # Alias used when the caller does not supply one, e.g. 'price__avg'.
        if hasattr(self.source_expressions[0], 'name'):
            return '%s__%s' % (self.source_expressions[0].name, self.name.lower())
        raise TypeError("Complex expressions require an alias")
    def get_group_by_cols(self):
        # Aggregates never contribute columns to GROUP BY.
        return []
    def _patch_aggregate(self, query):
        """
        Helper method for patching 3rd party aggregates that do not yet support
        the new way of subclassing. This method should be removed in 2.0
        add_to_query(query, alias, col, source, is_summary) will be defined on
        legacy aggregates which, in turn, instantiates the SQL implementation of
        the aggregate. In all the cases found, the general implementation of
        add_to_query looks like:
        def add_to_query(self, query, alias, col, source, is_summary):
            klass = SQLImplementationAggregate
            aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
            query.aggregates[alias] = aggregate
        By supplying a known alias, we can get the SQLAggregate out of the
        aggregates dict, and use the sql_function and sql_template attributes
        to patch *this* aggregate.
        """
        if not hasattr(self, 'add_to_query') or self.function is not None:
            return
        placeholder_alias = "_XXXXXXXX_"
        self.add_to_query(query, placeholder_alias, None, None, None)
        sql_aggregate = query.aggregates.pop(placeholder_alias)
        if 'sql_function' not in self.extra and hasattr(sql_aggregate, 'sql_function'):
            self.extra['function'] = sql_aggregate.sql_function
        if hasattr(sql_aggregate, 'sql_template'):
            self.extra['template'] = sql_aggregate.sql_template
class Avg(Aggregate):
    """SQL AVG() aggregate; results are always floats."""
    function = 'AVG'
    name = 'Avg'
    def __init__(self, expression, **extra):
        super(Avg, self).__init__(expression, output_field=FloatField(), **extra)
    def convert_value(self, value, expression, connection, context):
        # NULL propagates; any other backend value is coerced to float.
        return None if value is None else float(value)
class Count(Aggregate):
    """SQL COUNT() aggregate; NULL results are reported as 0."""
    function = 'COUNT'
    name = 'Count'
    template = '%(function)s(%(distinct)s%(expressions)s)'
    def __init__(self, expression, distinct=False, **extra):
        # COUNT(*) needs the star wrapped as a literal value expression.
        if expression == '*':
            expression = Value(expression)
        distinct_sql = 'DISTINCT ' if distinct else ''
        super(Count, self).__init__(
            expression, distinct=distinct_sql, output_field=IntegerField(), **extra)
    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        is_distinct = 'False' if self.extra['distinct'] == '' else 'True'
        return "{}({}, distinct={})".format(self.__class__.__name__, args, is_distinct)
    def convert_value(self, value, expression, connection, context):
        # COUNT never yields NULL semantically, so map missing values to 0.
        return 0 if value is None else int(value)
class Max(Aggregate):
    """SQL MAX() aggregate."""
    function = 'MAX'
    name = 'Max'
class Min(Aggregate):
    """SQL MIN() aggregate."""
    function = 'MIN'
    name = 'Min'
class StdDev(Aggregate):
    """SQL standard-deviation aggregate (population or sample variant)."""
    name = 'StdDev'
    def __init__(self, expression, sample=False, **extra):
        # Select the SQL function per variant; population is the default.
        if sample:
            self.function = 'STDDEV_SAMP'
        else:
            self.function = 'STDDEV_POP'
        super(StdDev, self).__init__(expression, output_field=FloatField(), **extra)
    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        flag = 'False' if self.function == 'STDDEV_POP' else 'True'
        return "{}({}, sample={})".format(self.__class__.__name__, args, flag)
    def convert_value(self, value, expression, connection, context):
        # NULL propagates; any other backend value is coerced to float.
        return None if value is None else float(value)
class Sum(Aggregate):
    """SQL SUM() aggregate."""
    function = 'SUM'
    name = 'Sum'
class Variance(Aggregate):
    """SQL variance aggregate (population or sample variant)."""
    name = 'Variance'
    def __init__(self, expression, sample=False, **extra):
        # Select the SQL function per variant; population is the default.
        if sample:
            self.function = 'VAR_SAMP'
        else:
            self.function = 'VAR_POP'
        super(Variance, self).__init__(expression, output_field=FloatField(), **extra)
    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        flag = 'False' if self.function == 'VAR_POP' else 'True'
        return "{}({}, sample={})".format(self.__class__.__name__, args, flag)
    def convert_value(self, value, expression, connection, context):
        # NULL propagates; any other backend value is coerced to float.
        return None if value is None else float(value)
|
unknown
|
codeparrot/codeparrot-clean
| ||
'''
backends/s3c.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright © 2008 Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
'''
from ..logging import logging, QuietError # Ensure use of custom logger class
from .. import BUFSIZE
from .common import (AbstractBackend, NoSuchObject, retry, AuthorizationError,
AuthenticationError, DanglingStorageURLError, retry_generator,
get_proxy, get_ssl_context, CorruptedObjectError, safe_unpickle,
checksum_basic_mapping)
from ..inherit_docstrings import (copy_ancestor_docstring, prepend_ancestor_docstring,
ABCDocstMeta)
from io import BytesIO
from shutil import copyfileobj
from dugong import (HTTPConnection, is_temp_network_error, BodyFollowing, CaseInsensitiveDict,
UnsupportedResponse, ConnectionClosed)
from base64 import b64encode, b64decode
from email.utils import parsedate_tz, mktime_tz
from ast import literal_eval
from urllib.parse import urlsplit, quote, unquote
import defusedxml.cElementTree as ElementTree
from itertools import count
import hashlib
import os
import binascii
import hmac
import re
import tempfile
import time
import urllib.parse
import pickle
# RFC 2616 weekday/month names for building HTTP Date headers by hand
# (strftime cannot be used because its output is locale dependent).
C_DAY_NAMES = [ 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun' ]
C_MONTH_NAMES = [ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec' ]
# Matches XML content types such as "application/xml" or "text/xml;charset=...".
XML_CONTENT_RE = re.compile(r'^(?:application|text)/xml(?:;|$)', re.IGNORECASE)
log = logging.getLogger(__name__)
class Backend(AbstractBackend, metaclass=ABCDocstMeta):
"""A backend to stored data in some S3 compatible storage service.
The backend guarantees only immediate get after create consistency.
"""
xml_ns_prefix = '{http://s3.amazonaws.com/doc/2006-03-01/}'
hdr_prefix = 'x-amz-'
known_options = {'no-ssl', 'ssl-ca-path', 'tcp-timeout',
'dumb-copy', 'disable-expect100'}
    def __init__(self, storage_url, login, password, options):
        '''Initialize backend object
        *ssl_context* may be a `ssl.SSLContext` instance or *None*.
        '''
        super().__init__()
        # 'no-ssl' disables TLS entirely; otherwise build an SSL context,
        # optionally trusting the CA bundle given via 'ssl-ca-path'.
        if 'no-ssl' in options:
            self.ssl_context = None
        else:
            self.ssl_context = get_ssl_context(options.get('ssl-ca-path', None))
        (host, port, bucket_name, prefix) = self._parse_storage_url(storage_url,
                                                                    self.ssl_context)
        self.options = options
        self.bucket_name = bucket_name
        self.prefix = prefix
        self.hostname = host
        self.port = port
        # Proxy selection depends on whether we speak plain HTTP or HTTPS.
        self.proxy = get_proxy(self.ssl_context is not None)
        self.conn = self._get_conn()
        self.password = password
        self.login = login
    @property
    @copy_ancestor_docstring
    def has_native_rename(self):
        # Plain S3 offers no rename primitive; callers must emulate it
        # with copy + delete.
        return False
    # NOTE: ! This function is also used by the swift backend !
    @copy_ancestor_docstring
    def reset(self):
        # A pending response or an unfinished outgoing body means the HTTP
        # stream is out of sync; drop the connection so it is re-opened.
        if self.conn.response_pending() or self.conn._out_remaining:
            log.debug('Resetting state of http connection %d', id(self.conn))
            self.conn.disconnect()
@staticmethod
def _parse_storage_url(storage_url, ssl_context):
'''Extract information from storage URL
Return a tuple * (host, port, bucket_name, prefix) * .
'''
hit = re.match(r'^[a-zA-Z0-9]+://' # Backend
r'([^/:]+)' # Hostname
r'(?::([0-9]+))?' # Port
r'/([^/]+)' # Bucketname
r'(?:/(.*))?$', # Prefix
storage_url)
if not hit:
raise QuietError('Invalid storage URL', exitcode=2)
hostname = hit.group(1)
if hit.group(2):
port = int(hit.group(2))
elif ssl_context:
port = 443
else:
port = 80
bucketname = hit.group(3)
prefix = hit.group(4) or ''
return (hostname, port, bucketname, prefix)
    def _get_conn(self):
        '''Return connection to server'''
        conn = HTTPConnection(self.hostname, self.port, proxy=self.proxy,
                              ssl_context=self.ssl_context)
        # Abort hanging requests after 'tcp-timeout' seconds (default 10).
        conn.timeout = int(self.options.get('tcp-timeout', 10))
        return conn
    # This method is also used implicitly for the retry handling of
    # `gs.Backend._get_access_token`. When modifying this method, do not forget
    # to check if this makes it unsuitable for use by `_get_access_token` (in
    # that case we will have to implement a custom retry logic there).
    @copy_ancestor_docstring
    def is_temp_failure(self, exc): #IGNORE:W0613
        # Server-side error classes that are transient by definition.
        if isinstance(exc, (InternalError, BadDigestError, IncompleteBodyError,
                            RequestTimeoutError, OperationAbortedError, SlowDownError)):
            return True
        elif is_temp_network_error(exc):
            return True
        # In doubt, we retry on 5xx (Server error). However, there are some
        # codes where retry is definitely not desired. For 4xx (client error) we
        # do not retry in general, but for 408 (Request Timeout) RFC 2616
        # specifies that the client may repeat the request without
        # modifications.
        elif (isinstance(exc, HTTPError) and
              ((500 <= exc.status <= 599
                and exc.status not in (501,505,508,510,511,523))
               or exc.status == 408)):
            return True
        return False
    # NOTE: ! This function is also used by the swift backend. !
    def _dump_response(self, resp, body=None):
        '''Return string representation of server response
        Only the beginning of the response body is read, so this is
        mostly useful for debugging.
        '''
        if body is None:
            try:
                body = self.conn.read(2048)
                if body:
                    self.conn.discard()
            except UnsupportedResponse:
                # dugong cannot parse the response; salvage raw bytes from
                # the socket and give up on the connection.
                log.warning('Unsupported response, trying to retrieve data from raw socket!')
                body = self.conn.read_raw(2048)
                self.conn.close()
        else:
            body = body[:2048]
        return '%d %s\n%s\n\n%s' % (resp.status, resp.reason,
                                    '\n'.join('%s: %s' % x for x in resp.headers.items()),
                                    body.decode('utf-8', errors='backslashreplace'))
    # NOTE: ! This function is also used by the swift backend. !
    def _assert_empty_response(self, resp):
        '''Assert that current response body is empty'''
        buf = self.conn.read(2048)
        if not buf:
            return # expected
        # Log the problem
        self.conn.discard()
        log.error('Unexpected server response. Expected nothing, got:\n'
                  '%d %s\n%s\n\n%s', resp.status, resp.reason,
                  '\n'.join('%s: %s' % x for x in resp.headers.items()),
                  buf)
        raise RuntimeError('Unexpected server response')
@retry
@copy_ancestor_docstring
def delete(self, key, force=False):
log.debug('started with %s', key)
try:
resp = self._do_request('DELETE', '/%s%s' % (self.prefix, key))
self._assert_empty_response(resp)
except NoSuchKeyError:
if force:
pass
else:
raise NoSuchObject(key)
    @retry_generator
    @copy_ancestor_docstring
    def list(self, prefix='', start_after=''):
        log.debug('started with %s, %s', prefix, start_after)
        keys_remaining = True
        marker = self.prefix + start_after
        prefix = self.prefix + prefix
        ns_p = self.xml_ns_prefix
        # Paginate through list results; the last seen key becomes the
        # marker for the next request.
        while keys_remaining:
            log.debug('requesting with marker=%s', marker)
            keys_remaining = None
            resp = self._do_request('GET', '/', query_string={ 'prefix': prefix,
                                                               'marker': marker,
                                                               'max-keys': 1000 })
            if not XML_CONTENT_RE.match(resp.headers['Content-Type']):
                raise RuntimeError('unexpected content type: %s' %
                                   resp.headers['Content-Type'])
            try:
                # Stream-parse the XML straight off the connection so we
                # never hold the full listing in memory.
                itree = iter(ElementTree.iterparse(self.conn, events=("start", "end")))
                (event, root) = next(itree)
                for (event, el) in itree:
                    if event != 'end':
                        continue
                    if el.tag == ns_p + 'IsTruncated':
                        keys_remaining = (el.text == 'true')
                    elif el.tag == ns_p + 'Contents':
                        marker = el.findtext(ns_p + 'Key')
                        yield marker[len(self.prefix):]
                        root.clear()
            except Exception as exc:
                if is_temp_network_error(exc):
                    # We probably can't use the connection anymore
                    self.conn.disconnect()
                raise
            except GeneratorExit:
                # Need to read rest of response
                self.conn.discard()
                break
        if keys_remaining is None:
            raise RuntimeError('Could not parse body')
@retry
@copy_ancestor_docstring
def lookup(self, key):
log.debug('started with %s', key)
try:
resp = self._do_request('HEAD', '/%s%s' % (self.prefix, key))
self._assert_empty_response(resp)
except HTTPError as exc:
if exc.status == 404:
raise NoSuchObject(key)
else:
raise
return self._extractmeta(resp, key)
    @retry
    @copy_ancestor_docstring
    def get_size(self, key):
        log.debug('started with %s', key)
        try:
            resp = self._do_request('HEAD', '/%s%s' % (self.prefix, key))
            self._assert_empty_response(resp)
        except HTTPError as exc:
            # 404 on HEAD means the object does not exist.
            if exc.status == 404:
                raise NoSuchObject(key)
            else:
                raise
        try:
            return int(resp.headers['Content-Length'])
        except KeyError:
            raise RuntimeError('HEAD request did not return Content-Length')
    @retry
    @copy_ancestor_docstring
    def open_read(self, key):
        try:
            resp = self._do_request('GET', '/%s%s' % (self.prefix, key))
        except NoSuchKeyError:
            raise NoSuchObject(key)
        try:
            meta = self._extractmeta(resp, key)
        except (BadDigestError, CorruptedObjectError):
            # If there's less than 64 kb of data, read and throw
            # away. Otherwise re-establish connection.
            if resp.length is not None and resp.length < 64*1024:
                self.conn.discard()
            else:
                self.conn.disconnect()
            raise
        return ObjectR(key, resp, self, meta)
    @prepend_ancestor_docstring
    def open_write(self, key, metadata=None, is_compressed=False, extra_headers=None):
        """
        The returned object will buffer all data and only start the upload
        when its `close` method is called.
        """
        log.debug('started with %s', key)
        headers = CaseInsensitiveDict()
        if extra_headers is not None:
            headers.update(extra_headers)
        if metadata is None:
            metadata = dict()
        # Metadata travels as x-amz-meta-* request headers.
        self._add_meta_headers(headers, metadata)
        return ObjectW(key, self, headers)
    # NOTE: ! This function is also used by the swift backend. !
    def _add_meta_headers(self, headers, metadata, chunksize=255):
        '''Serialize *metadata* into *-meta-NNN request headers.

        Values are repr()ed, percent-quoted, split into chunks of less
        than *chunksize* bytes (header length limits) and protected by
        an MD5 checksum header.
        '''
        hdr_count = 0
        length = 0
        for key in metadata.keys():
            if not isinstance(key, str):
                raise ValueError('dict keys must be str, not %s' % type(key))
            val = metadata[key]
            if (not isinstance(val, (str, bytes, int, float, complex, bool))
                and val is not None):
                raise ValueError('value for key %s (%s) is not elementary' % (key, val))
            if isinstance(val, (bytes, bytearray)):
                val = b64encode(val)
            # Each entry is serialized as a "repr(key): repr(val)," fragment
            # that _extractmeta() later re-assembles and literal_eval()s.
            buf = ('%s: %s,' % (repr(key), repr(val)))
            buf = quote(buf, safe='!@#$^*()=+/?-_\'"><\\| `.,;:~')
            if len(buf) < chunksize:
                headers['%smeta-%03d' % (self.hdr_prefix, hdr_count)] = buf
                hdr_count += 1
                length += 4 + len(buf)
            else:
                # Entry too long for one header; split across consecutive
                # numbered headers.
                i = 0
                while i*chunksize < len(buf):
                    k = '%smeta-%03d' % (self.hdr_prefix, hdr_count)
                    v = buf[i*chunksize:(i+1)*chunksize]
                    headers[k] = v
                    i += 1
                    hdr_count += 1
                length += 4 + len(buf)
        if length > 2048:
            raise ValueError('Metadata too large')
        assert hdr_count <= 999
        md5 = b64encode(checksum_basic_mapping(metadata)).decode('ascii')
        headers[self.hdr_prefix + 'meta-format'] = 'raw2'
        headers[self.hdr_prefix + 'meta-md5'] = md5
    @retry
    @copy_ancestor_docstring
    def copy(self, src, dest, metadata=None, extra_headers=None):
        log.debug('started with %s, %s', src, dest)
        headers = CaseInsensitiveDict()
        if extra_headers is not None:
            headers.update(extra_headers)
        headers[self.hdr_prefix + 'copy-source'] = \
            '/%s/%s%s' % (self.bucket_name, self.prefix, src)
        # COPY keeps the source metadata; REPLACE substitutes the new set.
        if metadata is None:
            headers[self.hdr_prefix + 'metadata-directive'] = 'COPY'
        else:
            headers[self.hdr_prefix + 'metadata-directive'] = 'REPLACE'
            self._add_meta_headers(headers, metadata)
        try:
            resp = self._do_request('PUT', '/%s%s' % (self.prefix, dest), headers=headers)
        except NoSuchKeyError:
            raise NoSuchObject(src)
        # When copying, S3 may return error despite a 200 OK status
        # http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
        # https://doc.s3.amazonaws.com/proposals/copy.html
        if self.options.get('dumb-copy', False):
            self.conn.discard()
            return
        body = self.conn.readall()
        root = self._parse_xml_response(resp, body)
        if root.tag == self.xml_ns_prefix + 'CopyObjectResult':
            return
        elif root.tag == 'Error':
            raise get_S3Error(root.findtext('Code'), root.findtext('Message'),
                              resp.headers)
        else:
            log.error('Unexpected server reply to copy operation:\n%s',
                      self._dump_response(resp, body))
            raise RuntimeError('Copy response has %s as root tag' % root.tag)
    @copy_ancestor_docstring
    def update_meta(self, key, metadata):
        # Implemented as a server-side copy onto itself with the REPLACE
        # metadata directive.
        self.copy(key, key, metadata)
    def _do_request(self, method, path, subres=None, query_string=None,
                    headers=None, body=None):
        '''Send request, read and return response object

        Transparently follows up to 10 redirects (given either as a
        Location header or as an XML <Endpoint> element in the body) and
        converts non-2xx responses into exceptions.
        '''
        log.debug('started with %s %s?%s, qs=%s', method, path, subres, query_string)
        if headers is None:
            headers = CaseInsensitiveDict()
        if isinstance(body, (bytes, bytearray, memoryview)):
            headers['Content-MD5'] = md5sum_b64(body)
        redirect_count = 0
        this_method = method
        while True:
            resp = self._send_request(this_method, path, headers=headers, subres=subres,
                                      query_string=query_string, body=body)
            if (resp.status < 300 or resp.status > 399):
                break
            # Assume redirect
            redirect_count += 1
            if redirect_count > 10:
                raise RuntimeError('Too many chained redirections')
            # First try location header...
            new_url = resp.headers['Location']
            if new_url:
                # Discard body
                self.conn.discard()
                # Pylint can't infer SplitResult Types
                #pylint: disable=E1103
                o = urlsplit(new_url)
                if o.scheme:
                    if self.ssl_context and o.scheme != 'https':
                        raise RuntimeError('Redirect to non-https URL')
                    elif not self.ssl_context and o.scheme != 'http':
                        raise RuntimeError('Redirect to non-http URL')
                if o.hostname != self.hostname or o.port != self.port:
                    self.hostname = o.hostname
                    self.port = o.port
                    self.conn.disconnect()
                    self.conn = self._get_conn()
                else:
                    raise RuntimeError('Redirect to different path on same host')
            # ..but endpoint may also be hidden in message body.
            # If we have done a HEAD request, we have to change to GET
            # to actually retrieve the body.
            elif resp.method == 'HEAD':
                log.debug('Switching from HEAD to GET to read redirect body')
                this_method = 'GET'
            # Try to read new URL from request body
            else:
                tree = self._parse_xml_response(resp)
                new_url = tree.findtext('Endpoint')
                if not new_url:
                    raise get_S3Error(tree.findtext('Code'), tree.findtext('Message'),
                                      resp.headers)
                self.hostname = new_url
                self.conn.disconnect()
                self.conn = self._get_conn()
                # Update method
                this_method = method
                log.info('_do_request(): redirected to %s', self.conn.hostname)
            # Rewind a streaming body so it can be re-sent after the redirect.
            if body and not isinstance(body, (bytes, bytearray, memoryview)):
                body.seek(0)
        # At the end, the request should have gone out with the right
        # method
        if this_method != method:
            raise RuntimeError('Dazed and confused - HEAD fails but GET works?')
        # Success
        if resp.status >= 200 and resp.status <= 299:
            return resp
        # Error
        self._parse_error_response(resp)
    def _parse_error_response(self, resp):
        '''Handle error response from server
        Try to raise most-specific exception.
        '''
        # Note that even though the final server backend may guarantee to always
        # deliver an XML document body with a detailed error message, we may
        # also get errors from intermediate proxies.
        content_type = resp.headers['Content-Type']
        # If method == HEAD, server must not return response body
        # even in case of errors
        if resp.method.upper() == 'HEAD':
            assert self.conn.read(1) == b''
            raise HTTPError(resp.status, resp.reason, resp.headers)
        # If not XML, do the best we can
        if not XML_CONTENT_RE.match(content_type):
            self.conn.discard()
            raise HTTPError(resp.status, resp.reason, resp.headers)
        # We don't stream the data into the parser because we want
        # to be able to dump a copy if the parsing fails.
        body = self.conn.readall()
        try:
            tree = ElementTree.parse(BytesIO(body)).getroot()
        # NOTE(review): bare except also catches BaseException; it always
        # re-raises, so the net effect is log-then-propagate.
        except:
            log.error('Unable to parse server response as XML:\n%s',
                      self._dump_response(resp, body))
            raise
        raise get_S3Error(tree.findtext('Code'), tree.findtext('Message'), resp.headers)
    def _parse_xml_response(self, resp, body=None):
        '''Return element tree for XML response'''
        content_type = resp.headers['Content-Type']
        # AWS S3 sometimes "forgets" to send a Content-Type
        # when responding to a multiple delete request.
        # https://forums.aws.amazon.com/thread.jspa?threadID=134372
        if content_type is None:
            log.error('Server did not provide Content-Type, assuming XML')
        elif not XML_CONTENT_RE.match(content_type):
            log.error('Unexpected server reply: expected XML, got:\n%s',
                      self._dump_response(resp))
            raise RuntimeError('Unexpected server response')
        # We don't stream the data into the parser because we want
        # to be able to dump a copy if the parsing fails.
        if body is None:
            body = self.conn.readall()
        try:
            tree = ElementTree.parse(BytesIO(body)).getroot()
        # NOTE(review): bare except re-raises after logging, so nothing
        # is swallowed here.
        except:
            log.error('Unable to parse server response as XML:\n%s',
                      self._dump_response(resp, body))
            raise
        return tree
    # NOTE: ! This function is also used by the swift backend. !
    @prepend_ancestor_docstring
    def clear(self):
        """
        This method may not be able to see (and therefore also not delete)
        recently uploaded objects.
        """
        # We have to cache keys, because otherwise we can't use the
        # http connection to delete keys.
        for (no, s3key) in enumerate(list(self)):
            if no != 0 and no % 1000 == 0:
                log.info('clear(): deleted %d objects so far..', no)
            log.debug('started with %s', s3key)
            # Ignore missing objects when clearing bucket
            self.delete(s3key, True)
    def __str__(self):
        # Human-readable identification of the backend location.
        return 's3c://%s/%s/%s' % (self.hostname, self.bucket_name, self.prefix)
    def _authorize_request(self, method, path, headers, subres):
        '''Add authorization information to *headers*

        Implements AWS signature version 2 (HMAC-SHA1 over the
        canonicalized request).
        '''
        # See http://docs.amazonwebservices.com/AmazonS3/latest/dev/RESTAuthentication.html
        # Date, can't use strftime because it's locale dependent
        now = time.gmtime()
        headers['Date'] = ('%s, %02d %s %04d %02d:%02d:%02d GMT'
                           % (C_DAY_NAMES[now.tm_wday],
                              now.tm_mday,
                              C_MONTH_NAMES[now.tm_mon - 1],
                              now.tm_year, now.tm_hour,
                              now.tm_min, now.tm_sec))
        auth_strs = [method, '\n']
        for hdr in ('Content-MD5', 'Content-Type', 'Date'):
            if hdr in headers:
                auth_strs.append(headers[hdr])
            auth_strs.append('\n')
        # Canonicalize the x-amz-* headers: sorted, folded whitespace.
        for hdr in sorted(x for x in headers if x.lower().startswith('x-amz-')):
            val = ' '.join(re.split(r'\s*\n\s*', headers[hdr].strip()))
            auth_strs.append('%s:%s\n' % (hdr, val))
        # Always include bucket name in path for signing
        sign_path = urllib.parse.quote('/%s%s' % (self.bucket_name, path))
        auth_strs.append(sign_path)
        if subres:
            auth_strs.append('?%s' % subres)
        # False positive, hashlib *does* have sha1 member
        #pylint: disable=E1101
        auth_str = ''.join(auth_strs).encode()
        signature = b64encode(hmac.new(self.password.encode(), auth_str,
                                       hashlib.sha1).digest()).decode()
        headers['Authorization'] = 'AWS %s:%s' % (self.login, signature)
    def _send_request(self, method, path, headers, subres=None, query_string=None, body=None):
        '''Add authentication and send request
        Returns the response object.
        '''
        if not isinstance(headers, CaseInsensitiveDict):
            headers = CaseInsensitiveDict(headers)
        self._authorize_request(method, path, headers, subres)
        # Construct full path
        if not self.hostname.startswith(self.bucket_name):
            path = '/%s%s' % (self.bucket_name, path)
        path = urllib.parse.quote(path)
        if query_string:
            s = urllib.parse.urlencode(query_string, doseq=True)
            if subres:
                path += '?%s&%s' % (subres, s)
            else:
                path += '?%s' % s
        elif subres:
            path += '?%s' % subres
        # We can probably remove the assertions at some point and
        # call self.conn.read_response() directly
        def read_response():
            resp = self.conn.read_response()
            assert resp.method == method
            assert resp.path == path
            return resp
        use_expect_100c = not self.options.get('disable-expect100', False)
        try:
            log.debug('sending %s %s', method, path)
            if body is None or isinstance(body, (bytes, bytearray, memoryview)):
                self.conn.send_request(method, path, body=body, headers=headers)
            else:
                # Streaming upload: announce the length, optionally wait
                # for "100 Continue" before sending the body.
                body_len = os.fstat(body.fileno()).st_size
                self.conn.send_request(method, path, expect100=use_expect_100c,
                                       headers=headers, body=BodyFollowing(body_len))
                if use_expect_100c:
                    resp = read_response()
                    if resp.status != 100: # Error
                        return resp
                try:
                    copyfileobj(body, self.conn, BUFSIZE)
                except ConnectionClosed:
                    # Server closed connection while we were writing body data -
                    # but we may still be able to read an error response
                    try:
                        resp = read_response()
                    except ConnectionClosed: # No server response available
                        pass
                    else:
                        if resp.status >= 400: # Got error response
                            return resp
                        log.warning('Server broke connection during upload, but signaled '
                                    '%d %s', resp.status, resp.reason)
                    # Re-raise first ConnectionClosed exception
                    raise
            return read_response()
        except Exception as exc:
            if is_temp_network_error(exc):
                # We probably can't use the connection anymore
                self.conn.disconnect()
            raise
    @copy_ancestor_docstring
    def close(self):
        # Drop the underlying HTTP connection; safe to call repeatedly.
        self.conn.disconnect()
    # NOTE: ! This function is also used by the swift backend !
    def _extractmeta(self, resp, obj_key):
        '''Extract metadata from HTTP response object

        Supports three wire formats: 'raw' (plain headers), the legacy
        'pickle' format, and the current 'raw2' format produced by
        _add_meta_headers().
        '''
        format_ = resp.headers.get('%smeta-format' % self.hdr_prefix, 'raw')
        if format_ in ('raw', 'pickle'):
            # Legacy formats: collect all *-meta-* headers verbatim.
            meta = CaseInsensitiveDict()
            pattern = re.compile(r'^%smeta-(.+)$' % re.escape(self.hdr_prefix),
                                 re.IGNORECASE)
            for fname in resp.headers:
                hit = pattern.search(fname)
                if hit:
                    meta[hit.group(1)] = resp.headers[fname]
            if format_ == 'raw':
                return meta
            # format_ == pickle
            buf = ''.join(meta[x] for x in sorted(meta) if x.lower().startswith('data-'))
            if 'md5' in meta and md5sum_b64(buf.encode('us-ascii')) != meta['md5']:
                log.warning('MD5 mismatch in metadata for %s', obj_key)
                raise BadDigestError('BadDigest', 'Meta MD5 for %s does not match' % obj_key)
            try:
                return safe_unpickle(b64decode(buf), encoding='latin1')
            except binascii.Error:
                raise CorruptedObjectError('Corrupted metadata, b64decode failed')
            except pickle.UnpicklingError as exc:
                raise CorruptedObjectError('Corrupted metadata, pickle says: %s' % exc)
        elif format_ != 'raw2': # Current
            raise RuntimeError('Unknown metadata format %s for key %s'
                               % (format_, obj_key))
        # 'raw2': concatenate the numbered header chunks back together.
        parts = []
        for i in count():
            # Headers is an email.message object, so indexing it
            # would also give None instead of KeyError
            part = resp.headers.get('%smeta-%03d' % (self.hdr_prefix, i), None)
            if part is None:
                break
            parts.append(part)
        buf = unquote(''.join(parts))
        meta = literal_eval('{ %s }' % buf)
        # Decode bytes values
        for (k,v) in meta.items():
            if not isinstance(v, bytes):
                continue
            try:
                meta[k] = b64decode(v)
            except binascii.Error:
                # This should trigger a MD5 mismatch below
                meta[k] = None
        # Check MD5. There is a case to be made for treating a mismatch as a
        # `CorruptedObjectError` rather than a `BadDigestError`, because the MD5
        # sum is not calculated on-the-fly by the server but stored with the
        # object, and therefore does not actually verify what the server has
        # sent over the wire. However, it seems more likely for the data to get
        # accidentally corrupted in transit than to get accidentally corrupted
        # on the server (which hopefully checksums its storage devices).
        md5 = b64encode(checksum_basic_mapping(meta)).decode('ascii')
        if md5 != resp.headers.get('%smeta-md5' % self.hdr_prefix, None):
            log.warning('MD5 mismatch in metadata for %s', obj_key)
            raise BadDigestError('BadDigest', 'Meta MD5 for %s does not match' % obj_key)
        return meta
class ObjectR(object):
    '''An S3 object open for reading'''
    # NOTE: This class is used as a base class for the swift backend,
    # so changes here should be checked for their effects on other
    # backends.
    def __init__(self, key, resp, backend, metadata=None):
        self.key = key
        self.resp = resp
        self.closed = False
        # Set to True once the full body has been read and verified.
        self.md5_checked = False
        self.backend = backend
        self.metadata = metadata
        # False positive, hashlib *does* have md5 member
        #pylint: disable=E1101
        self.md5 = hashlib.md5()
    def read(self, size=None):
        '''Read up to *size* bytes of object data
        For integrity checking to work, this method has to be called until
        it returns an empty string, indicating that all data has been read
        (and verified).
        '''
        if size == 0:
            return b''
        # This may raise an exception, in which case we probably can't
        # re-use the connection. However, we rely on the caller
        # to still close the file-like object, so that we can do
        # cleanup in close().
        buf = self.backend.conn.read(size)
        self.md5.update(buf)
        # Check MD5 on EOF
        # (size == None implies EOF)
        if (not buf or size is None) and not self.md5_checked:
            etag = self.resp.headers['ETag'].strip('"')
            self.md5_checked = True
            if etag != self.md5.hexdigest():
                log.warning('MD5 mismatch for %s: %s vs %s',
                            self.key, etag, self.md5.hexdigest())
                raise BadDigestError('BadDigest',
                                     'ETag header does not agree with calculated MD5')
        return buf
    def __enter__(self):
        return self
    def __exit__(self, *a):
        self.close()
        return False
    def close(self, checksum_warning=True):
        '''Close object
        If *checksum_warning* is true, this will generate a warning message if
        the object has not been fully read (because in that case the MD5
        checksum cannot be checked).
        '''
        if self.closed:
            return
        self.closed = True
        # If we have not read all the data, close the entire
        # connection (otherwise we loose synchronization)
        if not self.md5_checked:
            if checksum_warning:
                log.warning("Object closed prematurely, can't check MD5, and have to "
                            "reset connection")
            self.backend.conn.disconnect()
class ObjectW(object):
    '''An S3 object open for writing
    All data is first cached in memory, upload only starts when
    the close() method is called.
    '''
    # NOTE: This class is used as a base class for the swift backend,
    # so changes here should be checked for their effects on other
    # backends.
    def __init__(self, key, backend, headers):
        self.key = key
        self.backend = backend
        self.headers = headers
        self.closed = False
        self.obj_size = 0
        # According to http://docs.python.org/3/library/functions.html#open
        # the buffer size is typically ~8 kB. We process data in much
        # larger chunks, so buffering would only hurt performance.
        self.fh = tempfile.TemporaryFile(buffering=0)
        # False positive, hashlib *does* have md5 member
        #pylint: disable=E1101
        self.md5 = hashlib.md5()
    def write(self, buf):
        '''Write object data'''
        self.fh.write(buf)
        self.md5.update(buf)
        self.obj_size += len(buf)
    def is_temp_failure(self, exc):
        # Delegate retry classification to the owning backend.
        return self.backend.is_temp_failure(exc)
    @retry
    def close(self):
        '''Close object and upload data'''
        # Access to protected member ok
        #pylint: disable=W0212
        log.debug('started with %s', self.key)
        if self.closed:
            # still call fh.close, may have generated an error before
            self.fh.close()
            return
        self.fh.seek(0)
        self.headers['Content-Type'] = 'application/octet-stream'
        resp = self.backend._do_request('PUT', '/%s%s' % (self.backend.prefix, self.key),
                                        headers=self.headers, body=self.fh)
        etag = resp.headers['ETag'].strip('"')
        self.backend._assert_empty_response(resp)
        # Verify the upload end-to-end: the returned ETag must match the
        # MD5 we computed while buffering.
        if etag != self.md5.hexdigest():
            # delete may fail, but we don't want to loose the BadDigest exception
            try:
                self.backend.delete(self.key)
            finally:
                raise BadDigestError('BadDigest', 'MD5 mismatch for %s (received: %s, sent: %s)' %
                                     (self.key, etag, self.md5.hexdigest()))
        self.closed = True
        self.fh.close()
    def __enter__(self):
        return self
    def __exit__(self, *a):
        self.close()
        return False
    def get_obj_size(self):
        if not self.closed:
            raise RuntimeError('Object must be closed first.')
        return self.obj_size
def get_S3Error(code, msg, headers=None):
    '''Instantiate most specific S3Error subclass'''
    # Special case
    # http://code.google.com/p/s3ql/issues/detail?id=369
    if code == 'Timeout':
        code = 'RequestTimeout'
    name = code if code.endswith('Error') else code + 'Error'
    class_ = globals().get(name, S3Error)
    # Guard against unrelated globals that happen to match the name.
    if issubclass(class_, S3Error):
        return class_(code, msg, headers)
    return S3Error(code, msg, headers)
def md5sum_b64(buf):
    '''Return base64 encoded MD5 sum'''
    digest = hashlib.md5(buf).digest()
    return b64encode(digest).decode('ascii')
def _parse_retry_after(header):
    '''Parse headers for Retry-After value

    Return the delay in seconds, or None when *header* cannot be parsed.
    Values outside [0, 300] seconds are replaced by 1 second.
    '''
    hit = re.match(r'^\s*([0-9]+)\s*$', header)
    if hit:
        # Plain delta-seconds form
        val = int(header)
    else:
        # HTTP-date form: convert to seconds from now
        date = parsedate_tz(header)
        if date is None:
            log.warning('Unable to parse retry-after value: %s', header)
            return None
        # Bugfix: mktime_tz() takes the 10-tuple returned by parsedate_tz()
        # as a single argument; unpacking it (mktime_tz(*date)) raised
        # TypeError on every HTTP-date header.
        val = mktime_tz(date) - time.time()
    if val > 300 or val < 0:
        log.warning('Ignoring retry-after value of %.3f s, using 1 s instead', val)
        val = 1
    return val
class HTTPError(Exception):
    '''
    Represents an HTTP error returned by S3.
    '''
    def __init__(self, status, msg, headers=None):
        super().__init__()
        self.status = status
        self.msg = msg
        self.headers = headers
        # Honor the server's Retry-After hint when one was supplied.
        self.retry_after = None
        if headers and 'Retry-After' in headers:
            self.retry_after = _parse_retry_after(headers['Retry-After'])
    def __str__(self):
        return '%d %s' % (self.status, self.msg)
class S3Error(Exception):
    '''
    Represents an error returned by S3. For possible codes, see
    http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
    '''
    def __init__(self, code, msg, headers=None):
        super().__init__(msg)
        self.code = code
        self.msg = msg
        # Honor the server's Retry-After hint when one was supplied.
        self.retry_after = None
        if headers and 'Retry-After' in headers:
            self.retry_after = _parse_retry_after(headers['Retry-After'])
    def __str__(self):
        return '%s: %s' % (self.code, self.msg)
# Concrete S3 error codes, one subclass per code so that get_S3Error() can
# map a code string to the most specific exception type via globals().
# Some also inherit the generic auth/storage-URL errors defined elsewhere
# in this package, so backend-agnostic callers can catch those instead.
class NoSuchKeyError(S3Error): pass
class AccessDeniedError(S3Error, AuthorizationError): pass
class BadDigestError(S3Error): pass
class IncompleteBodyError(S3Error): pass
class InternalError(S3Error): pass
class InvalidAccessKeyIdError(S3Error, AuthenticationError): pass
class InvalidSecurityError(S3Error, AuthenticationError): pass
class SignatureDoesNotMatchError(S3Error, AuthenticationError): pass
class OperationAbortedError(S3Error): pass
class RequestTimeoutError(S3Error): pass
class SlowDownError(S3Error): pass
class RequestTimeTooSkewedError(S3Error): pass
class NoSuchBucketError(S3Error, DanglingStorageURLError): pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
Trait objects must include the `dyn` keyword.
Erroneous code example:
```edition2021,compile_fail,E0782
trait Foo {}
fn test(arg: Box<Foo>) {} // error!
```
Trait objects are a way to call methods on types that are not known until
runtime but conform to some trait.
Trait objects should be formed with `Box<dyn Foo>`, but in the code above
`dyn` is left off.
This makes it harder to see that `arg` is a trait object and not
simply a heap-allocated type called `Foo`.
To fix this issue, add `dyn` before the trait name.
```edition2021
trait Foo {}
fn test(arg: Box<dyn Foo>) {} // ok!
```
This used to be allowed before edition 2021, but is now an error.
|
unknown
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_error_codes/src/error_codes/E0782.md
|
# Copyright (c) 2015 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import db_helper as dh
from sahara.utils import files
def get_hive_db_password(cluster):
    # Look up the Hive metastore DB password previously stored for this
    # cluster (see db_helper for how passwords are persisted).
    return dh.get_password_from_db(cluster, 'hive_db_password')
def get_sentry_db_password(cluster):
    # Look up the Sentry DB password previously stored for this cluster.
    return dh.get_password_from_db(cluster, 'sentry_db_password')
def create_hive_database(cluster, remote):
    """Create the Hive metastore database on *remote*.

    Renders the bundled SQL template with the cluster's stored Hive
    password and executes it over the remote connection.
    """
    script_template = files.get_file_text(
        'plugins/cdh/v5_4_0/resources/create_hive_db.sql')
    rendered_script = script_template % get_hive_db_password(cluster)
    dh.remote_execute_db_script(remote, rendered_script)
def create_sentry_database(cluster, remote):
    """Create the Sentry database on *remote*.

    Renders the bundled SQL template with the cluster's stored Sentry
    password and executes it over the remote connection.
    """
    script_template = files.get_file_text(
        'plugins/cdh/v5_4_0/resources/create_sentry_db.sql')
    rendered_script = script_template % get_sentry_db_password(cluster)
    dh.remote_execute_db_script(remote, rendered_script)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from unittest import TestCase
from functools import wraps
from nose.plugins.skip import SkipTest
from nltk.util import py26
def skip(reason):
    """
    Unconditionally skip a test.

    Works both on TestCase subclasses and on plain test functions. On
    Python 2.6, where unittest ignores __unittest_skip__, every test_
    method of a decorated class is patched to raise SkipTest directly.
    """
    def decorator(test_item):
        if isinstance(test_item, type) and issubclass(test_item, TestCase):
            if py26():
                # Python 2.6 unittest doesn't understand
                # __unittest_skip__, so patch each test_ method instead.
                for name in dir(test_item):
                    if name.startswith('test_'):
                        original = getattr(test_item, name)
                        setattr(test_item, name, skip(reason)(original))
        else:
            @wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise SkipTest(reason)
            skip_wrapper.__name__ = test_item.__name__
            test_item = skip_wrapper
        test_item.__unittest_skip__ = True
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
def skipIf(condition, reason):
    """
    Skip a test if the condition is true.

    Returns the skip(reason) decorator when *condition* holds, otherwise
    an identity decorator that leaves the test untouched.
    """
    return skip(reason) if condition else (lambda obj: obj)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from shaker.shaker_core import *
from shaker.nodegroups import *
@login_required(login_url="/account/login/")
def manage_group(request):
    """Render the group-management page listing every node group."""
    node_groups = NodeGroups()
    return render(request, 'groups/manage_group.html',
                  {'list_groups': node_groups.list_groups()})
@login_required(login_url="/account/login/")
def add_group(request):
    """Create one node group per submitted POST value.

    Responds with the name of the last group created, or an empty body
    when no POST data is present (previously this raised NameError,
    because group_name was only bound inside the POST branch).
    """
    group = NodeGroups()
    group_name = ''  # avoid NameError on GET / empty POST
    if request.POST:
        for key in request.POST:
            group_name = request.POST.get(key)
            group.add_groups(group_name)
    return HttpResponse(group_name)
@login_required(login_url="/account/login/")
def del_group(request):
    """Delete one node group per submitted POST value.

    Responds with the name of the last group deleted, or an empty body
    when no POST data is present (previously this raised NameError,
    because group_name was only bound inside the POST branch).
    """
    group = NodeGroups()
    group_name = ''  # avoid NameError on GET / empty POST
    if request.POST:
        for key in request.POST:
            group_name = request.POST.get(key)
            group.del_groups(group_name)
    return HttpResponse(group_name)
@login_required(login_url="/account/login/")
def modify_group(request):
    """Rename a node group, responding with the original group name.

    Responds with an empty body when no POST data is present (previously
    this raised NameError, because group_name was only bound inside the
    POST branch).
    """
    group = NodeGroups()
    group_name = ''  # avoid NameError on GET / empty POST
    if request.POST:
        group_name = request.POST.get("groups_name")
        new_name = request.POST.get("modify")
        group.modify_groups(group_name, new_name)
    return HttpResponse(group_name)
@login_required(login_url="/account/login/")
def manage_host(request):
    """Render the host-management page with groups and their hosts."""
    node_groups = NodeGroups()
    groups_with_hosts = node_groups.list_groups_hosts()
    return render(request, 'groups/manage_host.html',
                  {'list_groups': groups_with_hosts})
@login_required(login_url="/account/login/")
def add_host(request):
    """Add a host to a node group, responding with the host name.

    Responds with an empty body when no POST data is present (previously
    this raised NameError, because host_name was only bound inside the
    POST branch).
    """
    host = NodeGroups()
    host_name = ''  # avoid NameError on GET / empty POST
    if request.POST:
        group_name = request.POST.get("groups_name")
        host_name = request.POST.get("hosts_name")
        host.add_hosts(group_name, host_name)
    return HttpResponse(host_name)
@login_required(login_url="/account/login/")
def del_host(request):
    """Remove a host from a node group, responding with the host name.

    Responds with an empty body when no POST data is present (previously
    this raised NameError, because host_name was only bound inside the
    POST branch).
    """
    host = NodeGroups()
    host_name = ''  # avoid NameError on GET / empty POST
    if request.POST:
        group_name = request.POST.get("groups_name")
        host_name = request.POST.get("hosts_name")
        host.del_hosts(group_name, host_name)
    return HttpResponse(host_name)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource display info for the Calliope display module."""
class DisplayInfo(object):
  """Accumulates resource display info used to prime the Displayer.

  The "legacy" flag supports the incremental Command class refactor and
  will be dropped once that refactor completes.

  Attributes:
    _format: The default format string. args.format takes precedence.
    _transforms: The filter/format transforms symbol dict.
    _aliases: The resource name alias dict.
    _legacy: Use legacy Command methods for display info if True. This will
      be deleted when all commands are refactored to use
      parser.display_info.
  """

  def __init__(self):
    self._legacy = True
    self._format = None
    self._transforms = {}
    self._aliases = {}

  # pylint: disable=redefined-builtin, name matches args.format and --format
  def AddFormat(self, format):
    """Records a default format; non-empty newer values take precedence.

    Args:
      format: The default format string. args.format takes precedence.
    """
    self._legacy = False
    if format:
      self._format = format

  def AddTransforms(self, transforms):
    """Merges transforms in; newer values override existing keys.

    Args:
      transforms: A filter/format transforms symbol dict.
    """
    self._legacy = False
    if transforms:
      self._transforms.update(transforms)

  def AddAliases(self, aliases):
    """Merges resource name aliases in; newer values override existing keys.

    Args:
      aliases: The resource name alias dict.
    """
    self._legacy = False
    if aliases:
      self._aliases.update(aliases)

  def AddLowerDisplayInfo(self, display_info):
    """Folds lower-precedence display_info into this object.

    Called by calliope to propagate CLI low precedence parent info to its
    high precedence children.

    Args:
      display_info: The low precedence DisplayInfo object to add.
    """
    # Parent format only fills a gap; parent transforms/aliases are merged
    # underneath this object's own entries.
    if not self._format:
      self._format = display_info.format
    if display_info.transforms:
      merged_transforms = dict(display_info.transforms)
      merged_transforms.update(self.transforms)
      self._transforms = merged_transforms
    if display_info.aliases:
      merged_aliases = dict(display_info.aliases)
      merged_aliases.update(self._aliases)
      self._aliases = merged_aliases

  @property
  def format(self):
    return self._format

  @property
  def aliases(self):
    return self._aliases

  @property
  def transforms(self):
    return self._transforms

  @property
  def legacy(self):
    return self._legacy

  @legacy.setter
  def legacy(self, value):
    self._legacy = value
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"log"
"os"
"reflect"
"testing"
"github.com/stretchr/testify/require"
"sigs.k8s.io/yaml"
)
// Paths to the TLS fixture files used by TestConfigFromFile, relative to
// this package directory.
var (
	certPath = "../../../tests/fixtures/server.crt"
	privateKeyPath = "../../../tests/fixtures/server.key.insecure"
	caPath = "../../../tests/fixtures/ca.crt"
)
// TestConfigFromFile writes each yamlConfig fixture to a temporary YAML
// file, loads it back through NewConfig, and verifies the resulting
// endpoints and TLS settings.
func TestConfigFromFile(t *testing.T) {
	tests := []struct {
		ym   *yamlConfig
		werr bool // whether NewConfig is expected to fail for this fixture
	}{
		{
			&yamlConfig{},
			false,
		},
		{
			&yamlConfig{
				InsecureTransport: true,
			},
			false,
		},
		{
			&yamlConfig{
				Keyfile:               privateKeyPath,
				Certfile:              certPath,
				TrustedCAfile:         caPath,
				InsecureSkipTLSVerify: true,
			},
			false,
		},
		{
			// Nonexistent key/cert paths must make NewConfig fail.
			&yamlConfig{
				Keyfile:  "bad",
				Certfile: "bad",
			},
			true,
		},
		{
			// Valid key/cert but a bad CA path must make NewConfig fail.
			&yamlConfig{
				Keyfile:       privateKeyPath,
				Certfile:      certPath,
				TrustedCAfile: "bad",
			},
			true,
		},
	}
	for i, tt := range tests {
		// Marshal the fixture to YAML and write it where NewConfig can
		// read it back.
		tmpfile, err := os.CreateTemp(t.TempDir(), "clientcfg")
		if err != nil {
			log.Fatal(err)
		}
		b, err := yaml.Marshal(tt.ym)
		require.NoError(t, err)
		_, err = tmpfile.Write(b)
		require.NoError(t, err)
		require.NoError(t, tmpfile.Close())
		cfg, cerr := NewConfig(tmpfile.Name())
		if cerr != nil && !tt.werr {
			t.Errorf("#%d: err = %v, want %v", i, cerr, tt.werr)
			continue
		}
		if cerr != nil {
			// Expected failure case: nothing more to verify.
			os.Remove(tmpfile.Name())
			continue
		}
		if !reflect.DeepEqual(cfg.Endpoints, tt.ym.Endpoints) {
			t.Errorf("#%d: endpoint = %v, want %v", i, cfg.Endpoints, tt.ym.Endpoints)
		}
		// InsecureTransport means no TLS config should be produced at all.
		if tt.ym.InsecureTransport != (cfg.TLS == nil) {
			t.Errorf("#%d: insecureTransport = %v, want %v", i, cfg.TLS == nil, tt.ym.InsecureTransport)
		}
		if !tt.ym.InsecureTransport {
			if tt.ym.Certfile != "" && len(cfg.TLS.Certificates) == 0 {
				t.Errorf("#%d: failed to load in cert", i)
			}
			if tt.ym.TrustedCAfile != "" && cfg.TLS.RootCAs == nil {
				t.Errorf("#%d: failed to load in ca cert", i)
			}
			if cfg.TLS.InsecureSkipVerify != tt.ym.InsecureSkipTLSVerify {
				t.Errorf("#%d: skipTLSVeify = %v, want %v", i, cfg.TLS.InsecureSkipVerify, tt.ym.InsecureSkipTLSVerify)
			}
		}
		os.Remove(tmpfile.Name())
	}
}
|
go
|
github
|
https://github.com/etcd-io/etcd
|
client/v3/yaml/config_test.go
|
""" Test functions for limits module.
"""
import types
import warnings
import pytest
import numpy as np
from numpy import double, half, longdouble, single
from numpy._core import finfo, iinfo
from numpy.testing import assert_, assert_equal, assert_raises
##################################################
class TestPythonFloat:
    def test_singleton(self):
        # finfo caches instances, so repeated lookups for the same type
        # must return the very same object.
        first = np.finfo(float)
        second = np.finfo(float)
        assert_equal(id(first), id(second))
class TestHalf:
    def test_singleton(self):
        # finfo caches instances, so repeated lookups for np.half must
        # return the very same object.
        first = np.finfo(np.half)
        second = np.finfo(np.half)
        assert_equal(id(first), id(second))
class TestSingle:
    def test_singleton(self):
        # finfo caches instances, so repeated lookups for np.single must
        # return the very same object.
        first = np.finfo(np.single)
        second = np.finfo(np.single)
        assert_equal(id(first), id(second))
class TestDouble:
    def test_singleton(self):
        # finfo caches instances, so repeated lookups for np.double must
        # return the very same object.
        first = np.finfo(np.double)
        second = np.finfo(np.double)
        assert_equal(id(first), id(second))
class TestLongdouble:
    def test_singleton(self):
        # finfo caches instances, so repeated lookups for np.longdouble
        # must return the very same object.
        first = np.finfo(np.longdouble)
        second = np.finfo(np.longdouble)
        assert_equal(id(first), id(second))
def assert_finfo_equal(f1, f2):
    # assert two finfo instances have the same attributes
    attr_names = ('bits', 'eps', 'epsneg', 'iexp', 'machep',
                  'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
                  'nmant', 'precision', 'resolution', 'tiny',
                  'smallest_normal', 'smallest_subnormal')
    for name in attr_names:
        assert_equal(getattr(f1, name), getattr(f2, name),
                     f'finfo instances {f1} and {f2} differ on {name}')
def assert_iinfo_equal(i1, i2):
    # assert two iinfo instances have the same attributes
    for name in ('bits', 'min', 'max'):
        assert_equal(getattr(i1, name), getattr(i2, name),
                     f'iinfo instances {i1} and {i2} differ on {name}')
class TestFinfo:
def test_basic(self):
dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
[np.float16, np.float32, np.float64, np.complex64,
np.complex128]))
for dt1, dt2 in dts:
assert_finfo_equal(finfo(dt1), finfo(dt2))
assert_raises(ValueError, finfo, 'i4')
def test_regression_gh23108(self):
# np.float32(1.0) and np.float64(1.0) have the same hash and are
# equal under the == operator
f1 = np.finfo(np.float32(1.0))
f2 = np.finfo(np.float64(1.0))
assert f1 != f2
def test_regression_gh23867(self):
class NonHashableWithDtype:
__hash__ = None
dtype = np.dtype('float32')
x = NonHashableWithDtype()
assert np.finfo(x) == np.finfo(x.dtype)
def test_no_none_sense(self):
assert_raises(TypeError, finfo, None)
class TestIinfo:
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
[np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]))
for dt1, dt2 in dts:
assert_iinfo_equal(iinfo(dt1), iinfo(dt2))
assert_raises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np._core.sctypes['uint']
for T in types:
with np.errstate(over="ignore"):
max_calculated = T(0) - T(1)
assert_equal(iinfo(T).max, max_calculated)
class TestRepr:
    def test_iinfo_repr(self):
        assert_equal(repr(np.iinfo(np.int16)),
                     "iinfo(min=-32768, max=32767, dtype=int16)")

    def test_finfo_repr(self):
        expected = ("finfo(resolution=1e-06, min=-3.4028235e+38,"
                    " max=3.4028235e+38, dtype=float32)")
        assert_equal(repr(np.finfo(np.float32)), expected)
def test_instances():
    # finfo/iinfo called on a numeric *instance* must agree with the
    # result for the corresponding type.
    for int_type in [int, np.int16, np.int32, np.int64]:
        assert_iinfo_equal(iinfo(int_type), iinfo(int_type(12)))

    for float_type in [float, np.float16, np.float32, np.float64]:
        assert_finfo_equal(finfo(float_type), finfo(float_type(1.2)))

    # Mismatched kinds are rejected outright.
    with pytest.raises(ValueError):
        iinfo(10.)
    with pytest.raises(ValueError):
        iinfo('hi')
    with pytest.raises(ValueError):
        finfo(np.int64(1))
def test_subnormal_warning():
    """Test that the subnormal is zero warning is not being raised."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        # Common float types first.
        for dtype in (np.float16, np.float32, np.float64):
            _ = np.finfo(dtype).smallest_subnormal
        # longdouble may set FP status flags; silence those while probing.
        with np.errstate(all='ignore'):
            _ = np.finfo(np.longdouble).smallest_subnormal
    # None of the probes may have emitted a warning.
    assert len(caught) == 0
def test_plausible_finfo():
# Assert that finfo returns reasonable results for all types
for ftype in np._core.sctypes['float'] + np._core.sctypes['complex']:
info = np.finfo(ftype)
assert_(info.nmant > 1)
assert_(info.minexp < -1)
assert_(info.maxexp > 1)
class TestRuntimeSubscriptable:
    # np.finfo and np.iinfo support PEP 585-style subscripting at runtime,
    # producing types.GenericAlias objects.
    def test_finfo_generic(self):
        assert isinstance(np.finfo[np.float64], types.GenericAlias)
    def test_iinfo_generic(self):
        assert isinstance(np.iinfo[np.int_], types.GenericAlias)
|
python
|
github
|
https://github.com/numpy/numpy
|
numpy/_core/tests/test_getlimits.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.